\chapter*{List of Acronyms} \begin{acronym} \acro{AI}{Artificial Intelligence} \acro{AGL}{Above Ground Level} \acro{ANN}{Artificial Neural Network} \acro{CPU}{Central Processing Unit} \acro{COTS}{commercial-off-the-shelf} \acro{CAGR}{Compound Annual Growth Rate} \acro{CNN}{Convolutional Neural Network} \acro{CRNN}{Convolutional Recurrent Neural Network} \acro{D3QN}{Dueling Double Deep Q-Network} \acro{DART}{Dynamic Animation and Robotics Toolkit} \acro{DDPG}{Deep Deterministic Policy Gradient} \acro{DoD}{Department of Defense} \acro{DPG}{Deterministic Policy Gradient} \acro{DQN}{Deep Q-Network} \acro{DRL}{Deep Reinforcement Learning} \acro{EKF}{Extended Kalman Filter} \acro{ESC}{Event Sound Classification} \acro{FALA}{Finite Action-set Learning Automata} \acro{FLOPS}{Floating Point Operations Per Second} \acro{FVI}{Fitted Value Iteration} \acro{GPU}{Graphics Processing Unit} \acro{GCS}{Ground Control Station} \acro{GMM}{Gaussian Mixture Model} \acro{GPS}{Global Positioning System} \acro{IMU}{Inertial Measurement Unit} \acro{IR}{Infrared} \acro{ISR}{Intelligence, Surveillance and Reconnaissance} \acro{LSTM}{Long Short Term Memory} \acro{LWLR}{Locally Weighted Linear Regression} \acro{MBRL}{Model Based Reinforcement Learning} \acro{MSL}{Mean Sea Level} \acro{MAV}{Micro Aerial Vehicle} \acro{MDP}{Markov Decision Process} \acro{MPC}{Model Predictive Control} \acro{NUI}{Natural User Interface} \acro{ODE}{Open Dynamics Engine} \acro{OGRE}{Object-Oriented Graphics Rendering Engine} \acro{PPM}{Pulse Position Modulation} \acro{PPO}{Proximal Policy Optimization} \acro{PID}{Proportional-Integral-Derivative} \acro{PS-DNN}{Partially Shared-Deep Neural Network} \acro{RAM}{Random Access Memory} \acro{RDPG}{Recurrent Deterministic Policy Gradient} \acro{RNN}{Recurrent Neural Network} \acro{RL}{Reinforcement Learning} \acro{ROS}{Robot Operating System} \acro{SARSA}{State-action-reward-state-action} \acro{SDF}{Simulation Description Format} \acro{SLAM}{Simultaneous Localization and Mapping} \acro{STARMAC}{Stanford Testbed of Autonomous Rotorcraft for Multi-Agent Control} \acro{STFT}{Short Time Fourier Transform} \acro{SGD}{Stochastic Gradient Descent} \acro{SWaP}{Size, Weight, and Power} \acro{TD}{Temporal Difference} \acro{TRPO}{Trust Region Policy Optimization} \acro{UAS}{Unmanned Aerial System} \acro{UAV}{Unmanned Aerial Vehicle} \acro{UCT}{Upper Confidence bounds applied to Trees} \acro{VTOL}{Vertical Takeoff and Landing} \acro{SDK}{Software Development Kit} \acro{API}{Application Programming Interface} \acro{RTF}{Ready-to-Fly} \acro{FAA}{Federal Aviation Administration} \acro{AMA}{Academy of Model Aeronautics} \acro{GPL}{GNU General Public License} \acro{BSD}{Berkeley Software Distribution} \acro{MAVLink}{Micro Air Vehicle Communication Protocol} \acro{GUI}{Graphical User Interface} \end{acronym} \section{Introduction} \label{sec:Intro} The current era of \acp{UAS} has already made significant contributions to civilian, commercial, and military applications \cite{gupta2013review, DoD}. The ability of aerial systems to perform tasks without a human operator/pilot in the cockpit has enabled these systems to evolve into different sizes, forms, and capabilities, and to conduct tasks and missions that were previously hazardous or infeasible. Since the penetration of UAS into different realms of our lives is only going to increase, it is important to understand the current state-of-the-art, determine open challenges, and provide road-maps to overcome these challenges.
The \ac{UAS} market is growing at a \ac{CAGR} of 15.5\% and is projected to reach USD 45.8 billion by 2025 \cite{UAV_Market}. While this growth seems extremely promising, several challenges need to be overcome before UASs can achieve their full potential. Most of these UASs are controlled by an operator and depend on reliable wireless communication links to maintain control and accomplish tasks. As the number of these systems increases and mission complexity escalates, autonomy will play a crucial role in the next generation of UAS. In the next decade, we will see an incredible push towards autonomy for UAS, just as autonomy has evolved in manufacturing, the automotive industry, and other robotics-related markets. When it comes to autonomy, there are several definitions and levels of autonomy claimed by manufacturers. Similarly, several definitions and requirements for various levels of autonomy exist in the literature. According to \cite{Level_autonomy}, autonomy in UAS can be divided into five levels as follows, \begin{itemize} \item \textbf{Level 1 - Pilot Assistance:} At this initial level, the UAS operator still maintains control of the overall operation and safety of the UAS. Meanwhile, the UAS can take over at least one function (to support navigation or maintain flight stability) for a limited period of time. Therefore, at this level, the UAS is never in control of both speed and direction of flight simultaneously; these controls always remain with the operator. \item \textbf{Level 2 - Partial Automation:} Here, the UAS is capable of taking control of altitude, heading, and speed in some limited scenarios. It is important to understand that the operator is still responsible for the safe operation of the UAS and hence needs to keep monitoring the environment and flight path to take control when needed. This type of automation is predominantly used for applications with pre-planned paths and schedules. At this level, the UAS is said to be capable of \textit{sensing}. \item \textbf{Level 3 - Conditional Automation:} This case is similar to Level 2 with the exception that the UAS can notify the operator using onboard sensors if intervention is needed. This means the operator can be a little more disengaged than in Level 2 and acts as the backup controller. It is important to understand that at this level the scenarios of operation are relatively static. If any change in operating conditions is detected, the UAS will alert the operator to take over control. At this level, the UAS is said to be capable of \textit{sense and avoid}. \item \textbf{Level 4 - High Automation:} At this level, the UAS is designed to operate without operator intervention in many circumstances, with the capability to detect and avoid obstacles using built-in functionalities, rule sets, or machine learning-based algorithms deployed on the embedded computers onboard. While the operator can take control of the UAS, it is not necessary since several backup systems are in place to ensure safety in case one system fails. This is where an ideal system is expected to adapt to highly dynamic environments using powerful techniques like machine learning. At this level, the UAS is said to have achieved complete \textit{sense and navigate} capability.
\item \textbf{Level 5 - Full Automation:} In this final level, the UAS operates fully autonomously without any intervention from operators regardless of the operating scenario. This includes not only sense and navigate but also the ability to learn, adapt its objectives and goals, or even optimize its operational objectives and make necessary changes on-the-fly. \end{itemize} Several of today's UASs have limited semi-autonomous modes (levels 1 to 3) that allow the UAS to perform some autonomous actions such as returning to the initial location, following a pre-determined flight path, performing maneuvering acts, and recovering from some standard instabilities, among others. Completely autonomous systems (levels 4 and 5) that can interact with and survive in a dynamic environment without the need for a human-in-the-loop are still far from being realized or deployed in a safe and effective manner. Machine learning, a subset of artificial intelligence, has seen a spike in its application in various domains. This resurgence from its last winter is attributed to two main reasons: (i) the exponential growth in computing resources in the last decade, and (ii) the digitization of the modern era, which has provided access to huge quantities of data that can be used to train machine learning models. Today, we see machine learning algorithms successfully applied to computer vision \cite{sebe2005machine,DL_CV,alexnet}, natural language processing \cite{NLP,NLP_2}, medical applications \cite{litjens2017survey}, wireless communication \cite{JagannathAdHoc2019,Ajagannath6G2020}, signal intelligence \cite{Jagannath19MLBook}, robotics \cite{polydoros2017survey}, and speech recognition \cite{DL_speech}, among others. These advancements in the field of machine learning have rendered it a perfect candidate to realize autonomy in UAS. To this end, in this chapter, we discuss the advances made in machine learning, specifically deep learning and reinforcement learning, to facilitate UAS autonomy. We also look at the key challenges and open research problems that need to be addressed for UAS autonomy. We hope this chapter becomes a great guide to beginners as well as seasoned researchers looking to take larger strides in these areas of research. \subsection{Applications of UAS} \begin{figure}[h] \centering \includegraphics[width=4.7 in]{editor/Application.pdf} \caption{Various applications of UAS} \label{fig:App} \end{figure} The applications of UAS can be broadly divided as follows: (i) \ac{ISR}, (ii) payload/product delivery, and (iii) maintenance and repair, as shown in Figure \ref{fig:App}. Presently, ISR is the most common application that \acp{UAS} are employed for in both commercial and military realms. UASs are used for surveillance and remote sensing to map areas of interest using sensors such as traditional cameras or other sensors like acoustic, \ac{IR}, and radar. UASs are also used to monitor and survey oilfields, crops, power grids, and other areas that are remote or difficult for operators to access. The surveys are also used for education, environment and climate studies, tourism, mapping, crop assessments, weather, traffic monitoring, and border management. Similarly, UASs are used for humanitarian aid and rescue operations by first responders during disasters like floods and earthquakes where road access does not exist or has been rendered inaccessible. UASs are also being actively designed and developed to become efficient agents for payload delivery.
In the commercial and civilian domains, these payloads include packages from online retailers, medical supplies to hospitals or areas of disaster, and maintenance parts for remote locations. As one can imagine, the delivery of different kinds of payloads will also be critical for several military missions, and UAS might provide a safer alternative for accomplishing such deliveries in hostile areas with limited accessibility. Though not prevalent yet, it is envisioned that UAS will open up the market for several maintenance and repair tasks for the aerospace industry, power grids, wind farms, and other operations that are not easy to access. Currently, UASs are already being deployed to monitor and detect faults as well as provide maintenance alerts to reduce operational expenses. It is envisioned that robotics-enabled UASs will, in the near future, also be able to intervene when faults are detected or repairs are needed. \subsection{Classification of UAS} \label{subsec:classUAS} \begin{figure}[h] \centering \hspace{-0.5 cm} \includegraphics[width=4.8 in]{editor/Classification.pdf} \caption{Classification of Unmanned Aerial Systems} \label{fig:Class} \end{figure} The preceding discussion of UAS applications makes clear the need for versatility in UAS design. For these reasons, and because of the immense prospects that UAS holds for the future, UASs have evolved into different forms and sizes. While most of the discussion in this chapter is not specific to any particular type of UAS platform, we provide a succinct classification of UASs in Figure \ref{fig:Class}. Several characteristics are used to classify different types of UASs. Here, we present the three most prevalent ones. The first is the classification adopted by the \ac{DoD}, which divides the systems into five groups based on weight, flight altitude, and velocity. The other two classifications are based on wing type and on takeoff and landing method. All three are summarized in Figure \ref{fig:Class}. \subsection{Chapter Organization} \begin{figure}[h] \centering \includegraphics[width=4.6 in]{editor/Chapter_Organisation.pdf} \caption{Organization of the Chapter} \label{fig:Organization} \end{figure} This chapter is written for the benefit of a broad array of readers who have different levels of understanding and experience in this area of research. Therefore, for the benefit of readers who are relatively new to machine learning, we start by providing an overview of the specific machine learning techniques that are explored in this chapter. The detailed explanation of these techniques should enable even a beginner in the area of machine learning to grasp them and benefit from the rest of the discussion in the chapter. The core contribution of this chapter is presented in the next four sections. Among these, two sections are dedicated to the discussion of the various deep learning and reinforcement learning techniques that have been explored for UAS. In each of these sections, we also discuss the open problems and challenges to motivate researchers to explore these areas further. Since the goal of every research endeavor is to ensure the novel algorithms and solutions are effectively deployed on target platforms, in the next two sections, we look at simulation suites and hardware platforms that can help expedite this process. Finally, we conclude the chapter in the final section. The overall chapter organization is depicted in Figure \ref{fig:Organization}.
\subsection{Notations} Here, we introduce some standard notations that will be used throughout this chapter. Matrices and vectors will be denoted by boldface upper and lower-case letters, respectively. For a vector $\mathbf{x}$, $x_i$ denotes the $i$-th element, $\norm{\mathbf{x}}$ indicates the Euclidean norm, $\mathbf{x}^\intercal$ represents its transpose, and $\mathbf{x} \cdot \mathbf{y}$ the Euclidean inner product of $\mathbf{x}$ and $\mathbf{y}$. For a matrix $\mathbf{H}$, $H_{ij}$ will indicate the element at row $i$ and column $j$. The notation $\mathbb{R}$ and $\mathbb{C}$ will indicate the set of real and complex numbers, respectively. The notation $\mathbb{E}_{x\sim p(x)}\left[f(x)\right]$ is used to denote the expected value, or average, of the function $f(x)$ where the random variable $x$ is drawn from the distribution $p(x)$. When a probability distribution of a random variable, $x$, is conditioned on a set of parameters, $\boldsymbol{\theta}$, we write $p(x;\boldsymbol{\theta})$ to emphasize the fact that $\boldsymbol{\theta}$ parameterizes the distribution and reserve the typical conditional distribution notation, $p(x|y)$, for the distribution of the random variable $x$ conditioned on the random variable $y$. We use the standard notation for operations on sets where $\cup$ and $\cap$ are the infix operators denoting the union and intersection of two sets, respectively. We use $S_k \subseteq S$ to say that $S_k$ is either a strict subset of or equal to the set $S$ and $x \in S$ to denote that $x$ is an element of the set $S$. $\varnothing$ is used to denote the empty set and $|S|$ represents the cardinality of a set $S$. Lastly, the convolution operator is denoted as $*$. \section{Overview of Machine Learning Techniques} \label{sec:Overview} Machine learning is a branch of artificial intelligence in which computer systems acquire knowledge by learning patterns from raw data and/or from observations sampled from the environment. Machine learning is broadly classified into supervised, unsupervised, and reinforcement learning, which are further subdivided into subcategories as shown in Fig.\ref{fig:1} (this is a very limited, though relevant, representation of this vast field). In this section, we elaborate on the key machine learning techniques (indicated as gray boxes in Fig.\ref{fig:1}) prominently used in this chapter to help readers understand the deep learning approaches for UAS autonomy. \begin{figure}[h] \centering \includegraphics[width=4.6 in]{editor/ML_overview.pdf} \caption{Machine Learning Techniques} \label{fig:1} \end{figure} \subsection{Feedforward Neural Networks} \label{sec:fnn} Feedforward neural networks (FNNs), also referred to as multilayer perceptrons, are directed layered neural networks with no internal feedback connections. Mathematically, an FNN performs a mapping, i.e., $f:X\longrightarrow Y$. An $N$-layered FNN is a composite function $y = f(\mathbf{x};\theta)=f_N(f_{N-1}(\cdots f_1(\mathbf{x})))$ mapping input vector $\mathbf{x}\in \mathbb{R}^m$ to a scalar output $y \in \mathbb{R}$. Here, $\theta$ represents the neural network parameters. The number of layers in the neural network dictates the \emph{depth} whereas the number of neurons in the layers defines the \emph{width} of the network. The layers between the input and output layers, whose outputs are not directly observed, are called \emph{hidden} layers.
Figure \ref{fig:fnn} shows a 3-layered FNN accepting a two-dimensional input vector $\mathbf{x}\in \mathbb{R}^2$ and mapping it to a scalar output $y \in \mathbb{R}$. \begin{figure}[h] \centering \includegraphics[width=3.5 in]{editor/fnn.pdf} \caption{Three-layered FNN} \label{fig:fnn} \end{figure} In the figure, each node represents a neuron and each link between nodes $i$ and $j$ is assigned a weight $w_{ij}$. The composite function of the 3-layered FNN is \begin{equation} y = f(\mathbf{x};\theta) = f_3(f_2(f_1(\mathbf{x}))) \label{eq:fnn} \end{equation} In other words, the 3-layer FNN in Fig.\ref{fig:fnn} is the directed acyclic graph equivalent of the composite function in equation (\ref{eq:fnn}). The mapping in the first layer is \begin{equation} \mathbf{h}_1 = f_1(\mathbf{x}) = \mathcal{A}_1(\mathbf{W}_1\mathbf{x} + \mathbf{b}_1) \end{equation} where $\mathcal{A}_1(\circ)$ is the activation function, $\mathbf{b}_1$ is the bias vector, and $\mathbf{W}_1$ represents the weight matrix between the neurons in the first and second layers. Here, the weight matrix $\mathbf{W}_1$ is defined as the link weights between the neurons in the input and second layer \begin{equation} \mathbf{W}_1 = \begin{bmatrix} w_{ab} & w_{db}\\w_{ae} & w_{de} \end{bmatrix}. \end{equation} Similarly, the second layer mapping can be represented as \begin{equation} \mathbf{h}_2 = f_2(\mathbf{h}_1) = \mathcal{A}_2(\mathbf{W}_2\mathbf{h}_1 + \mathbf{b}_2) \end{equation} Finally, the output is \begin{equation} y = f_3(\mathbf{h}_2) = \mathcal{A}_3(\mathbf{W}_3\mathbf{h}_2 + \mathbf{b}_3) \end{equation} The weight matrices in the second and final layers are \begin{equation*} \mathbf{W}_2 = \begin{bmatrix} w_{bc} & w_{ec}\\w_{bf} & w_{ef} \end{bmatrix} \text{ and } \mathbf{W}_3 = \begin{bmatrix} w_{co} & w_{fo} \end{bmatrix}. \end{equation*} The neural network parameters $\theta = \{\mathbf{W}_1,\mathbf{W}_2,\mathbf{W}_3,\mathbf{b}_1,\mathbf{b}_2,\mathbf{b}_3 \}$ comprise the weight matrices and bias vectors across the layers. The objective of the training algorithm is to learn the optimal parameters $\theta^*$ that approximate the target composite function $f^*$ from the available samples of $\mathbf{x}$.
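To make the composite mapping in equation (\ref{eq:fnn}) concrete, the following minimal NumPy sketch implements the forward pass of the 3-layered FNN above. The $\tanh$ hidden activations, linear output activation, and random weight initialization are illustrative assumptions, not choices prescribed in this chapter.

\begin{verbatim}
import numpy as np

def forward(x, params, activations):
    # y = f_N(...f_2(f_1(x))), with h_l = A_l(W_l h_{l-1} + b_l)
    h = x
    for (W, b), act in zip(params, activations):
        h = act(W @ h + b)
    return h

rng = np.random.default_rng(0)
# 3-layered FNN mapping R^2 -> R, as in Fig. 4
params = [(rng.standard_normal((2, 2)), np.zeros(2)),  # W_1, b_1
          (rng.standard_normal((2, 2)), np.zeros(2)),  # W_2, b_2
          (rng.standard_normal((1, 2)), np.zeros(1))]  # W_3, b_3
acts = [np.tanh, np.tanh, lambda z: z]                 # linear output

y = forward(np.array([0.5, -1.0]), params, acts)       # scalar output
\end{verbatim}

Training would then adjust $\theta$ (the entries of \texttt{params}) to minimize a loss over the available samples, typically via \ac{SGD}.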
\subsection{Convolutional Neural Networks} \label{sec:cnn} Convolutional networks or convolutional neural networks (CNNs) are a specialized type of feedforward neural network that performs the convolution operation in at least one of its layers. The \emph{feature extraction} capability of CNNs mimics the neural activity of the animal visual cortex \cite{CNNcortex}. The visual cortex comprises a complex arrangement of cells that are sensitive to sub-regions of the perceived scene. The convolution operation in CNNs emulates this characteristic of the brain's visual cortex. Consequently, CNNs have been abundantly applied in the field of computer vision \cite{googlenet_inception,alexnet,LeNet5,vgg16,squeezenet,cnn4vision,fastRCNN,CNNface,resnet}. Convolution is an efficient method of feature extraction that reduces the data dimension and consequently reduces the parameters of the network. Hence, CNNs are more efficient and easier to train than their fully connected feedforward counterparts (Section \ref{sec:fnn}). A typical CNN architecture often involves convolution, pooling, and output layers. CNNs operate on an input tensor $\mathbf{X}\in \mathbb{R}^{W\times H \times D}$ of width $W$, height $H$, and depth $D$, using a kernel (filter) $\mathbf{K}\in \mathbb{R}^{w\times h\times D}$ of width $w$, height $h$, and the same depth as the input tensor, to generate an output feature map $\mathbf{M}\in \mathbb{R}^{W_1\times H_1\times D_1}$. The dimension of the feature map is a function of the input as well as kernel dimensions, the number of kernels $N$, the stride $S$, and the amount of zero padding $P$. Specifically, the feature map dimensions can be derived as $W_1 = \left(W-w+2P\right)/S + 1, \; H_1 = \left(H-h+2P\right)/S + 1,\; D_1 = N$. Each kernel slice extracts a specific feature from the input region of operation. A kernel refers to a set of weights and biases. The kernel operates on the input slice in a sliding window manner based on the stride. The stride refers to the number of steps with which the kernel slides along the input slice. Hence, each depth slice of the input is treated with the same kernel, or in other words, shares the same weights and biases - \emph{parameter sharing}. The convolution operation on an input slice $\mathbf{x}$ by a kernel $\mathbf{k}$ is demonstrated in Fig.\ref{fig:cnn_conv}. Here, $b$ represents the bias associated with the kernel slice and $\mathcal{A}\left(\circ\right)$ denotes a non-linear activation function. \begin{figure}[h] \centering \includegraphics[width=4.7 in]{editor/cnn_conv.pdf} \caption{Convolution of input slice with kernel} \label{fig:cnn_conv} \end{figure} The resulting output from the convolution operation is referred to as the \emph{feature map}. Each element of the feature map can be visualized as the output of a neuron which focuses on a small region of the input - its \emph{receptive field}. The neural depiction of the convolution interaction is shown in Fig.\ref{fig:neural}. \begin{figure}[h] \centering \hspace{-1 cm} \includegraphics[width=2.8 in]{editor/neural.pdf} \caption{Neural representation of convolution} \label{fig:neural} \end{figure} It is evident that each neuron in a layer is connected locally to the neurons in the adjacent layer - \emph{sparse connectivity}. Hence, each neuron is unaffected by variations outside of its receptive field while producing the strongest response for spatially local input patterns. The feature maps are propagated to subsequent layers until they reach the output layer for a regression or classification task. \emph{Pooling} is a typical operation in CNNs to significantly reduce dimensionality. It operates on a subregion of the input to map it to a single summary statistic depending on the type of pooling operation - max, mean, $L_2$-norm, weighted average, etc. In this way, pooling downsamples its input. A typical pooling dimension is $2\times2$; larger pooling dimensions risk losing significant information. Figure \ref{fig:pool} shows max and mean pooling operations. \begin{figure}[h] \centering \includegraphics[width=2.8 in]{editor/pooling.pdf} \caption{Max and mean pooling on input slice with stride 1} \label{fig:pool} \end{figure} A pooling layer of dimensions $W_p\times H_p$ operating over an input volume of size $W_1\times H_1\times D_1$ with a stride of $S_1$ will yield an output of volume $W_2 = \left( W_1-W_p\right)/S_1 + 1, \;H_2 = \left( H_1-H_p\right)/S_1 + 1, \; D_2 = D_1$.
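The output dimensions above are easy to sanity-check in code. The following sketch simply evaluates the two formulas; the example input and kernel sizes are arbitrary illustrative values.

\begin{verbatim}
def conv_out_dims(W, H, w, h, N, S=1, P=0):
    # Feature map dims: W1 = (W - w + 2P)/S + 1, H1 likewise, D1 = N
    return (W - w + 2 * P) // S + 1, (H - h + 2 * P) // S + 1, N

def pool_out_dims(W1, H1, D1, Wp, Hp, S1):
    # Pooling dims: W2 = (W1 - Wp)/S1 + 1, H2 likewise, D2 = D1
    return (W1 - Wp) // S1 + 1, (H1 - Hp) // S1 + 1, D1

# e.g., a 32x32x3 input, 10 kernels of 5x5, stride 1, no padding:
print(conv_out_dims(32, 32, 5, 5, N=10))      # -> (28, 28, 10)
print(pool_out_dims(28, 28, 10, 2, 2, S1=2))  # -> (14, 14, 10)
\end{verbatim}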
Pooling imparts invariance to translation, i.e., if the input to the pooling layer is shifted by a small amount, the pooled output will largely be unaffected \cite{Goodfellow-et-al-2016}. As we have discussed, the three essential characteristics of CNNs that contribute to their statistical efficiency and trainability are parameter sharing, sparse connectivity, and dimensionality reduction. CNNs have demonstrated superior performance in computer vision tasks such as image classification, object detection, semantic scene classification, etc. Consequently, CNNs are increasingly used for UAS imagery and navigation applications \cite{uavapps}. The most notable CNN architectures are LeNet-5 \cite{LeNet5}, AlexNet \cite{alexnet}, VGG-16 \cite{vgg16}, ResNet \cite{resnet}, Inception \cite{googlenet_inception}, and SqueezeNet \cite{squeezenet}. \subsection{Recurrent Neural Networks} \label{sec:rnn} An \ac{RNN} \cite{Rumelhart1986} is a type of neural network specialized to capture temporal dependencies in sequential data. An RNN holds internal memory states with recurrent connections between them to capture the sequence history. This characteristic enables RNNs to exploit the temporal correlation of data, rendering them suitable for image captioning, video processing, speech recognition, and natural language processing applications. Unlike CNNs and traditional feedforward neural networks, RNNs can handle variable-length input sequences with the same model. RNNs operate on input sequence vectors at varying time steps $\mathbf{x}^{t}$ and map them to output sequence vectors $\mathbf{y}^{t}$. The recurrence relation in an RNN parameterized by $\mathbf{\theta}$ can be expressed as \begin{equation} \mathbf{h}^t = \mathcal{F}\Big(\mathbf{h}^{t-1},\mathbf{x}^{t};\mathbf{\theta} \Big) \label{eq:recursive} \end{equation} where $\mathbf{h}^t$ represents the hidden state vector at time $t$. The recurrence relation represents a recursive dynamic system. By this comparison, an RNN can be defined as \emph{a recursive dynamic system that is driven by an external signal, i.e., the input sequence $\mathbf{x}^{t}$}. Equation (\ref{eq:recursive}) can be unfolded twice as \begin{align} \mathbf{h}^t &= \mathcal{F}\Big(\mathbf{h}^{t-1},\mathbf{x}^{t};\mathbf{\theta} \Big)\\ &= \mathcal{F}\Big(\mathcal{F}\Big(\mathbf{h}^{t-2},\mathbf{x}^{t-1};\mathbf{\theta} \Big),\mathbf{x}^{t};\mathbf{\theta} \Big)\\ &= \mathcal{F}\Big(\mathcal{F}\Big(\mathcal{F}\Big(\mathbf{h}^{t-3},\mathbf{x}^{t-2};\mathbf{\theta} \Big),\mathbf{x}^{t-1};\mathbf{\theta} \Big),\mathbf{x}^{t};\mathbf{\theta} \Big) \end{align} The unfolded equations show how an RNN processes the whole past sequence $\mathbf{x}^{t}, \mathbf{x}^{t-1},$ $\cdots, \mathbf{x}^{1}$ to produce the current hidden state $\mathbf{h}^{t}$. Another notable inference from the unfolded representation is \emph{parameter sharing}. Unlike a CNN, where the parameters of a spatial locality are shared, in an RNN the parameters are shared across different positions in time. For this reason, an RNN can operate on variable-length sequences, allowing the model to learn and generalize well to inputs of varying forms. In contrast, a traditional feedforward network does not share parameters; it has a specific parameter per input feature, preventing it from generalizing to an input form not seen during training. A CNN shares parameters across a small spatial locality but would not generalize to variable-length inputs as well as an RNN.
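A minimal sketch of this recurrence is given below, assuming the common parameterization $\mathbf{h}^t = \tanh(\mathbf{W}_{hh}\mathbf{h}^{t-1} + \mathbf{W}_{ih}\mathbf{x}^{t} + \mathbf{b}_h)$ with a linear output layer (this parameterization is made explicit in the equations that follow). Note that the same weight matrices are reused at every time step - parameter sharing - which is why the loop handles sequences of any length.

\begin{verbatim}
import numpy as np

def rnn_forward(xs, Wih, Whh, Who, bh, bo):
    # h_t = tanh(Whh h_{t-1} + Wih x_t + bh);  y_t = Who h_t + bo
    h = np.zeros(Whh.shape[0])
    ys = []
    for x in xs:                 # one step per input vector, any length
        h = np.tanh(Whh @ h + Wih @ x + bh)
        ys.append(Who @ h + bo)
    return ys

rng = np.random.default_rng(0)
d_in, d_h, d_out = 3, 4, 2
ys = rnn_forward([rng.standard_normal(d_in) for _ in range(5)],
                 rng.standard_normal((d_h, d_in)),   # W_ih
                 rng.standard_normal((d_h, d_h)),    # W_hh
                 rng.standard_normal((d_out, d_h)),  # W_ho
                 np.zeros(d_h), np.zeros(d_out))
\end{verbatim}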
A simple many-to-many RNN architecture, which maps multiple input sequence steps to multiple output sequence steps, is shown in Fig.\ref{fig:mmrnn}. \begin{figure}[h] \centering \includegraphics[width=2 in]{editor/many-to-many rnn.pdf} \caption{Many-to-many RNN architecture} \label{fig:mmrnn} \end{figure} For a simple representation, let us assume the RNN is parameterized by $\mathbf{\theta}$ and $\mathbf{\phi}$ with input-to-hidden, hidden-to-hidden, and hidden-to-output weight matrices being $\mathbf{W}_{ih}, \mathbf{W}_{hh},$ and $\mathbf{W}_{ho}$, respectively. The hidden state at time $t$ can be expressed as \begin{align} \mathbf{h}^t &= \mathcal{F}\Big(\mathbf{h}^{t-1},\mathbf{x}^{t};\mathbf{\theta} \Big)\\ &= \mathcal{A}_h\Big(\mathbf{W}_{hh}\mathbf{h}^{t-1} + \mathbf{W}_{ih}\mathbf{x}^{t} + \mathbf{b}_h\Big) \end{align} where $\mathcal{A}_h(\circ)$ is the activation function of the hidden unit and $\mathbf{b}_h$ is the bias vector. The output at time $t$ can be obtained as a function of the hidden state at time $t$, \begin{align} \mathbf{y}^t &= \mathcal{G}\Big(\mathbf{h}^{t};\mathbf{\phi} \Big)\\ &= \mathcal{A}_o\Big(\mathbf{W}_{ho}\mathbf{h}^t + \mathbf{b}_o\Big) \end{align} where $\mathcal{A}_o(\circ)$ is the activation function of the output unit and $\mathbf{b}_o$ is the bias vector. Other typical RNN architectures are shown in Fig.\ref{fig:allrnn}. \begin{figure}[h] \centering \includegraphics[width=4.8 in]{editor/allrnn.pdf} \caption{RNN architectures. (a) Many-to-one, (b) One-to-many, and (c) One-to-one} \label{fig:allrnn} \end{figure} The RNN architectures discussed so far capture only hidden states from the past. Some applications also require future states in addition to the past. This is accomplished by a bidirectional RNN \cite{biRNN}. In simple words, a bidirectional RNN combines an RNN that depends on past states (\emph{i.e.,} from $\mathbf{h}^{1}, \mathbf{h}^{2}, \mathbf{h}^{3}, \cdots, \mathbf{h}^{t}$) with an RNN that looks at future states (\emph{i.e.,} from $\mathbf{h}^{t}, \mathbf{h}^{t-1}, \mathbf{h}^{t-2}, \cdots, \mathbf{h}^{1}$). \subsection{Reinforcement Learning} \label{sec:ReinforcementLearning} Reinforcement learning is focused on the idea of a goal-directed agent interacting with an environment based on its observations of the environment \cite{RL_book}. The main goal of reinforcement learning is for the agent to learn how to act, i.e., what action to perform in a given environmental state, such that a reward signal is maximized. The agent repeatedly interacts with the environment in a series of discrete time steps by observing the environmental state and then choosing and executing an action. The action chosen by the agent may affect the state of the environment in the next time step. The agent receives a reward signal from the environment and transitions to a new state. The agent has some capability to sense the environmental state; informally, the state can be thought of as any information about the environment that is made available to the agent. The agent selects which of the possible actions it can take by following a policy, which is a function, in general stochastic, that maps states to actions. A reward signal is used to define the goal of the problem. The reward received by the agent at each time step specifies the immediate desirability of the current state. The objective of the reinforcement learning agent is to maximize the cumulative reward, typically defined by a value function, which quantifies the long-term goodness of states for the agent.
The agent aims at achieving a goal by continuously interacting with the environment. This interaction, which involves taking actions while trading off short- and long-term rewards, renders reinforcement learning a potentially well-suited solution to many autonomous problems \cite{RL_Robotics_Survey_Kober}. The reinforcement learning problem is usually represented mathematically using a finite \ac{MDP}. A finite \ac{MDP} is defined by the tuple $(S, A, P, R)$, where $S$, $A$, $P$, and $R$ are the state space, action space, transition function, and reward function, respectively. Note that in finite \acp{MDP}, the state, action, and reward spaces consist of a finite number of elements. At each time step, the agent observes state $s \in S$, selects and takes action $a \in A$, receives a reward $r$, and transitions to a new state $s' \in S$. The transition function specifies the probability of transitioning from state $s$ to state $s'$ as a consequence of choosing action $a$ as, \begin{equation} P(s,a,s')=Pr(S_{t+1}=s'|S_{t}=s,A_{t}=a). \end{equation} The reward function $R$ defines the expected reward received by the agent after taking action $a$ in state $s$, i.e., \begin{equation} R(s,a)=\mathbb{E}[R_t|S_t=s,A_t=a]. \end{equation} It can be seen that the functions $P$ and $R$ define the dynamics of the \ac{MDP}. A reinforcement learning agent uses a policy to select actions in a given state. The policy, denoted $\pi(s,a)$, provides a probabilistic mapping of states to actions as, \begin{equation} \pi(s,a)=Pr(A_t=a|S_t=s). \end{equation} As discussed earlier, value functions are used to define the long-term goodness of states for the agent. Mathematically, the \emph{state-value function} is denoted as \begin{equation} v_{\pi}(s)=\mathbb{E}_{\pi}\Bigg[\sum_{k=0}^{\infty}\gamma^kR_{t+k+1}|S_t=s\Bigg], \forall s\in S \end{equation} The \emph{state-value function} specifies the expected return, i.e., the sum of discounted rewards, if the agent follows policy $\pi$ starting from state $s$. The discount rate $\gamma$, $0 \leq \gamma \leq 1$, is used to weight future rewards progressively less. For example, as $\gamma$ approaches zero, the agent is concerned only with immediate rewards, whereas when $\gamma$ approaches unity, the agent favors future rewards. The expected discounted return is denoted by $G_t$, i.e., \begin{equation} G_t=\sum_{k=0}^{\infty}\gamma^kR_{t+k+1}. \end{equation} Additionally, the \emph{action-value function} for policy $\pi$ is mathematically represented as \begin{equation} q_{\pi}(s,a)=\mathbb{E}_{\pi}\Bigg[\sum_{k=0}^{\infty}\gamma^kR_{t+k+1}|S_t=s,A_t=a\Bigg] \end{equation} The \emph{action-value function} specifies the expected return if the agent takes action $a$ in state $s$ under policy $\pi$. The \ac{MDP} dynamics of the environment and the notion of value functions have been exploited to develop multiple algorithms. In the case where the \ac{MDP} is fully known, i.e., the agent has knowledge of $P$ and $R$, dynamic programming methods (planning algorithms), such as policy iteration and value iteration, can be used to solve the \ac{MDP} for the optimal policy or optimal value function.
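As an illustration of such a planning algorithm, the following sketch implements value iteration under the assumption that $P$ and $R$ are given as dense arrays indexed by state and action (an illustrative representation, not one prescribed here).

\begin{verbatim}
import numpy as np

def value_iteration(P, R, gamma=0.9, tol=1e-6):
    # P[s, a, s2]: transition probability; R[s, a]: expected reward.
    # Iterates V(s) <- max_a [R(s,a) + gamma * sum_s2 P(s,a,s2) V(s2)]
    n_states = P.shape[0]
    V = np.zeros(n_states)
    while True:
        Q = R + gamma * (P @ V)      # Q[s, a] for all state-action pairs
        V_new = Q.max(axis=1)
        if np.max(np.abs(V_new - V)) < tol:
            return V_new, Q.argmax(axis=1)   # values and greedy policy
        V = V_new
\end{verbatim}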
However, in reinforcement learning, knowledge of the \ac{MDP} dynamics is not usually assumed, and both model-based and model-free approaches exist for solving reinforcement learning problems. In model-based reinforcement learning, the agent attempts to learn a model of the environment directly, by learning $P$ and $R$, and then uses the environmental model to plan actions using algorithms similar to policy iteration and value iteration. In model-free reinforcement learning, the agent does not attempt to directly learn a model of the environment but rather attempts to learn an optimal value function or policy. The discussion in this chapter is primarily focused on model-free methods. Generally speaking, model-free reinforcement learning algorithms fall into value function based or policy gradient based methods. In value function based methods, the agent attempts to learn an optimal value function, usually action-value, from which an optimal policy can be found. Value function methods include Monte Carlo, \ac{SARSA}, and Q-Learning. Policy gradient based methods attempt to learn an optimal parameterized policy directly via the gradient of a scalar performance measure with respect to the policy parameters. The REINFORCE algorithm is an example of a policy gradient method. \subsubsection*{Monte Carlo} Monte Carlo methods can be utilized to learn value functions and optimal policies by direct experience with the environment. In particular, sequences of states, actions, and rewards can be obtained by the agent interacting with the environment, either directly or in simulation, and the value function can be estimated by averaging the returns beginning from a state-action pair. Monte Carlo methods are typically used for episodic tasks. An episode (a sequence of states, actions, and rewards) is generated by the agent following policy $\pi$ in the environment, and the value function estimate is updated at the conclusion of each episode. Monte Carlo methods can be used for control, i.e., finding the optimal policy, by performing policy improvement. Policy improvement updates the policy such that it is greedy with respect to the current action-value function estimate. The greedy policy for an action-value function is defined such that for each state $s$ the action with the maximum action-value is taken, i.e., \begin{equation} \pi(s)\doteq \operatorname*{argmax}_{a \in A} q(s,a). \end{equation} An important consideration when using Monte Carlo methods for value function prediction, and in reinforcement learning in general, is that of maintaining exploration. In order to learn the action-value function, all state-action pairs need to be explored. One way to achieve this is known as \emph{exploring starts}, whereby each episode begins in a particular state-action pair and all state-action pairs have a non-zero probability of being selected at the start of an episode. Exploring starts guarantees that every state-action pair will be visited an infinite number of times in the limit of an infinite number of episodes \cite{RL_book}. An alternative approach is to utilize a policy that allows for continued exploration. An example is the $\epsilon$-greedy policy, in which most of the time (with probability $1-\epsilon$) an action is selected that maximizes the action-value function, while occasionally a random action is chosen with probability $\epsilon$, i.e., \begin{equation} \pi(s,a)=\begin{cases} 1-\epsilon+\frac{\epsilon}{|A|}, & \text{if } a = a^*\\ \frac{\epsilon}{|A|}, & \text{otherwise} \end{cases} \end{equation}
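In code, this action selection rule is commonly realized by sampling, as in the minimal sketch below (assuming a tabular action-value array \texttt{Q[s][a]}). Drawing uniformly over all actions with probability $\epsilon$ reproduces the $1-\epsilon+\epsilon/|A|$ probability of the greedy action given above, since the random draw can also land on $a^*$.

\begin{verbatim}
import numpy as np

def epsilon_greedy(Q, s, eps, rng):
    # With probability eps explore uniformly; otherwise exploit.
    n_actions = Q.shape[1]
    if rng.random() < eps:
        return int(rng.integers(n_actions))   # explore
    return int(np.argmax(Q[s]))               # exploit: argmax_a Q(s, a)
\end{verbatim}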
There are two approaches to ensure continued exploration: on-policy and off-policy methods. In on-policy methods, the algorithm attempts to evaluate and improve the policy that is being used to select actions in the environment, whereas off-policy methods improve a policy different from the one used to select actions. In off-policy methods, the agent attempts to learn an optimal policy, called the target policy, by generating actions using another policy that allows for exploration, called the behavior policy. Since the policy learning is from data collected ``off'' the target policy, the methods are called off-policy. Both on-policy and off-policy Monte Carlo control methods exist. \subsubsection*{Temporal Difference Learning} \ac{TD} learning defines another family of value function based reinforcement learning methods. Similar to Monte Carlo methods, \ac{TD} learns a value function via interaction with the environment. The main difference between \ac{TD} and Monte Carlo is that \ac{TD} updates its estimate of the value function at each time step rather than at the end of the episode. In other words, the value function update is based on the value function estimate of the subsequent state. The idea of updating the value function based on the estimated return $(R_{t+1}+\gamma V(S_{t+1}))$ rather than the actual (complete) return as in Monte Carlo is known as bootstrapping. A simple \ac{TD} update equation for the value function is \begin{equation} V(S_t)=V(S_t)+\alpha[R_{t+1}+\gamma V(S_{t+1})-V(S_t)] \end{equation} where $\alpha$ is a step size parameter. In the above equation, it is seen that the \ac{TD} method updates the value function estimate at the next time step. The target value for the \ac{TD} update becomes $R_{t+1}+\gamma V(S_{t+1})$, which is compared to the current value function estimate $(V(S_t))$. The difference between the target and the current estimate is known as the \ac{TD} error, i.e., \begin{equation} \delta_{t}\doteq R_{t+1}+\gamma V(S_{t+1})-V(S_t) \end{equation} It can be seen that an advantage of \ac{TD} methods is the ability to update value function predictions at each time step, which enables online learning. \textbf{SARSA:} \ac{SARSA} is an example of an on-policy \ac{TD} control algorithm. The \ac{TD} update equation presented above is extended for action-value function prediction, yielding the \ac{SARSA} action-value update rule, \begin{equation} Q(S_t,A_t) \leftarrow Q(S_t,A_t)+\alpha[R_{t+1}+\gamma Q(S_{t+1},A_{t+1})-Q(S_t,A_t)].\label{eq:sarsa} \end{equation} As shown in equation (\ref{eq:sarsa}), the update is performed after each sequence of $(\cdots,S_t,A_t,R_{t+1},S_{t+1},A_{t+1},\cdots)$, which leads to the name \ac{SARSA}. It is to be noted that the $Q$ estimate is updated using the target $R_{t+1}+\gamma Q(S_{t+1},A_{t+1})$, computed from the action actually selected by the policy being followed. From the control perspective, an exploratory policy like $\epsilon$-greedy is often used. \textbf{Q-Learning:} Q-Learning is an off-policy \ac{TD} control algorithm and its update rule is given below. \begin{equation} Q(S_t,A_t) \leftarrow Q(S_t,A_t)+\alpha[R_{t+1}+\gamma\max_{a}Q(S_{t+1},a)-Q(S_t,A_t)] \label{eq:qlearn} \end{equation} As an off-policy method, the learned action-value function estimate $Q$ attempts to approximate the optimal action-value function $Q^*$ directly. This can be seen in the update equation (\ref{eq:qlearn}), where the target value is $R_{t+1}+\gamma\max_{a}Q(S_{t+1},a)$ compared to $R_{t+1}+\gamma Q(S_{t+1},A_{t+1})$ of \ac{SARSA}. Unlike in SARSA, the $Q$ value is updated based on the greedy policy for action selection rather than the behavior policy.
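A minimal tabular Q-learning sketch tying together the $\epsilon$-greedy behavior policy and the update in equation (\ref{eq:qlearn}) is given below. The environment interface (\texttt{reset()} returning a state, \texttt{step(a)} returning \texttt{(s2, r, done)}) is an assumption for illustration.

\begin{verbatim}
import numpy as np

def q_learning(env, n_states, n_actions, episodes=500,
               alpha=0.1, gamma=0.99, eps=0.1):
    rng = np.random.default_rng(0)
    Q = np.zeros((n_states, n_actions))
    for _ in range(episodes):
        s, done = env.reset(), False
        while not done:
            # epsilon-greedy behavior policy
            if rng.random() < eps:
                a = int(rng.integers(n_actions))
            else:
                a = int(np.argmax(Q[s]))
            s2, r, done = env.step(a)
            # Q(S,A) <- Q(S,A) + alpha [R + gamma max_a Q(S',a) - Q(S,A)]
            Q[s, a] += alpha * (r + gamma * np.max(Q[s2]) - Q[s, a])
            s = s2
    return Q
\end{verbatim}

Replacing the $\max_a Q(S',a)$ target with $Q(S',A')$, where $A'$ is drawn from the same $\epsilon$-greedy policy, would turn this sketch into \ac{SARSA}.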
SARSA does not learn the optimal policy but rather learns the action-values resulting from the $\epsilon$-greedy action selections. Q-learning, in contrast, learns the optimal policy even while following $\epsilon$-greedy action selections, which can occasionally cause its online performance to drop \cite{RL_book}. The \ac{TD} methods can be further generalized with $n$-step bootstrapping methods, which are intermediate between Monte Carlo and \ac{TD} approaches. The $n$-step methods generalize the \ac{TD} methods discussed earlier by utilizing the next $n$ rewards, states, and actions in the value or action-value function updates. The value function based approaches discussed so far have been presented as tabular methods. The algorithms are tabular because the state-value or action-value function is represented as a table or an array. In many practical problems of interest, the state spaces are very large and it becomes intractable to learn optimal policies using tabular methods due to the time, data, and memory requirements of populating the tables. Additionally, with massive state spaces, it is typical that the agent will enter previously unseen states, requiring the agent to generalize from experiences in similar states. An example of an overwhelmingly large state space occurs when the environmental state is represented as a camera image; for example, an 8-bit, $200\times200$ pixel RGB image results in $256^{3\times200\times200}$ possible states. To cope with these challenges, optimal policies can be approximated by utilizing function approximation techniques to represent value functions and policies. The different function approximation techniques used in supervised learning can be applied to reinforcement learning. The specific use of deep neural networks as a means for function approximation is known as \ac{DRL} and is discussed later in this section. When using function approximation techniques, parameterized state-value or action-value functions are used to approximate value functions. A state-value estimate can be denoted as $\hat{v}(s;\boldsymbol{w}) \approx v_{\pi}(s)$ and an action-value estimate as $\hat{q}(s,a;\boldsymbol{w}) \approx q_{\pi}(s,a)$ where $\boldsymbol{w} \in \mathbb{R}^{d}$ is the parameter vector. In principle, any supervised learning method could be used for function approximation. For example, a value function estimate could be computed using techniques ranging from a linear function of the state and weights to nonlinear methods such as an \ac{ANN}. \ac{SGD} and its variants are often used to learn the values of the parameter vectors. \subsubsection*{REINFORCE} In contrast to value function based approaches, policy gradient methods attempt to learn an optimal parameterized policy directly without the requirement of learning the action-value function explicitly. The policy that is learned is defined as \begin{equation} \pi(a|s,\boldsymbol{\theta})=Pr(A_t=a|S_t=s,\boldsymbol{\theta}_t=\boldsymbol{\theta}) \end{equation} which specifies the probability that action $a$ is taken at step $t$ in state $s$ and is parameterized by the vector $\boldsymbol{\theta} \in \mathbb{R}^m$. Policy gradient methods learn the values of the policy parameters based on the gradient of a performance measure $J(\boldsymbol{\theta})$ with respect to the parameters.
In the episodic case, the performance measure can be defined in terms of the state-value function, assuming the episode starts from an initial state $s_0$, as \begin{equation} J(\boldsymbol{\theta})\doteq v_{\pi_{\boldsymbol{\theta}}}(s_0) \end{equation} REINFORCE is an example of a policy gradient algorithm and is derived from the policy gradient theorem \begin{equation} \nabla J(\boldsymbol{\theta}) \propto \sum_{s}\mu(s)\sum_{a}q_{\pi}(s,a)\nabla_{\boldsymbol{\theta}}\pi(a|s,\boldsymbol{\theta}) \end{equation} where $\mu(s)$ is a distribution over states and the gradients are column vectors with respect to the parameter vector $\boldsymbol{\theta}$. The policy gradient theorem provides an expression for the gradient of the performance measure with respect to the parameter vector. From the policy gradient theorem, the following equation is derived for the gradient of $J(\boldsymbol{\theta})$ \begin{equation} \nabla J(\boldsymbol{\theta}) = \mathbb{E}_{\pi}\Bigg[G_t\frac{\nabla_{\boldsymbol{\theta}} \pi(A_t|S_t,\boldsymbol{\theta})}{\pi(A_t|S_t,\boldsymbol{\theta})}\Bigg] \end{equation} Using \ac{SGD}, the REINFORCE update rule for the policy parameter vector $\boldsymbol{\theta}$ can be derived as \begin{equation} \boldsymbol{\theta}_{t+1} = \boldsymbol{\theta}_{t} + \alpha G_t \frac{\nabla_{\boldsymbol{\theta}} \pi(A_t|S_t,\boldsymbol{\theta}_t)}{\pi(A_t|S_t,\boldsymbol{\theta}_t)}.\label{eq:rein_up} \end{equation} The update equation (\ref{eq:rein_up}) moves the parameters in a direction that increases the probability of taking action $A_t$ during future visits to the state $S_t$, in proportion to the return $G_t$. This causes the parameters to favor actions that produce the highest return. The division by $\pi(A_t|S_t,\boldsymbol{\theta}_t)$ normalizes for the fact that frequently selected actions would otherwise receive disproportionately many updates even if they do not actually produce the highest return. It is possible to generalize the policy gradient theorem and REINFORCE update rule with the addition of a baseline for comparison to the action values or returns. The baseline can be an arbitrary function or random variable. The motivation behind the use of a baseline is to reduce the variance in policy parameter updates. The update rule for the REINFORCE algorithm with a baseline is given as \begin{equation} \boldsymbol{\theta}_{t+1} = \boldsymbol{\theta}_{t} + \alpha (G_t-b(S_t)) \frac{\nabla_{\boldsymbol{\theta}} \pi(A_t|S_t,\boldsymbol{\theta}_t)}{\pi(A_t|S_t,\boldsymbol{\theta}_t)} \end{equation} where $b(S_t)$ is the baseline. A common baseline is an estimate of the state-value $\hat{v}(S_t,\mathbf{w})$ parameterized by the weight vector $\mathbf{w} \in \mathbb{R}^l$. The idea of using a state-value function as a baseline can be extended with actor-critic methods. In actor-critic methods, a state-value function, called a critic, is utilized to assess the performance of a policy, called an actor. The critic introduces a bias to the actor's gradient estimates, which can substantially reduce variance.
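The update in equation (\ref{eq:rein_up}) is illustrated below for a tabular softmax policy, an illustrative choice for which $\nabla_{\boldsymbol{\theta}}\pi/\pi = \nabla_{\boldsymbol{\theta}}\log\pi$ has the simple closed form used in the code.

\begin{verbatim}
import numpy as np

def softmax_policy(theta, s):
    # pi(a|s, theta) with one preference theta[s, a] per state-action
    z = theta[s] - np.max(theta[s])        # subtract max for stability
    p = np.exp(z)
    return p / p.sum()

def reinforce_update(theta, episode, alpha=0.01, gamma=0.99):
    # episode: list of (state, action, reward) tuples
    G, returns = 0.0, []
    for (_, _, r) in reversed(episode):    # G_t computed backward
        G = r + gamma * G
        returns.append(G)
    returns.reverse()
    for (s, a, _), G in zip(episode, returns):
        p = softmax_policy(theta, s)
        grad_log = -p                      # d log pi(a|s) / d theta[s,:]
        grad_log[a] += 1.0                 # ... equals 1{a'=a} - p[a']
        theta[s] += alpha * G * grad_log   # update of eq. (rein_up)
    return theta
\end{verbatim}

Subtracting a baseline $b(S_t)$ from \texttt{G} before the update would give the baseline variant discussed above.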
Two more recent policy gradient methods are \ac{TRPO} and \ac{PPO}. \ac{TRPO} was introduced in \cite{schulman2015trust} to prevent drastic policy changes by introducing an optimization constraint based on the Kullback-Leibler (KL) divergence. The policy is updated within a trust region, and the KL constraint ensures that the policy update does not stray too far from the original policy. However, the inclusion of the KL constraint in the optimization problem introduces computational and implementation difficulty. \ac{PPO}, introduced in \cite{schulman2017proximal}, mitigates this implementation hurdle by incorporating the constraint term within the objective function. PPO computes the probability ratio between the new and old policies. There are two variants of PPO - PPO with KL penalty and PPO with clipped objective. In the first variant, the KL constraint is introduced as a penalty term in the objective function such that the computed policy update does not deviate much from the previous policy while minimizing the cost function. In the second variant, the KL divergence is replaced with a clipped objective function such that the probability ratio is clipped if it lies outside a range, say $1\pm\phi$. In contrast to TRPO, PPO is simpler to implement and tune. \subsubsection*{Deep Reinforcement Learning} Deep reinforcement learning is a popular area of current research that combines techniques from deep learning and reinforcement learning \cite{Arulkumaran_2017}. In particular, deep neural networks are used as function approximators to represent action-value functions and policies used in traditional reinforcement learning algorithms. This is of particular interest for problems that involve large state and action spaces that become intractable to represent using tabular methods or traditional supervised learning function approximators. A key capability of deep learning architectures is the ability to automatically learn representations (features) from raw data. For example, a deep neural network trained for image classification will automatically learn to recognize features such as edges, corners, etc. The use of deep learning enables policies to be learned in an end-to-end fashion, for example, learning control policies directly from raw sensor values. A famous example of a deep reinforcement learning algorithm is the deep Q-Network, which pairs Q-Learning with a deep \ac{CNN} to represent the action-value function \cite{mnih2013playing}. The deep Q-Network was able to achieve superhuman performance on several Atari games using only visual information, the reward signal, and the available actions, i.e., no game-specific information was given to the agent. The deep Q-Network employs two methods to address the known convergence issues \cite{Tsitsiklis_analysis_td} that can arise when using neural networks to approximate the $Q$ function. These methods are experience replay and the use of a separate target network for $Q$ updates. The experience replay mechanism stores sequences of past experiences, $(s_t,a_t,s_{t+1},r_{t+1})$, over many episodes in replay memory. The past experiences are used in subsequent $Q$ function updates, which improves data efficiency, removes correlations between samples, and reduces the variance of updates. The separate target network $\hat Q$ is used for generating targets in the Q-Learning updates. The target network is updated every $C$ time steps as a clone of the current $Q$ network; the use of the target network reduces the chances of oscillations and divergence. A variation of the deep Q-network, known as a Deep Recurrent Q-Network \cite{hausknecht2015deep}, adds an \ac{LSTM} layer to help learn temporal patterns. Additional variations include the double deep Q-network and the \ac{D3QN}.
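A minimal sketch of the experience replay mechanism described above is given below; the capacity and batch size are illustrative defaults, not values prescribed in \cite{mnih2013playing}.

\begin{verbatim}
import random
from collections import deque

class ReplayBuffer:
    # Fixed-size memory of past transitions (s, a, s', r, done).
    # Sampling uniformly at random breaks the correlation between
    # consecutive samples used in the Q-function updates.
    def __init__(self, capacity=100000):
        self.memory = deque(maxlen=capacity)   # oldest entries drop off

    def store(self, s, a, s_next, r, done):
        self.memory.append((s, a, s_next, r, done))

    def sample(self, batch_size=32):
        return random.sample(self.memory, batch_size)
\end{verbatim}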
Furthermore, deep reinforcement learning has also been applied to problems with continuous action spaces. In \cite{lillicrap2015continuous}, an actor-critic algorithm known as \ac{DDPG} is presented. It builds on the \ac{DPG} algorithm and exploits the ideas of experience replay and target networks from the \ac{DQN}, as well as batch normalization. \ac{DDPG} has been applied successfully to many continuous control problems. In \cite{heess2015memorybased}, \ac{RDPG} is introduced as an extension of \ac{DDPG} through the addition of recurrent \ac{LSTM} layers. The characteristics and capabilities of deep reinforcement learning warrant further investigation of its application to autonomous \ac{UAV} missions. A summary of the different model-free reinforcement learning algorithms is shown in Figure \ref{fig:rloverview}. \begin{figure}[h] \centering \includegraphics[width=4.5 in]{editor/rl_tree.pdf} \caption{Model-free reinforcement learning algorithms} \label{fig:rloverview} \end{figure} \section{Deep Learning for UAS Autonomy} \label{sec:DeepLearning} \emph{Deep learning} has shown great potential in learning complex representations from real environmental data. Its excellent learning capability has produced outstanding results in autonomous robotic tasks such as gait analysis, scene perception, and navigation \cite{DL_scene,DL_gait}. The same capabilities can be applied to enable UAS autonomy. The UAS focus areas where deep learning can be applied are scene perception, navigation, obstacle and collision avoidance, swarm operation, and situational awareness, as exemplified in Fig.\ref{fig:dluas}. \begin{figure}[h] \centering \includegraphics[width=4.6in]{editor/dl_classification.pdf} \caption{Deep learning for UAS autonomy discussed in this section.} \label{fig:dluas} \end{figure} Deep learning has been applied as a feature extraction system to learn high-dimensional data representations from raw sensor output. On the other hand, planning and situational awareness involve several sub-tasks such as querying or surveying aerial images, navigation control/guidance, collision avoidance, and position-dependent control actions. Accordingly, we classify this section into two broad categories: (i) feature extraction from sensor data and (ii) \ac{UAS} path planning and situational awareness. \subsection{Feature Extraction from Sensor Data} \label{sec:fe} The authors of \cite{Imagery_1} demonstrated the accuracy of a supervised deep learning image classifier that processes monocular images. The classifier predicted the forest trail direction (left, right, or straight) and claimed an accuracy comparable to that of humans tested on the same image classification task. This scene perception task requires the \ac{MAV} to perceive the trail and react (take actions) to stay on the trail. The authors adopted a typical CNN architecture to accomplish the supervised image classification task. The CNN involved four convolutional layers interlaced with max pooling layers and concluded with two fully connected layers. The output fully connected layer adopted a softmax classifier that yields the probability that the input image belongs to a particular class. The network was trained using \ac{SGD}. The direction estimates from the CNN were extended to provide navigation control. The navigation control for autonomous trail following was tested on a Parrot AR.Drone interfaced with a laptop and on a standalone quadrotor. The paper reported lower classification accuracy under real-world testing conditions as opposed to the good-quality GoPro images in the training dataset.
The AlexNet \cite{alexnet} architecture was employed in \cite{Imagery_3} for palm tree detection and counting from aerial images collected by the QuickBird satellite. A sliding window technique with a window size of $17\times17$ pixels and a stride of 3 pixels was adopted to collect the image dataset. Only a sample with a palm tree located in the center was classified as a positive palm tree detection. Spatial coordinates of the detected palm tree classes were obtained, and those corresponding to the same palm tree samples were merged: spatial coordinates with a Euclidean distance below a certain threshold were grouped into one coordinate. The remaining coordinates represent the actual coordinates of the detected palm trees. The work reported accurate detection of 96\% of the palm trees in the study area. The faster R-CNN \cite{fastRCNN} architecture was employed for car detection from low-altitude UAV imagery in \cite{Imagery_4}. Faster R-CNN comprises a region proposal network (RPN) module and a fast R-CNN detector. The RPN module is a deep convolutional architecture that generates region proposals of varying scales and aspect ratios. Region proposals may not necessarily contain the target object. These region proposals are further refined by the fast R-CNN detector. The RPN and fast R-CNN detector modules share their convolutional layers and are jointly trained for object detection. For the car detection task, the VGG-16 model \cite{vgg16} was adopted to form the shared convolutional network. The RPN generates $k$ region proposals in the form of $2k$ box classification and $4k$ box regression outputs. The box regression outputs correspond to the coordinates of the $k$ region proposals while the box classification represents the objectness score, \emph{i.e.,} the probability that each proposal contains the target object (car) or not. The faster R-CNN is trained with a multitask loss function comprising classification and regression components \iffalse \begin{equation} L\{p_i,t_i\} = \frac{1}{N_{cls}}\sum_i \Big(L_{cls}(p_i,p_i^*) \Big) + \lambda \frac{1}{N_{reg}} \sum_i \Big(p_i^*L_{reg}(t_i,t_i^*) \Big) \label{eq:rcnn} \end{equation} where $N_{cls}$ is the mini-batch size, $N_{reg}$ refers to the number of anchor locations, $p_i$ is the predicted probability of anchor $i$ being an object, $p_i^*$ is the ground truth probability, $t_i$ is the predicted bounding box vector, $t_i^*$ is the actual bounding box vector, and $\lambda$ is the balancing parameter. \fi The car detection imagery was collected with a GoPro Hero Black Edition-3 mounted on a DJI Phantom-2 quadcopter. The paper reported a car detection accuracy of 94.94\% and demonstrated the robustness of the method to scale, orientation, and illumination variations. For a simple exposition, the faster R-CNN architecture is shown in Fig.\ref{fig:fasterrcnn}. \begin{figure}[h] \centering \includegraphics[width=4.6in]{editor/fasterRCNN.pdf} \caption{Faster R-CNN architecture} \label{fig:fasterrcnn} \end{figure} In \cite{Imagery_5}, the faster R-CNN architecture is applied for maize tassel detection from UAV RGB imagery. Here, different CNN architectures were experimented with to form the shared layers between the RPN and fast R-CNN detector modules. The paper reported higher accuracy with ResNet \cite{resnet} than with VGGNet for an image resolution of $600\times600$ and a UAV altitude of 15 m.
The Faster R-CNN architecture was compared with You Only Look Once version 3 (YOLOv3) \cite{YOLOv3} for car detection from UAV imagery in \cite{Imagery_6}. YOLOv3 is an advancement over its predecessors YOLOv1 \cite{YOLOv1} and YOLOv2 \cite{YOLOv2}. Unlike its predecessors, YOLOv3 can perform multi-label classification of the detected object. Secondly, the bounding box prediction assigns an objectness score of 1 to the predicted box that overlaps the ground truth box by more than a predefined threshold, such that YOLOv3 assigns one bounding box per ground truth object. Additionally, YOLOv3 predicts bounding boxes at 3 different scales. Lastly, it adopts a 53-layer CNN feature extractor named Darknet-53. The study found both YOLOv3 and Faster R-CNN performing comparably well in classifying cars from the imagery, although YOLOv3 outperformed Faster R-CNN in processing time and sensitivity, \emph{i.e.,} the ability to identify all the cars in the image. In \cite{Acoustic_1}, a \ac{PS-DNN} is used for identifying the voices of people in emergency rescue missions. The microphone array embedded onboard a Parrot Bebop UAV is used for collecting acoustic data. The PS-DNN is posed as a multitask learning framework to achieve two simultaneous tasks: sound source separation and sound source identification. The PS-DNN for multitask learning is a feedforward neural network with partially shared hidden layers between the two sub-networks. Mel filter bank feature vectors, obtained by applying a windowed \ac{STFT} to the acoustic signals, are fed as input to the PS-DNN. The network was trained with the Adam optimizer \cite{adam} with a learning rate of $2\times10^{-4}$. The study demonstrated promising accuracy when a partially annotated dataset was employed. Three \ac{ESC} models - CNN, RNN, and \ac{GMM} - were evaluated in \cite{Acoustic_2} to detect commercial drones in real noisy environments. The dataset consisted of ordinary real-life noises and sounds from commercial drones such as the 3DR Solo, DJI Phantom-3, DJI Phantom-4, and DJI Inspire. The study demonstrated the RNN outperforming the CNN and GMM models. The RNN architecture is a bidirectional \ac{LSTM} with 3 layers and 300 LSTM units. An early-stopping strategy is adopted in the training phase such that if the accuracy and loss do not improve for 3 epochs, the training is stopped. The RNN exhibited good generalization over unseen data types with an F-score of 0.6984 and balanced precision and recall, while the CNN produced false positives. The GMM, on the other hand, exhibited better detection performance than the CNN but low F-scores deterring practical use. Drone identification based on acoustic fingerprints using a \ac{CNN}, \ac{RNN}, and \ac{CRNN} is presented in \cite{Acoustic_3}. The CRNN \cite{CRNN} exploits the advantages of both the CNN and the RNN to extract spatio-temporal features. The three architectures were utilized to extract unique acoustic signatures of the flying drones. The authors collected the drone acoustic dataset by recording the sound produced by the drones' propellers while flying them in an indoor environment. Two types of UAVs from the Parrot family, the Bebop and the Mambo, were utilized in this study. The neural networks classify the audio input as drone or not drone. The work portrayed the CNN outperforming both the RNN and the CRNN in terms of accuracy, precision, F1-score, and recall, while the RNN required the least training time.
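Several of the acoustic pipelines above operate on Mel filter bank features derived from a windowed \ac{STFT}. A minimal sketch of such a front end using the librosa library is given below; the sampling rate, frame length, hop size, and number of Mel bands are illustrative assumptions rather than the cited works' settings.
\begin{verbatim}
import librosa

# Load an acoustic recording (assumed file) and compute log-Mel
# features: a windowed STFT is applied and its power spectrum is
# mapped onto a Mel filter bank, giving one feature vector per frame.
y, sr = librosa.load("recording.wav", sr=16000)
mel = librosa.feature.melspectrogram(
    y=y, sr=sr, n_fft=512, hop_length=256, n_mels=40)
log_mel = librosa.power_to_db(mel)  # log-compress the band energies
frames = log_mel.T                  # shape: (num_frames, 40)
\end{verbatim}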
Despite its short training time, the performance of the RNN in \cite{Acoustic_3} was very poor on all other counts, which could be attributed to the short-duration audio clips as opposed to the long sequential data RNNs are suited to. The CRNN, however, outperformed the RNN and exhibited performance comparable to that of the CNN with the added benefit of less training time. The authors also extended their work to multi-label classification to identify the audio clips as Bebop, Mambo, or Unknown. In this task, again, a performance trend similar to the binary classification case was observed. \subsection{UAS Path Planning and Situational Awareness} \label{sec:sa} A CNN-based controller strategy for autonomous indoor UAV navigation is considered in \cite{PSA_1}. The limited precision of \ac{GPS} in indoor environments and the inability to carry heavyweight sensors render indoor navigation a challenging task. The CNN aims to learn a controller strategy mimicking an expert pilot's navigation decisions. A dataset of seven unique indoor locations was collected with a single forward-facing camera onboard a Parrot Bebop drone. The classifier is trained to return flight commands - Move Left, Move Right, Move Forward, Spin Left, Spin Right, and Stop - by training with manually labeled expert flight commands. The CNN classifier followed the CaffeNet \cite{caffenet} architecture with five convolutional layers and three fully connected layers. The classifier was trained on an NVIDIA GTX 970M \ac{GPU} with NVIDIA cuDNN \cite{cudnn}. The trained classifier was tested on a combination of familiar and unseen test environments with different objects, lighting, and geometry. The classifier reported success rates in the range of 60\%-80\% for the test locations, implying acceptable robustness in flying autonomously through buildings with different objects and geometry. An interesting approach to UAV navigation is adopted in \cite{PSA_2}, where the UAV is taught to fly by crashing. Here, the authors create a crash dataset by crashing the UAV under different scenarios $11500$ times, in addition to non-crash data sampled from the same trajectories. In other words, the drone is allowed to learn not to collide with objects by crashing. The collision data is collected by placing a Parrot AR.Drone 2.0 at a random location, letting it take off in a random direction, and follow a straight-line path until collision. This way, the model learns whether going straight in a specific direction is good or not. The network architecture adopted the AlexNet \cite{alexnet} pre-trained on ImageNet \cite{imagenet}. The pre-trained weights act as initialization for all network weights except the last fully connected layer, which is initialized randomly. The AlexNet architecture involves five convolutional layers and three fully connected layers. The final layer adopts the binary softmax activation function, which classifies the navigational actions for the drone. Given an input image, the network decides whether to go left, right, or straight. Experimental demonstrations portrayed the efficacy of this supervised learning approach in avoiding glass walls/doors and navigating corridors and hallways, in contrast to an image depth estimation method. A regression CNN for indoor navigation is proposed in \cite{regCNN}. Autonomous indoor navigation is enabled by predicting the distance to collision based on the visual input from the onboard monocular camera.
The authors adopt a self-supervised approach to collect an indoor flight dataset annotated with the distance to the nearest obstacle in three diverging directions. The automated annotation is enabled by three pairs of infrared and ultrasonic sensors mounted on the UAV, pointing in different directions with respect to the camera's field of view. The regression CNN follows a two-stream architecture with the first two layers of each stream similar to those of the AlexNet CNN. The two streams are fused by concatenating their feature maps, followed by processing with a convolutional layer similar to the third convolutional layer of AlexNet. The two subsequent convolutional layers in the single-stream section also adopt the last two convolutional layers of AlexNet, except that the classifier unit of AlexNet is replaced by a single fully-connected regression layer. The training of the regression CNN was performed with \ac{SGD} with momentum over 30 epochs with a mini-batch size of 128. The implementation and training were performed in MATLAB on a desktop server with an Intel Xeon E5-2630 processor, 64GB of RAM, and a GTX1080 \ac{GPU}. The UAV is a Parrot AR.Drone 2.0 with a 720p forward-facing camera onboard. During the experiments, a WiFi connection is established between the UAV and a laptop with an Intel Core i7-6700HQ, 16GB of RAM, and a GTX1070 \ac{GPU} to perform the CNN inference and motion planning. The authors compared the performance of the proposed regression CNN against the two previously discussed state-of-the-art schemes \cite{PSA_1} and \cite{PSA_2}. The regression CNN demonstrated 4.6$\times$ and 1.7$\times$ longer continuous navigation time without collision compared to \cite{PSA_1} and \cite{PSA_2}, respectively. A \ac{MAV}-assisted supervised deep learning approach for ground robot path planning to perform search and rescue operations is proposed in \cite{aerialSearchCNN}. The path planning is executed in three stages. The initial stage involves a human operator flying the MAV in vision-assisted mode to localize a goal location such as a ground robot or a victim. During this initial flight, the camera imagery from the MAV is collected for initial terrain classification. The terrain is mapped to obtain a precise elevation map by monocular 3D reconstruction. The CNN classifier is trained on-the-spot without any \emph{a priori} information. The on-the-spot classifier training involves an operator flying the MAV and labeling a few regions of interest from the live camera imagery. Many training patches are gathered from the few labeled regions by cropping patches that fall on previously labeled areas. The authors of \cite{aerialSearchCNN} report an on-the-spot CNN training time of 10--15 min. Post training, the patches are classified and projected onto the terrain map. After the goal location is found, the second stage involves an autonomous vision-guided flight to a series of waypoints. The path exploration follows an exhaustive search over the candidate paths in order to effectively reduce the response time. The authors demonstrated the efficacy of their approach via simulation as well as field trials. The MAV for field trials was custom built with an onboard \ac{IMU}, quadrotor frame, downward-facing camera, onboard Odroid U3 quad-core computer, and PIXHAWK autopilot software. The ground robot for the experiment was a Bluebotics Absolem, which is capable of driving over rough terrain.
The field trials with canyon and driveway scenarios demonstrated feasible and efficient path exploration over multiple terrain classes, elevation changes, and untraversable terrain. Another CNN architecture - whereCNN - was proposed in \cite{ground2aerialCNN} to perform ground-to-aerial geolocalization. The method aims at mapping a street-view query image to its corresponding location on a city-scale aerial-view image. The CNN architecture for cross-view image matching is inspired by the Siamese network \cite{siamese} and comprises two identical CNNs that learn a shared deep representation across pairs of street- and aerial-view images. A contrastive loss function is used as the overall loss to train the whereCNN, such that matched pairs are penalized by their squared Euclidean distance and mismatched pairs by the squared difference between a small margin and their distance (whenever that distance is smaller than the margin). A smaller margin causes the network to be influenced by harder negatives. The dataset comprises 78k pairs of Google street view images along with their corresponding aerial views. The whereCNN was trained for 4 days on an NVIDIA Grid K520 \ac{GPU}. The authors demonstrated that the whereCNN trained without sharing parameters between the Siamese network entities generalizes reasonably well to unseen data. The method exhibited cross-view matching accuracy of over 22\% for Charleston, San Diego, and San Francisco. In Table \ref{tab:dluav}, we summarize the deep learning techniques that enable autonomous UAV applications. \begin{table*}[!h] \caption{Deep learning for UAV autonomy} \centering \def\arraystretch{1.5}% \begin{tabular}{|p{3.8 cm}|p{2.6 cm}|p{4cm}|} \hline \textbf{Proposed solution} & \textbf{Architecture} & \textbf{Application}\\ \hline Giusti et al. \cite{Imagery_1} &CNN &Outdoor UAV navigation \\ \hline Li et al. \cite{Imagery_3} &AlexNet &Palm tree detection\newline and counting \\ \hline Xu et al. \cite{Imagery_4} &Faster R-CNN &Car detection from\newline low-altitude UAV imagery \\ \hline Liu et al. \cite{Imagery_5} &Faster R-CNN &Maize tassel detection \\ \hline Benjdira et al. \cite{Imagery_6} &Faster R-CNN, \newline YOLOv3 &Car detection from\newline UAV imagery \\ \hline Morito et al. \cite{Acoustic_1} &PS-DNN &Emergency rescue mission \\ \hline Jeon et al. \cite{Acoustic_2} &RNN &Drone identification \\ \hline S. Al-Emadi et al. \cite{Acoustic_3} &CNN, RNN, CRNN &Drone identification \\ \hline D. K. Kim and T. Chen \cite{PSA_1} &CaffeNet &Indoor UAV navigation \\ \hline Gandhi et al. \cite{PSA_2} &AlexNet &Indoor UAV navigation \\ \hline A. Kouris and C. Bouganis \cite{regCNN} &CNN &Indoor UAV navigation \\ \hline Delmerico et al. \cite{aerialSearchCNN} &CNN &UAV-assisted ground \newline robot navigation \\ \hline Lin et al. \cite{ground2aerialCNN} &whereCNN &Ground-to-aerial geolocalization \\ \hline \end{tabular} \\ \label{tab:dluav} \end{table*} \subsection{Open Problems and Challenges} In Section~\ref{sec:DeepLearning}, we discussed state-of-the-art deep learning techniques for achieving various \ac{UAS} tasks. Specifically, we discussed how deep learning can be leveraged to accomplish feature extraction from sensor data, planning, and situational awareness. However, several open research challenges remain on the road to achieving complete autonomy of \ac{UAS} tasks.
A few of these are listed below: \begin{enumerate} \item \emph{Lack of realistic datasets}: The reality gap between simulated and actual deployment scenarios poses a severe challenge to deployed deep learning solutions. The diverse situations a UAV can confront in a realistic setting - varied obstacles in the traversed path, occluded or visually artifacted targets in an object detection task, effects caused by the onboard sensors, etc. - are hard to model in a virtual setting. In addition, generating such a realistic dataset from actual UAVs and then annotating it is a laborious task. \item \emph{Fast deep learning:} Generalizing a supervised deep learning solution to unseen data, such as data not represented by the training dataset, is an open research challenge. On-the-spot learning, \emph{i.e.,} training the neural network on-the-fly with limited snapshots of the scenario, will prove useful in allowing the model to continue learning new scenarios without forgetting past knowledge. The recently introduced model-agnostic meta-learning (MAML) \cite{maml} opens the door to developing such fast learning techniques. \item \emph{Resource-heavy deep learning techniques:} The computational complexity of deep learning architectures is another significant hurdle that poses severe constraints on the latency, weight, flight time, power consumption, and cost. Denser architectures require powerful computational platforms such as \acp{GPU} that are often beyond the prebuilt onboard computational capacity of UAVs, requiring auxiliary computational units. Such additional computational platforms increase the cost, weight, and power consumption of the UAVs while reducing flight time. \item \emph{Vulnerability to cyberattacks:} The vulnerability of deployed deep learning techniques to various security attacks is a cause of serious concern. Spoofing attacks, signal jamming, and identity forging, among others, can disrupt the intended UAV operation, leading to asset loss and damage. Integrating adversarial learning techniques into application-specific deep learning approaches is one way to tackle such security threats. \end{enumerate} \section{Reinforcement Learning for UAS Autonomy} \label{sec:RL} Reinforcement learning provides a learning framework allowing agents to act optimally via sequential interactions with their environment. In comparison to supervised or unsupervised learning, reinforcement learning allows the agent to leverage its own experiences derived from environmental interactions. Additionally, reinforcement learning provides a means to specify goals for the agent through a reward and penalty scheme. These characteristics have led to many research efforts on the application of reinforcement learning to autonomous \ac{UAS}. Reinforcement learning has been applied primarily to lower-level control tasks that regulate the \ac{UAV}'s velocity and attitude, as well as to navigation and other higher-level tasks. \subsection{UAS Control System} Stable control of a \ac{UAS} is a complex task due to nonlinear flight dynamics. Traditional control approaches such as \ac{PID} controllers have been used successfully for \ac{UAS} attitude and velocity control in stable environments. However, the performance of these controllers can deteriorate in dynamic or harsh environments.
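To ground the discussion, a minimal sketch of a discrete-time PID loop of the kind used for attitude or velocity control is given below. The gains, sample rate, and naive anti-windup clamp are illustrative assumptions; production autopilots add filtering and saturation handling.
\begin{verbatim}
class PID:
    # Constant-parameter discrete-time PID controller sketch.
    def __init__(self, kp, ki, kd, dt):
        self.kp, self.ki, self.kd, self.dt = kp, ki, kd, dt
        self.integral, self.prev_error = 0.0, 0.0

    def update(self, setpoint, measurement):
        error = setpoint - measurement
        self.integral += error * self.dt
        self.integral = max(-1.0, min(1.0, self.integral))  # anti-windup
        derivative = (error - self.prev_error) / self.dt
        self.prev_error = error
        return (self.kp * error + self.ki * self.integral
                + self.kd * derivative)

# e.g., a 250 Hz roll-rate loop with assumed gains
roll_rate_pid = PID(kp=0.15, ki=0.05, kd=0.003, dt=1.0 / 250.0)
\end{verbatim}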
The main disadvantages of \ac{PID} control, being a constant-parameter feedback scheme, are that the control efforts are purely reactive and that the controller has neither \emph{a priori} knowledge of nor the ability to learn about the environment. Techniques from adaptive and robust control can provide insights on designing controllers that adapt to dynamic environments and operate effectively in the presence of uncertainties. However, a shortcoming of these traditional control techniques is that they typically require a mathematical model of the environmental dynamics and do not explicitly learn from past experiences. Reinforcement learning algorithms present a potential solution to the problem of \ac{UAS} control due to their ability to adapt to unknown environments. There have been many research efforts focusing on the application of reinforcement learning to control systems on a \ac{UAS} \cite{Waslander_2005,BouAmmar_2010,dosSantos_2012,Zhang_2016_MPC,Hwangbo_2017,Lambert_2019,Koch_2019,Bohn_2019}. Much of the research has focused on quadrotor \acp{UAV}; however, some of the early works involved autonomous helicopters. Many of the reinforcement learning based control systems discussed in this section target attitude control of the UAV, but some of the works consider trajectory tracking and maneuvering as well. Additionally, several algorithmic approaches have been studied, including both online and offline methods operating in conjunction with traditional control algorithms as well as \ac{DRL} based approaches. Early works applying reinforcement learning to \ac{UAV} control problems focused on autonomous helicopters \cite{Bagnell_2001, Kim_nips_2004_autonomous_helicopter, Ng_autonomous_inverted_helicopter_2004, Abbeel_2006}. In these works, data was collected from a human pilot flying a remote control helicopter and the dynamics were learned offline. From the learned dynamics, reinforcement learning algorithms were used to design controllers for various maneuvers including hovering, trajectory tracking, and several advanced maneuvers including inverted hovering, flips, rolls, tunnels, and others from the \ac{AMA} remote control helicopter competition. The first work that used reinforcement learning for quadrotor UAV control did so for altitude control \cite{Waslander_2005}. A model-based reinforcement learning algorithm that rewards accurate tracking and good damping performance was utilized to find an optimal control policy. To benchmark against a traditional approach, an integral sliding mode controller was also implemented. Tests conducted on \ac{STARMAC} quadrotors showed the reinforcement learning and integral sliding mode controllers to have comparable performance, both significantly exceeding that of traditional linear control strategies. In \cite{BouAmmar_2010}, \ac{FVI} is used to design a velocity control system for a quadrotor UAV. The reinforcement learning FVI controller was compared to a cascaded velocity and attitude controller designed using nonlinear control techniques. The performance of each controller was compared using numerical simulations in MATLAB/SIMULINK. While both controllers produced satisfactory results, the reinforcement learning controller was outperformed in terms of settling time but had a lower percent overshoot. The authors stated that a non-parametric approach to value function estimation, such as the use of a wavelet network, may have resulted in better performance for the reinforcement learning controller.
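To make the \ac{FVI} idea concrete, a minimal sketch on a toy one-dimensional velocity-tracking problem is given below. The dynamics, reward, action set, and polynomial function approximator are all illustrative assumptions and not the cited quadrotor model.
\begin{verbatim}
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.default_rng(0)
states = rng.uniform(-2.0, 2.0, size=(200, 1))   # sampled velocities
actions = np.array([-0.5, 0.0, 0.5])             # assumed thrust increments
gamma = 0.95

def step(v, a):
    return 0.9 * v + a                           # assumed linear dynamics

value = make_pipeline(PolynomialFeatures(2), LinearRegression())
value.fit(states, np.zeros(len(states)))         # initialize V_0 = 0

for _ in range(100):
    # Bellman backup: V(s) <- max_a [ r(s') + gamma * V(s') ], where
    # the assumed reward -v'^2 penalizes deviation from zero velocity.
    backups = np.max([-step(states[:, 0], a) ** 2
                      + gamma * value.predict(step(states, a))
                      for a in actions], axis=0)
    value.fit(states, backups)                   # re-fit the approximator
\end{verbatim}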
The authors of \cite{BouAmmar_2010} further emphasized that an advantage of the reinforcement learning controller is that it does not require any prior mathematical knowledge of quadrotor dynamics to yield satisfactory behavior. In \cite{dosSantos_2012}, a Learning Automata reinforcement learning algorithm called \ac{FALA} was used to learn the optimal parameters of nonlinear controllers for trajectory tracking and attitude control. Traditional approaches such as \ac{PID}, sliding mode, and backstepping controllers were used as benchmarks against \ac{FALA}. The performance of the controllers was analyzed in simulation under varying nonlinear disturbances including wind and ground effects. The reinforcement learning tuned controllers outperformed the mathematically tuned controllers in terms of tracking errors. In \cite{Zhang_2016_MPC}, an off-policy method, \ac{MPC}, is used for guided policy search of a deep neural network policy for UAV obstacle avoidance. During training, MPC generates control actions for the UAV using knowledge of the full state; these actions, along with the state observations, are used to train the policy network in a supervised learning setting. During testing, only the state observations are available to the policy neural network. Simulations demonstrated that the proposed approach was able to successfully generalize to new environments. In \cite{Hwangbo_2017}, a neural network based policy trained using reinforcement learning is used for trajectory tracking and recovery maneuvers. The authors proposed a new reinforcement learning method that uses deterministic policy optimization with natural gradient descent. Experiments conducted both in simulation and on a real quadrotor UAV, the Ascending Technologies Hummingbird, demonstrated the effectiveness of the proposed approach. In simulations, the proposed method outperformed the popular algorithms \ac{TRPO} and \ac{DDPG}. The trajectory tracking test resulted in a small but acceptable steady-state error. Additionally, a recovery test where the quadrotor was manually thrown upside down demonstrated autonomous UAV stabilization. A benefit of the proposed algorithm is its low computation time; an average time of 6 $\mu$s was reported. In \cite{Lambert_2019}, deep \ac{MBRL} is used for low-level control of a quadrotor UAV. Deep \ac{MBRL} is used to learn a forward dynamics model of the quadrotor, and \ac{MPC} is then used as a framework for control. The algorithms were evaluated on a Crazyflie nano quadrotor. Stable hovering for 6 seconds using 3 minutes of training data was achieved, emphasizing the ability to generate a functional controller with limited data and without assuming any \emph{a priori} dynamics model. In \cite{Koch_2019}, multiple neural network based reinforcement learning algorithms are evaluated for attitude control of UAVs. The evaluated algorithms include \ac{DDPG}, \ac{TRPO}, and \ac{PPO}. The reinforcement learning algorithms were compared against \ac{PID} control systems for attitude control of UAVs in a simulation environment. The authors also developed an open-source training environment utilizing OpenAI Gym, which was evaluated using the Gazebo simulator. The simulations indicated that the agents trained with PPO outperformed a tuned PID controller in terms of rise time, overshoot, and average tracking error. In \cite{Bohn_2019}, \ac{PPO} is applied for attitude control of fixed-wing UAVs. The \ac{PPO} method was chosen largely due to the success reported in \cite{Koch_2019}.
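At the core of \ac{PPO} is a clipped surrogate objective that limits how far a single update can move the policy. A hedged sketch is shown below; the tensors are assumed to be per-sample quantities collected from rollouts.
\begin{verbatim}
import torch

def ppo_clip_loss(logp_new, logp_old, advantages, eps=0.2):
    # Importance ratio between the updated and the rollout policy.
    ratio = torch.exp(logp_new - logp_old)
    clipped = torch.clamp(ratio, 1 - eps, 1 + eps)
    # Maximize the clipped surrogate => minimize its negation.
    return -torch.min(ratio * advantages,
                      clipped * advantages).mean()
\end{verbatim}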
In \cite{Bohn_2019}, the PPO controller was trained in a simulation environment to control the attitude (pitch, roll) and airspeed of the UAV to specified setpoints. The results showed that the DRL controller was able to generalize well to environments with turbulence. The advantages of the DRL controller were emphasized in high-turbulence scenarios, where it outperformed the PID controller in multiple performance metrics including success percentage, rise time, settling time, and percent overshoot. A DRL robust control algorithm for quadrotor \acp{UAV} is presented in \cite{Wang_2019_DPG_IC}. The algorithm uses \ac{DPG}, which is an actor-critic method. Furthermore, similar to classical control design, DPG is augmented with an integral compensator to eliminate steady-state errors. Additionally, a two-phase learning protocol consisting of offline and online learning phases is defined for training the model. The offline training is completed using a simplified quadrotor model, but the robust generalization capabilities are validated in simulation by changing model parameters and adding disturbances. The capability of the model to learn an improved policy online is demonstrated with faster response time and less overshoot compared to the original policy learned offline. \subsection{Navigation and Higher Level Tasks} In this section, the use of reinforcement learning for higher level planning tasks such as navigation, obstacle avoidance, and landing maneuvers is studied. In \cite{Imanberdiyev_2016}, a model-based reinforcement learning algorithm is used as a high level control method for autonomous navigation of quadrotor \acp{UAV} in an unknown environment. A reinforcement learning algorithm called TEXPLORE \cite{texplore_paper} is utilized to perform a targeted exploration of states that are both uncertain in the model and promising for the final policy. This is in contrast to an algorithm such as Q-learning that attempts to exhaustively explore the state space. TEXPLORE uses decision trees and random forests to learn the environmental model. In particular, the decision trees are used to predict the relative state transitions and transition effects. A random forest is used to learn several models of the environment, as a single decision tree may learn an inaccurate model. The final model is averaged over the decision trees in the random forest. TEXPLORE then performs its targeted exploration using an algorithm called \ac{UCT}. The authors implement and compare the TEXPLORE algorithm to Q-Learning for a navigation task. The navigation task involves the UAV traveling from a start to an end state under battery constraints, \emph{i.e.,} the UAV requires a recharge during the mission in order to make it to the goal. The navigation task is performed in a simulated grid environment implemented using ROS and Gazebo. It is shown that the TEXPLORE algorithm learns effective navigation policies and outperforms the Q-Learning algorithm considerably. In \cite{pham2018autonomous}, a \ac{PID} and Q-Learning algorithm for navigation of a \ac{UAV} in an unknown environment is presented. The problem is modeled as a finite \ac{MDP}. The environment is modeled as a finite set of spheres with their centers forming a grid, the state of the \ac{UAV} is its approximate position, \emph{i.e.,} one of the points on the grid, and the actions available to the agent are to head North, South, East, or West. In this work, a constant flight altitude is assumed and thus the state space is two dimensional.
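Given this finite \ac{MDP} formulation, the tabular Q-learning loop with an $\epsilon$-greedy policy (elaborated next) can be sketched as follows. The grid size, rewards, and hyperparameters are illustrative assumptions, and in the cited work the selected action is handed to the \ac{PID} controller for execution rather than applied directly.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
GRID, GOAL = 10, (9, 9)
ACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]  # N, S, W, E headings
Q = np.zeros((GRID, GRID, len(ACTIONS)))
alpha, gamma, eps = 0.1, 0.9, 0.1

def step(s, a):
    # Move one grid cell, clipped to the workspace boundary.
    nxt = (min(max(s[0] + a[0], 0), GRID - 1),
           min(max(s[1] + a[1], 0), GRID - 1))
    return nxt, (10.0 if nxt == GOAL else -1.0)  # assumed rewards

for episode in range(500):
    s = (0, 0)
    while s != GOAL:
        # epsilon-greedy action selection
        a = (rng.integers(len(ACTIONS)) if rng.random() < eps
             else int(np.argmax(Q[s])))
        s2, r = step(s, ACTIONS[a])
        # one-step Q-learning update
        Q[s][a] += alpha * (r + gamma * np.max(Q[s2]) - Q[s][a])
        s = s2
\end{verbatim}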
The objective of the agent is to navigate to a goal position following the shortest path in an unknown environment. A \ac{PID} and Q-Learning algorithm are used in conjunction to navigate the \ac{UAV} to the goal position. The Q-Learning algorithm and an $\epsilon$-greedy policy are used by the agent to select the next action given the current state. The action is then translated to a desired position and input to the \ac{PID} controller, which outputs control commands to the \ac{UAV} to complete the desired action. The proposed algorithm was implemented and tested both in simulation and on a Parrot AR.Drone 2.0. In both simulation and experimentation, the \ac{UAV} was able to learn the shortest path to the goal after 38 episodes. In \cite{pham2018autonomous_functionapprox}, the authors of \cite{pham2018autonomous} utilize an approximate Q-Learning algorithm that employs function approximation in conjunction with the previously described \ac{PID} and Q-Learning control scheme for \ac{UAV} navigation tasks. Function approximation is used to handle the large state space and to provide faster convergence. A fixed sparse representation is used to represent the Q table as a parameter vector. Compared to the work in \cite{pham2018autonomous}, the state representation consists of the relative distance of the \ac{UAV} to the goal and the relative distances to obstacles in four directions obtained using onboard radar. Both simulated and real tests demonstrated faster convergence and successful \ac{UAV} navigation to the goal position. In \cite{Wang_2019_Navigation}, the authors introduce a DRL algorithm, a variant of \ac{RDPG} called Fast-RDPG, for autonomous UAV navigation in large complex environments. Fast-RDPG differs from RDPG in that it uses non-sparse rewards, allowing the agent to learn online and speeding up the convergence rate. The reward function design, which includes a transition reward (\emph{i.e.,} progress towards the goal), an obstacle proximity penalty, a free-space reward, and a time-step penalty, is also discussed; a sketch of such a shaped reward is given below. The Fast-RDPG algorithm outperforms RDPG and DDPG in terms of success, crash, and stray rates. Generalization of the Fast-RDPG algorithm to environments of different sizes, different target altitudes, and 3D navigation is discussed as well. In \cite{singla_drl_oa_2019}, a Deep Recurrent Q-Network with temporal attention is proposed as a \ac{UAV} controller for obstacle avoidance tasks. The model uses a conditional generative adversarial network to predict a depth map from monocular RGB images. The predicted depth map is then used to select the optimal control action. The temporal attention mechanism is used to weight the importance of a sequence of observations over time, which is important for obstacle avoidance tasks. The performance of the proposed approach was compared to the Deep Q-Network, \ac{D3QN}, and a Deep Recurrent Q-Network without temporal attention, and showed superior performance in simulations. In \cite{Ramos_2018}, a \ac{DRL} algorithm called Deep \ac{DPG} is used to enable advanced autonomous UAV maneuvering and landing on a moving platform. The authors integrate the Deep \ac{DPG} algorithm into their reinforcement learning simulation framework implemented using Gazebo and \ac{ROS}. The training phase of the proposed approach was conducted in simulation, and the testing phases were conducted in both simulation and real flight. The experiments demonstrated the feasibility of the proposed algorithm in completing the autonomous landing task.
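As referenced above, several of the navigation works depend on carefully shaped, non-sparse rewards. A hedged sketch, loosely following the shaping terms described for Fast-RDPG, is given below; all coefficients are illustrative assumptions.
\begin{verbatim}
def navigation_reward(dist_prev, dist_now, min_obstacle_dist,
                      obstacle_radius=2.0, crash_penalty=10.0,
                      step_penalty=0.05):
    # Transition term: positive when the agent moves toward the goal.
    r = dist_prev - dist_now
    # Obstacle-proximity penalty inside an assumed safety radius.
    if min_obstacle_dist < obstacle_radius:
        r -= obstacle_radius - min_obstacle_dist
    # Crash penalty on contact.
    if min_obstacle_dist <= 0.0:
        r -= crash_penalty
    # Per-step time penalty discourages wandering.
    return r - step_penalty
\end{verbatim}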
The work in \cite{Ramos_2018} additionally showed that agents trained in simulation are capable of performing effectively in real flights. In \cite{uav_auto_land_drl_polvara_2018}, a \ac{DRL} based approach to performing an autonomous landing maneuver is presented. The approach relies on a single downward-facing camera as the sole sensor. The landing maneuver is considered as a three-phase problem: landmark detection, descent maneuver, and touchdown. A hierarchy of two independent \acp{DQN} is proposed as a solution for the landmark detection and descent maneuver problems. The touchdown maneuver is not considered in the research; however, the authors indicated that it may be solved using a closed-loop \ac{PID} controller. A \ac{DQN} is employed for the landmark detection component and a double \ac{DQN} is used for the descent. Additionally, the authors propose a new form of prioritized experience replay called \emph{partitioned buffer replay} to handle sparse rewards. Various simulations indicated that the proposed \ac{DRL} approach was capable of performing the landing maneuver and could effectively generalize to new scenarios. In Table \ref{tab:rluav}, we summarize the reinforcement learning techniques that enable autonomous UAV applications. \begin{table*}[!h] \caption{Reinforcement learning for UAS autonomy} \centering \def\arraystretch{1.5}% \begin{tabular}{|p{3.4 cm}|p{3.4 cm}|p{4 cm}|} \hline \textbf{Proposed Solution} & \textbf{Reinforcement Learning Technique} & \textbf{Application}\\ \hline J. A. Bagnell and J. G. Schneider \cite{Bagnell_2001} &Model-based, PEGASUS &Helicopter control \\ \hline Kim et al. \cite{Kim_nips_2004_autonomous_helicopter} &Model-based, PEGASUS &Helicopter hovering and maneuvers \\ \hline Ng et al. \cite{Ng_autonomous_inverted_helicopter_2004} &Model-based, PEGASUS &Helicopter inverted hovering \\ \hline Abbeel et al. \cite{Abbeel_2006} &Differential Dynamic Programming &Helicopter aerobatic maneuvers \\ \hline S. L. Waslander and G. Hoffmann \cite{Waslander_2005} &Model-based; \newline \ac{LWLR},\newline Policy Iteration &Quadrotor altitude control \\ \hline Bou-Ammar et al. \cite{BouAmmar_2010} &Fitted Value Iteration &Quadrotor velocity control \\ \hline S. R. B. dos Santos et al. \cite{dosSantos_2012} &Finite Action-set Learning Automata &Quadrotor trajectory tracking and attitude control \\ \hline Zhang et al. \cite{Zhang_2016_MPC} &MPC Guided Policy Search &Quadrotor obstacle avoidance \\ \hline Hwangbo et al. \cite{Hwangbo_2017} &Neural network policy &Waypoint tracking and recovery tests \\ \hline Lambert et al. \cite{Lambert_2019} &Deep model-based &Hovering \\ \hline Koch et al. \cite{Koch_2019} &DDPG, TRPO, PPO &Attitude control \\ \hline Bøhn et al. \cite{Bohn_2019} &PPO &Attitude control \\ \hline Y. Wang et al. \cite{Wang_2019_DPG_IC} &DPG &UAV control \\ \hline Imanberdiyev et al. \cite{Imanberdiyev_2016} &Model-based, TEXPLORE &UAV navigation \\ \hline Pham et al. \cite{pham2018autonomous} &Q-Learning &UAV navigation \\ \hline Pham et al. \cite{pham2018autonomous_functionapprox} &Q-Learning with function approximation &UAV navigation \\ \hline C. Wang et al. \cite{Wang_2019_Navigation} &Fast-RDPG &UAV navigation \\ \hline Singla et al. \cite{singla_drl_oa_2019} &Deep recurrent Q network with temporal attention &Obstacle avoidance \\ \hline A. Rodriguez-Ramos et al. \cite{Ramos_2018} &DDPG &Landing on a moving platform \\ \hline Polvara et al.
\cite{uav_auto_land_drl_polvara_2018} &DQN &Autonomous landing \\ \hline \end{tabular} \\ \label{tab:rluav} \end{table*} \subsection{Open Problems and Challenges} There are still several open problems and challenges associated with reinforcement learning based autonomous UAV solutions. Many of these are associated with the transition from simulation to hardware, as evidenced by the limited results on reinforcement learning solutions performing high-complexity planning tasks in real-life tests. A challenge associated with this transition is managing the reality gap between simulation and real-life testing. Additionally, as deep reinforcement learning solutions are utilized for autonomy, integration onto an embedded UAV platform can become challenging due to the computational requirements of the algorithms and the \ac{SWaP} constraints of the UAV. Other challenge areas include developing algorithmic solutions that enable higher degrees of autonomy. For example, more complex tasks and missions may require the UAV to cooperate with other autonomous systems and/or humans via \acp{NUI}. Also, the majority of the published works consider scenarios with a static mission objective in dynamic environments; in general, however, the autonomous agent will need to operate in scenarios where both the mission objectives and the environment are dynamic. It is also possible that the mission will consist of multiple objectives that need to be completed simultaneously. \section{Simulation Platforms for UAS} \label{sec:Simulation} The ability to accurately simulate \ac{UAS} in realistic operational environments is an invaluable capability, largely because real hardware-based testing of \ac{UAS} is both a time-consuming and expensive process. The potential for injuries, damages, or losses is among the main challenges associated with hardware-based testing. Additional challenges and constraints include limited battery life and the laws and regulations governing outdoor flight. These challenges are exacerbated in the context of deep learning and reinforcement learning based autonomy solutions, as they require large amounts of training data and experiences in order to learn effective behaviors and are also often unstable during their training phases. Additionally, it can be challenging and/or costly to collect ample training data for machine learning based autonomous \ac{UAS} algorithms. Physically and visually realistic \ac{UAS} simulations are potential solutions to several of these challenges. For example, a realistic visual simulation of an operational environment could be used to create a dataset for a deep learning algorithm. Furthermore, simulation provides a means to test \ac{UAS} in scenarios that can be hard to create in real life, \emph{e.g.,} failure modes and harsh environmental conditions. Simulation also provides a means of establishing easily repeatable environments for algorithm comparisons and software regression testing. \subsection{Simulation Suites} This section presents a survey of popular simulation software platforms for \ac{UAS}. Previous surveys conducted in \cite{UAV_flight_controller_simulator_survey, Simulation_Hentati, Mairaj_2019} introduced the majority of available \ac{UAS} simulation platforms for various applications. The discussion in this section focuses primarily on open-source simulators that appear useful for research and development of autonomous \ac{UAS} applications.
Gazebo \cite{Gazebo_website, Gazebo_paper} is an open-source robotics simulator capable of simulating multiple robots in both indoor and outdoor environments. This is enabled by its integration with high-performance physics engines, e.g., \ac{ODE}, Bullet, Simbody, and \ac{DART}, as well as its ability to model various sensors, noise, and environmental effects. The Gazebo architecture is modular, allowing worlds and objects to be defined using \ac{SDF} files while enabling sensor and environmental effect modules to be added as plugins. \ac{OGRE} \cite{Ogre_website} is utilized by Gazebo for high fidelity visual rendering of the environment that captures different textures and lighting. Gazebo is also one of the default simulators integrated with the popular robotics middleware package \ac{ROS}. By itself, Gazebo does not provide the capability to simulate \acp{UAV}; however, multiple works define the necessary model, sensor, and controller plugins to facilitate \ac{UAV} simulation, as discussed herein. An example of \ac{UAV} simulation using Gazebo is shown in Figure \ref{fig:gazebo}. In \cite{hector_quadrotor_paper,hector_quadrotor_wiki}, simulation of quadrotor \acp{UAV} using Gazebo and \ac{ROS} is implemented as an open-source package called hector\_quadrotor. The hector\_quadrotor package provides the geometry, dynamics, and sensor models for quadrotor \acp{UAV}. Sensor models for the \ac{IMU}, barometer, ultrasonic sensor, magnetic field, and \ac{GPS} are provided in addition to the default sensor models available in Gazebo such as LIDAR and cameras. \acp{EKF} and cascaded \ac{PID} controllers are implemented and utilized for state estimation and control, respectively. A tutorial example of integrating a LIDAR-based \ac{SLAM} algorithm with the simulated \acp{UAV} is included in the package's documentation. RotorS is another open-source \ac{MAV} simulator using Gazebo and \ac{ROS} \cite{RotorS_chapter,rotorS_wiki}. Models of various multirotor \acp{UAV}, including the AscTec Hummingbird, AscTec Pelican, and AscTec Firefly, are included with the simulator. Default simulator sensors include an \ac{IMU}, a generic odometry sensor, and a visual inertial sensor. Similar to hector\_quadrotor, RotorS provides a baseline UAV simulation using Gazebo by defining the required UAV, sensor, and controller configuration files and plugins. The RotorS package provides a well-documented and functional UAV simulator that a researcher can use for rapid prototyping of new autonomous UAV control algorithms. \begin{figure}[h] \centering \hspace{-1 cm} \includegraphics[width=3 in]{editor/figures/gazebo.png} \caption{UAV simulation in Gazebo \cite{px4_gazebo_pic}} \label{fig:gazebo} \end{figure}
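Both hector\_quadrotor and RotorS expose standard \ac{ROS} interfaces for commanding the simulated vehicle. A minimal sketch of publishing velocity commands from a Python node is shown below; the \texttt{cmd\_vel} topic follows the hector\_quadrotor convention, and the command values and rate are illustrative assumptions.
\begin{verbatim}
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist

# Publish a constant forward velocity to a simulated quadrotor.
rospy.init_node("uav_velocity_demo")
pub = rospy.Publisher("cmd_vel", Twist, queue_size=1)
rate = rospy.Rate(10)  # 10 Hz command rate

cmd = Twist()
cmd.linear.x = 0.5  # m/s forward

while not rospy.is_shutdown():
    pub.publish(cmd)
    rate.sleep()
\end{verbatim}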
In \cite{Koch_2019}, a framework called GymFC for tuning \ac{UAV} flight control systems was introduced. The framework integrates the popular reinforcement learning toolkit OpenAI Gym \cite{OpenAI_Gym} and the Gazebo simulator to facilitate research and development of attitude flight control systems using \ac{DRL}. GymFC defines three layers to provide seamless integration of reinforcement learning based \ac{UAV} control algorithms: the Digital Twin Layer, the Communication Layer, and the Environment Interface Layer. The Digital Twin Layer consists of the simulated \ac{UAV} and environment as well as interfaces to the Communication Layer. The Communication Layer is the interface between the Digital Twin and Environment Interface Layers that implements lower-level functionality to enable control of the \ac{UAV} and the simulation. The Environment Interface Layer implements the environmental interface defined by the OpenAI Gym API that the reinforcement learning agent interacts with. In the original work \cite{Koch_2019}, the proposed \ac{DRL} based attitude controllers were only evaluated in simulation. The open-source Neuroflight framework \cite{koch2019neuroflight} has since been introduced for deploying neural network based low-level flight control firmware on real \acp{UAV}. Neuroflight utilizes GymFC for initial training and testing of controllers in a simulation environment and then deploys the trained models to the \ac{UAV} platform. Initial tests of Neuroflight have demonstrated stable flight and maneuver execution while the neural network based controller runs on an embedded processor onboard the \ac{UAV}. The Aerostack software framework \cite{Aerostack_Paper, aerostack_git, Simulation_Sanchez-Lopez} defines an architectural design to enable advanced \ac{UAV} autonomy. Additionally, Aerostack has been used for autonomous \ac{UAV} research and development both in simulation (utilizing the RotorS simulator \cite{RotorS_chapter}) and on hardware such as the Parrot AR.Drone. Microsoft AirSim \cite{airsim2017fsr, airsim_git} is an open-source simulator for both aerial and ground vehicles. AirSim provides realistic visual rendering of simulated environments using the Unreal Engine, as shown in Figure \ref{fig:airsim}. AirSim was designed as a simulation platform to facilitate research and development of \ac{AI}-enabled autonomous ground and aerial vehicles, which motivates its use when developing deep learning and reinforcement learning \ac{UAS} solutions. The software is cross-platform and can be used on Linux, Windows, and Macintosh operating systems. The AirSim software comes with extensive documentation, tutorials, and \acp{API} for interfacing with vehicles, sensors, and the environment for programmatic control and data collection for model training. Recently, AirSim was used as a platform to host a simulation-based drone racing competition called Game of Drones \cite{madaan2020airsim}. \begin{figure}[h] \centering \hspace{-1 cm} \includegraphics[width=3 in]{editor/figures/AirSimDemo.png} \caption{UAV simulation in AirSim \cite{airsim_pic}} \label{fig:airsim} \end{figure} A final consideration is that the popular flight control stacks - PX4 and ArduPilot (discussed in detail in Section \ref{sec:flightstack}) - can both integrate with Gazebo and AirSim for software-in-the-loop and hardware-in-the-loop simulations. The Gazebo interfaces maintained by PX4 are derived from the RotorS project.
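As an illustration of programmatic control, a minimal sketch using AirSim's Python client is shown below. It assumes the \texttt{airsim} Python package is installed and a simulation is already running; the waypoint and velocity values are illustrative.
\begin{verbatim}
import airsim

# Connect to the AirSim simulator running in Unreal Engine.
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)

client.takeoffAsync().join()
# NED frame: negative z is up; fly to an assumed waypoint at 3 m/s.
client.moveToPositionAsync(10, 0, -5, 3).join()

# Grab a compressed camera frame for a perception model.
png_bytes = client.simGetImage("0", airsim.ImageType.Scene)

client.landAsync().join()
client.armDisarm(False)
client.enableApiControl(False)
\end{verbatim}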
\subsection{Open Problems and Challenges} Even with the advances made in the realm of \ac{UAS} simulation, multiple problems and challenges remain. The first problem is typical of open-source platforms in any domain - there is no official or industry-accepted standard platform. For example, the two most popular open-source flight control stacks, ArduPilot and PX4, both support multiple simulators, but there is no official/default simulator common to them. At this time, it appears that both Gazebo and AirSim have the potential for use in autonomous \ac{UAS} research and development. A challenge associated with the Gazebo simulator is that, although it is widely used in \ac{UAS} simulation, it technically does not provide native \ac{UAS} simulation support. Works such as \cite{hector_quadrotor_paper, RotorS_chapter} implement the required plugins, configuration, and baseline controllers to enable \ac{UAV} simulation using Gazebo. Additionally, as is common with open-source software, there is often limited software maintenance, development support, and documentation of the open-source simulators. An additional challenge associated with \ac{UAS} simulation is the steep learning curve associated with advanced usage and software development. It is typically straightforward to install and run the examples provided by a simulator; however, it may take time to become familiar with simulator configurations, development workflows, and software APIs. For example, a developer may be required to add support for a new \ac{UAV} platform, sensor type, or environment tailored to the research application. This problem could be mitigated to an extent as the use of these platforms becomes widespread and if a uniform standard emerges for adding new features that can be made available to the community. An open problem is assessing the reality gap between simulation and real-life deployment. This problem will be further studied as research and development of algorithms for autonomous \ac{UAS} continues. Other open problems are associated with the seemingly limited consideration of \ac{UAV} swarm operation, human interaction via \acp{NUI} or ground control stations, and the communication systems utilized by the \ac{UAS}. \section{UAV Hardware for Rapid Prototyping} Rapid \ac{UAS} hardware-based prototyping is an essential step in deploying and validating machine learning solutions. Factors such as the unique requirements of the deep learning solution and the cost of \ac{COTS} \ac{UAS} in the commercial market drive the choice of the custom prototyping route. The requirements of deep learning solutions are unique to the problem under consideration, and consequently the needs vary. For instance, an object detection task might require a stable flight platform with a good quality image sensor, whereas a target tracking or acoustic-based search and rescue mission might require a maneuverable platform with image or acoustic sensors onboard, respectively. \ac{UAS} prototyping for testing deep learning solutions involves several steps, such as choosing the appropriate hardware platform, sensors, computational resources, memory unit, and flight controller software, among others, which depend on the size, weight, and onboard carrying capacity of the \ac{UAS} platform. This section will serve as a comprehensive guide to choosing the appropriate \ac{UAS} platform, flight stack software, and computational resources, as well as to the various challenges incurred in \ac{UAS} prototyping. \subsection{Classification Choice} \label{sec:uasclass} \acp{UAV} are classified based on their wings, size, landing, etc., as seen at the beginning of the chapter (Section \ref{subsec:classUAS}). In this section, however, we will focus on fixed-wing and rotary-wing \acp{UAV}. The various UAV classifications will guide the reader in understanding the nuances of the platforms in terms of their hovering, maneuvering, and payload capabilities, among others, allowing application-specific selection. A fixed-wing UAV has rigid wings with an airfoil that produces the desired lift and aerodynamics by deflecting the oncoming air.
Although they cannot hover in place or maintain low speeds, they support long-endurance flights. Further, they require an obstruction-free runway to take off and land. However, in comparison to rotary-wing platforms, they carry heavier payloads and are more energy-efficient owing to their gliding characteristics. The MQ-9 Reaper is an example of a fixed-wing UAV, as shown in Fig. \ref{fig:fixed_wing}. A rotary-wing UAV possesses two or more rotor blades positioned around a fixed mast to achieve the desired aerodynamic thrust. Rotary-wing platforms are capable of hovering, low-altitude flight, and \ac{VTOL}. In contrast to fixed-wing platforms, they offer flexible maneuverability owing to the rotor blades. Rotary-wing \acp{UAV} are further classified into single-rotor, multi-rotor, and fixed-wing hybrid \cite{chapman_2019}. \begin{figure*}[h] \minipage[b]{0.5\textwidth} \centering \includegraphics[width=2.3 in]{editor/figures/reapermq9.JPG} \caption{Fixed Wing \ac{UAV} \cite{airForce_2015} } \label{fig:fixed_wing} \endminipage\hfill \minipage[b]{0.49\textwidth} \centering \includegraphics[width=2.3 in]{editor/figures/single-rotor-drone-compressor.jpg} \caption{Single-Rotor \ac{UAV} \cite{shan2018} } \label{fig:single_rotor} \endminipage\hfill \end{figure*} Single-rotor \acp{UAV} rely on a single main rotor to stay airborne. Although they possess a tail rotor to control the heading, as in Fig. \ref{fig:single_rotor}, it does not count towards the rotor count. The required airflow to move forward is generated by the rotor blades. They are also capable of \ac{VTOL} and hovering tasks. Since they rely on a single rotor to stay elevated, the blades are usually longer. In contrast to multi-rotor \acp{UAV}, they can carry heavier payloads and are more energy-efficient owing to the lower power requirement of a single rotor. The energy-efficient operation enables longer flight times when compared to multi-rotor platforms. Therefore, single-rotor platforms can be beneficial for aerial surveying applications that require carrying heavier payloads and extended flight times. Helicopters are an example of single-rotor \acp{UAV}. \begin{figure*}[h] \minipage[b]{0.5\textwidth} \centering \hspace{-1 cm} \includegraphics[width=1.95 in]{editor/figures/muti-rotor-uav.JPG} \caption{Multi-Rotor \ac{UAV}} \label{fig:multi_rotor} \endminipage\hfill \minipage[b]{0.49\textwidth}\hspace{-0.5cm} \centering \includegraphics[width=2.4 in]{editor/figures/fixed-wing-hybrid-uav.jpg} \caption{Fixed Wing Hybrid \ac{UAV} \cite{nasa} } \label{fig:fixed_wing_hybrid} \endminipage\hfill \end{figure*} Multi-rotor \acp{UAV}, on the other hand, use multiple rotors to achieve the desired aerodynamic thrust for lifting and propelling, as in Fig. \ref{fig:multi_rotor}. The most common examples of this category are the tricopter, quadcopter (quadrotor), hexacopter, and octocopter. Multi-rotor platforms can perform complex maneuvering and hovering tasks but have limited payload capability and flight endurance. They also provide a stable platform for aerial inspection, photography, and precision agriculture applications. Fixed-wing hybrid UAV platforms combine the aerodynamic benefits of the fixed-wing and rotary-wing UAV classes (Fig.~\ref{fig:fixed_wing_hybrid}). This coupling adds \ac{VTOL}, hovering, increased flight speed, and long-endurance capabilities. Owing to the fairly recent arrival of the hybrid class, there are still very few developmental resources available for it.
The discussion in this section will enable the developer to choose the appropriate UAV platform tailored to the requirements of their unique machine learning solution. \subsubsection*{Build or Buy} Here, we weigh the pros and cons of buying versus building a UAV. Commercial \acp{UAV} available in the market serve as an easier and cost-friendly option for rapidly testing deep learning solutions. However, specific mission requirements might urge one towards building a custom model. Commercial \acp{UAV} are often preprogrammed and tested for stability. Most of them come in a ready-to-fly state requiring minimal setup out of the box. Prebuilt \acp{UAV} offer limited customization, and it can be difficult to repair and/or replace their components. An essential requirement for deep learning solutions is computational power; however, prebuilt UAV platforms have limited onboard computational resources, requiring external processors. A costlier option could be purpose-built commercial \acp{UAV} with custom attachments to fit the mission requirements. UAV prototyping, on the contrary, offers several benefits. Developers can often add custom sensors, batteries, and computational units to a flight-ready UAV platform for rapid deployment and testing. The lift and payload capacity of the UAV determine its flight endurance and stability. Achieving flight stability is guided by several factors such as the right component balance and the ground controller's pilot skills. Building a flight-ready UAV entails considerable electrical and mechanical skills, which can be viewed as both a pro and a con: the prototyping procedure can be time-consuming, but acquiring the electro-mechanical skills is instructive. Another major requirement when building custom prototypes is the flight controller software needed to control and navigate the \acp{UAV}. To conclude, we have listed a few commercial drones and their specifications in Table \ref{table:drones}. The next subsection sheds light on the flight stack software. \begin{table}[h!]
\caption{Commercially available drones} \centering \def\arraystretch{1.4}% \resizebox{\textwidth}{!}{% \begin{tabular}{|l|c|c|c|c|} \hline \multicolumn{1}{|c|}{{\textbf{UAV platform}}} & \textbf{Specifications} & \begin{tabular}[c]{@{}c@{}}\textbf{Onboard/}\\ \textbf{External} \\\textbf{DL Processing}\end{tabular} & \textbf{SDK} & \textbf{Estimated Cost} \\ \hline Ryze Tello EDU \cite{ryze_tello} & \begin{tabular}[c]{@{}c@{}}87 g weight, 13 min flight time, \\ WiFi 802.11n, Range Finder, Barometer, LED, Camera\end{tabular} & External via SDK & Tello-Python & \$129.00 \\ \hline DJI Inspire 2 \cite{dji_inspire_2} & \begin{tabular}[c]{@{}c@{}}3.44 kg weight, 4.25 kg payload, 27 min flight time, \\ 2.4000 GHz-2.4835GHz, 5.725 GHz-5.850GHz, GPS, GLONASS, \\ GALILEO, Camera, Vision systems for obstacle avoidance\end{tabular} & External via SDK & Mobile SDK & \$3,299.00 \\ \hline DJI Matrice 100 \cite{dji_matrice_100} & \begin{tabular}[c]{@{}c@{}}2.355 kg weight, 3.6 kg payload, 13 - 40 min flight time, \\ 5.725-5.825 GHz, 922.7MHz-927.7 MHz, \\ 2.400-2.483 GHz (Lightbridge)\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK, \\ Mobile SDK\end{tabular} & N/A \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Matrice\\ 200 Series V2 \cite{dji_matrice_200}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}4.91 kg weight, 1.23 kg payload, 33 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ Different Payload configurations\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK\\ Payload SDK\\ Mobile SDK\end{tabular} & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Matrice\\ 300 RTK \cite{dji_matrice_300}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}6.3 kg weight, 2.7 kg payload, 55 min flight time, \\2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ Camera Gimbal, infrared ToF Sensing System, FPV Camera, GPS\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK\\ Payload SDK\\ Mobile SDK\end{tabular} & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Matrice\\ 600 Pro \cite{dji_matric_600}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}9.5 kg weight, 15.5 kg payload, 16 - 38 min flight time, \\ 920.6 MHz-928 MHz, 5.725 GHz-5.825 GHz, 2.400 GHz-2.483 GHz, \\ Camera Gimbal, Collision avoidance system, GPS, GLONASS\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK, \\ Mobile SDK\end{tabular} & \$5,699.00 \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Mavic 2 \\ Enterprise \cite{dji_mavic_2_enterprise}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}905 g weight, 1100 g payload, 29 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, GLONASS, Visual Camera, \\ Omnidirectional Obstacle Sensing, Speaker, Beacon, Spotlight\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Mavic 2 \\ Enterprise Dual \cite{dji_mavic_2_enterprise}\end{tabular}}& \begin{tabular}[c]{@{}c@{}}899 g weight, 1100 g payload, 29 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, GLONASS, \\Thermal Camera, Visual Camera, Speaker,\\ Omnidirectional Obstacle Sensing, Beacon,
Spotlight\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & Request Quote \\ \hline DJI Mavic 2 Pro \cite{dji_mavic_2} & \begin{tabular}[c]{@{}c@{}}905 g weight, 31 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, \\ GLONASS, Pro Camera, Omnidirectional Obstacle Sensing\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & \$1,599.00 \\ \hline DJI Mavic 2 Zoom \cite{dji_mavic_2} & \begin{tabular}[c]{@{}c@{}}905 g weight, 31 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, \\ GLONASS, Zoom Camera, Omnidirectional Obstacle Sensing\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & \$1,349.00 \\ \hline DJI P4 Multispectral \cite{dji_p4} & \begin{tabular}[c]{@{}c@{}}1487 g weight, 27 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz,\\ GPS, GLONASS, GALILEO, RGB Camera, 5 monochrome sensors\end{tabular} & External via SDK & Mobile SDK & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Phantom 4 \\ Pro V2.0 \cite{dji_phantom_pro_v2}\end{tabular}}& \begin{tabular}[c]{@{}c@{}}1375 g weight, 30 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ GPS, GLONASS, GALILEO, RGB Camera, infrared sensors\end{tabular} & External via SDK & Mobile SDK & \$1,599.00 \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Phantom 4 \\ RTK \cite{dji_phantom_4_rtk}\end{tabular}}& \begin{tabular}[c]{@{}c@{}}1391 g weight, 30 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ GPS, GLONASS, GALILEO, RGB Camera, infrared sensors\end{tabular} & External via SDK & Mobile SDK & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}Parrot ANAFI \\ Thermal \cite{parrot_anafi_thermal}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}315 g weight, 26 min flight time, \\ Wi-Fi 802.11a/b/g/n, GPS, GLONASS, Barometer, magnetometer,\\ vertical camera, ultrasonic sensor, 6-axis IMU, 3-axis accelerometer, \\ 3-axis gyroscope, thermal imaging camera, 4K camera\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Parrot \\ Ground SDK\end{tabular} & \$1,900.00 \\ \hline Parrot ANAFI USA \cite{parrot_anafi_usa} & \begin{tabular}[c]{@{}c@{}}500 g weight, 32 min flight time, \\ Wi-Fi 802.11a/b/g/n, GPS, GLONASS, GALILEO, Barometer, \\ magnetometer, vertical camera, ultrasonic sensor, 6-axis IMU, \\ 3-axis accelerometer, 3-axis gyroscope, 4K camera, 32x zoom camera\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Parrot \\ Ground SDK\end{tabular} & Coming soon \\ \hline Parrot ANAFI Work \cite{parrot_anafi_work} & \begin{tabular}[c]{@{}c@{}}321 g weight, 25 min flight time, \\ Wi-Fi 802.11a/b/g/n, GPS, GLONASS, Barometer, magnetometer, \\ vertical camera, ultrasonic sensor, 6-axis IMU, 3-axis accelerometer,\\ 3-axis gyroscope, thermal imaging camera, 4K camera\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Parrot \\ Ground SDK\end{tabular} & \$999.00 \\ \hline \end{tabular}% } \label{table:drones} \end{table} 
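As a concrete illustration of how the SDKs listed in Table \ref{table:drones} are exercised, consider the Ryze Tello EDU, which exposes a simple text-over-UDP command interface. The following minimal Python sketch follows the publicly documented Tello SDK (commands sent to 192.168.10.1:8889); the local port and script structure are illustrative only and omit the error handling a real deployment would need.

\begin{verbatim}
import socket
import time

# Tello SDK endpoint documented by Ryze; replies return to the
# source port of our socket.
TELLO_ADDR = ('192.168.10.1', 8889)

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 9000))     # arbitrary local port for replies
sock.settimeout(5.0)

def send(cmd):
    """Send one SDK command and return the textual reply."""
    sock.sendto(cmd.encode('ascii'), TELLO_ADDR)
    reply, _ = sock.recvfrom(1024)
    return reply.decode('ascii', errors='ignore')

print(send('command'))    # enter SDK mode; expect 'ok'
print(send('takeoff'))
time.sleep(5)             # hover briefly
print(send('land'))
sock.close()
\end{verbatim}

Heavier platforms in the table instead expose control and telemetry through the Mobile, Onboard, Payload, or Ground SDKs listed in their respective rows.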
\subsection{Flight Stack} \label{sec:flightstack} The flight stack is the flight controller software comprising the positional and navigational guidance and control algorithms, interfaces, and communication links that direct the UAV's flight path and maneuverability. A flight stack is typically composed of firmware, middleware, and interface layers, as shown in Fig. \ref{fig:flight_stack} \cite{caleberg_2019}, whereby the middleware supports the communication link that enables command and control (C2) and telemetry message passing. The interface (software) layer communicates with the firmware via the communication link protocol and refers to the \ac{GCS} software that performs UAV configuration and monitoring. \begin{figure}[h] \centering \includegraphics[width=4.5 in]{editor/figures/FlightStackDiagram.png} \caption{Flight Stack} \label{fig:flight_stack} \end{figure} There are many open-source flight controller software packages available today, namely ArduPilot, PX4, and Paparazzi, among others. Flight controller software enables autonomous operation on specific UAV platforms (airframes). This capability comprises fault detection and handling, the C2 link protocol, battery monitoring, obstacle avoidance, landing, return-to-home features, and data logging, among others. Fault detection and handling supports features such as landing upon loss of the C2 link, returning home upon loss of the C2 link, automatic parachute release, battery voltage warnings, geofencing, landing or returning home when the battery is low, and safety checks for sensor errors. Some of the C2 link protocols are \ac{MAVLink}, UAVTalk, XBUS, XBee, FrSky, HoTT, \ac{PPM}, and Lightweight TeleMetry (LTM). ArduPilot is an open-source flight controller software released under the GNU General Public License (GPL) that supports a wide range of vehicles including fixed-wing \acp{UAV}, multi-rotor \acp{UAV}, single-rotor \acp{UAV}, boats, and submarines \cite{ardu}. It can be run on a Linux-based operating system (OS), allowing support from single-board computers to full PC systems. ArduPilot provides desktop \ac{GCS} software for mission planning and calibration on Linux, Windows, and Macintosh OS. It also supports the MAVLink, FrSky, and LTM communication protocols, and additionally supports the use of multiple radio control receivers for redundancy, failover, and/or handoffs. The PX4 flight controller \cite{px4} from the DroneCode collaborative project \cite{dronecode} is released under the \ac{BSD} license and supports both fixed-wing and multi-rotor airframes. PX4 operates with the QGroundControl GCS software, from which the UAV can be configured as well as monitored. Both ArduPilot and PX4 support satellite waypoint navigation and satellite position hold. ArduPilot and PX4 additionally support a stereo-vision navigation function and a follow-me autonomous navigation feature, respectively. 
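To make the C2 link protocols above concrete, the sketch below uses the community \texttt{pymavlink} library to talk to a MAVLink-speaking autopilot such as ArduPilot or PX4. The UDP endpoint and the 10 m takeoff altitude are placeholder values typical of simulator or companion-computer setups, and the vehicle is assumed to already be in a flight mode that accepts these commands.

\begin{verbatim}
from pymavlink import mavutil

# Listen for the autopilot on a placeholder UDP endpoint.
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()        # block until the autopilot is seen
print('Heartbeat from system', master.target_system)

# Arm the vehicle with the standard COMMAND_LONG message.
master.mav.command_long_send(
    master.target_system, master.target_component,
    mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
    0,                         # confirmation
    1, 0, 0, 0, 0, 0, 0)       # param1 = 1 -> arm

# Request a takeoff to 10 m (param7 carries the altitude).
master.mav.command_long_send(
    master.target_system, master.target_component,
    mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
    0, 0, 0, 0, 0, 0, 0, 10)
\end{verbatim}

Because both ArduPilot and PX4 understand this standard message set, an application written this way remains largely interoperable across the two flight stacks.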
The Paparazzi flight controller supports fixed-wing, flapping-wing, hybrid, and multi-rotor airframes and is released for public use under the GNU GPL \cite{paparazzi}. The Paparazzi GCS software enables UAV configuration, monitoring, and custom flight plan configuration for navigational control and guidance. The supported C2 link protocols are MAVLink, XBee, SBus, and \ac{PPM}. Paparazzi supports all the autonomous navigation features offered by ArduPilot and PX4, in addition to automatic takeoff and landing. Several other open-source flight controller software packages worth mentioning are OpenPilot \cite{openpilot}, LibrePilot \cite{librepilot}, BetaFlight \cite{beta}, dRonin \cite{dronin}, and INAV \cite{inav}. \subsection{Computational Unit} The computational resources on the UAV are a primary concern when it comes to deploying deep learning solutions. The payload capacity of the UAV and the power consumption of the processors are the two major determinants for onboard UAV processor selection. Further, given two processor platforms of comparable weight, an essential performance metric for selection is the ratio of the inference speed of the deep learning solution to the power consumption of the processor. Additional selection metrics include the memory capacity and the physical volume of the processor. There are several computational platforms, such as the Raspberry Pi 4 Model B, Odroid XU4, Jetson Tegra K1, Snapdragon Flight board, and Jetson TX1, among others, with on-chip \acp{CPU} and \acp{GPU}. Table \ref{tab:comp} compares these platforms in terms of \ac{CPU}, \ac{CPU} speed, \ac{GPU}, \ac{GPU} performance, dimensions, and memory. The Raspberry Pi 4 Model B (Pi 4B) is a small, low-cost hardware platform based on a 1.5 GHz 64-bit ARM Cortex-A72 \ac{CPU} with multiple \ac{RAM} options, developed for educational purposes. The Pi is also equipped with a Broadcom VideoCore VI \ac{GPU}. However, the Pi 4B has a considerably higher power draw than its predecessors. The Odroid XU4 is a development platform based on the Samsung Exynos 5422 octa-core \ac{CPU} and the ARM Mali-T628 6-core \ac{GPU}. The XU4 consists of two sockets with 1.4 GHz ARM Cortex-A7 and 2 GHz ARM Cortex-A15 \acp{CPU}. The Mali-T628 supports OpenGL ES 3.1/2.0/1.1 \cite{opengl} and the OpenCL 1.2 \cite{opencl} full profile. The Jetson Tegra K1 (TK1) is a development kit from NVIDIA comprising a Kepler \ac{GPU} with 192 CUDA cores and a 4-Plus-1 quad-core ARM Cortex-A15 \ac{CPU}. The TK1 has a very low power footprint while being capable of 300 Giga\ac{FLOPS} of 32-bit floating-point computation. The Jetson TX1, on the other hand, hosts an NVIDIA Maxwell \ac{GPU} with 256 CUDA cores and a quad-core ARM Cortex-A57 \ac{CPU}; its power draw under a typical CUDA load is in the range of 8--10 W. In contrast to the TK1, the TX1 comes in a much smaller form factor of 50 mm $\times$ 87 mm. The Snapdragon Flight board, based on the Snapdragon 801 processor, was introduced by Qualcomm for autonomous vehicle platforms. The board comes with a 2.26 GHz Qualcomm Krait quad-core \ac{CPU} and a Qualcomm Adreno 330 \ac{GPU} delivering nearly 148 Giga\ac{FLOPS}, along with 2 GB of \ac{RAM}. In contrast to the TX1 and TK1, the Snapdragon Flight board comes in an even smaller form factor of 58 mm $\times$ 40 mm. Such a small form factor (nearly half the size of a credit card) and light weight ($<$13 g) make it an ideal payload option for \acp{UAV}. 
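The inference-speed-per-watt selection metric suggested above is straightforward to estimate with a timing loop. The sketch below is a minimal example in which \texttt{model}, \texttt{sample\_input}, and the measured board power are placeholders that the developer must supply for their own network and candidate platform.

\begin{verbatim}
import time

def inferences_per_watt(model, sample_input, board_power_w,
                        n_runs=100):
    """Estimate inference throughput per watt on a candidate
    board; `model` is any callable running the trained network's
    forward pass, `board_power_w` the measured power draw (W)."""
    model(sample_input)               # warm-up run
    start = time.perf_counter()
    for _ in range(n_runs):
        model(sample_input)
    elapsed = time.perf_counter() - start
    throughput = n_runs / elapsed     # inferences per second
    return throughput / board_power_w

# Given two boards of comparable payload weight, prefer the one
# with the higher score (hypothetical usage).
\end{verbatim}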
\begin{table*}[!h] \caption{Computational Platforms for \acp{UAV}} \centering \def\arraystretch{1.5}% \begin{tabular}{|p{1.6cm}|p{2.4cm}|p{3cm}|p{2.1cm}|p{2cm}|} \hline \textbf{Platform} & \textbf{CPU} & \textbf{GPU} & \textbf{Dimensions} & \textbf{Memory}\\ \hline \textbf{Pi 4B} &ARM Cortex-A72 \newline Speed: 1.5 GHz &Broadcom VideoCore VI \newline 32 Giga\ac{FLOPS} &85 mm $\times$ 56 mm &RAM options: 2 GB, 4 GB, 8 GB\\ \hline \textbf{Odroid XU4} &ARM Cortex-A7\newline Speed: 1.4 GHz \newline ARM Cortex-A15\newline Speed: 2 GHz &ARM Mali-T628\newline 102.4 Giga\ac{FLOPS}&83 mm $\times$ 59 mm &2 GB RAM \newline eMMC5.0 HS400 Flash\\ \hline \textbf{Jetson TK1} &ARM Cortex-A15 \newline Speed: 2.3 GHz &Kepler 192 CUDA core\newline 300 Giga\ac{FLOPS} &127 mm $\times$ 127 mm &2 GB RAM \newline 16 GB Flash\\ \hline \textbf{Jetson TX1} &ARM Cortex-A57\newline Speed: 2 GHz &Maxwell 256 CUDA core\newline 1 Tera\ac{FLOPS} &50 mm $\times$ 87 mm &4 GB RAM \newline 16 GB Flash\\ \hline \textbf{Snapdragon Flight} &Qualcomm Krait 400 \newline Speed: 2.26 GHz &Qualcomm Adreno 330\newline 148 Giga\ac{FLOPS}&58 mm $\times$ 40 mm &2 GB \ac{RAM}\newline 32 GB Flash\\ \hline \end{tabular} \\ \label{tab:comp} \end{table*} Here, we briefly discussed a few computational platforms that can enable deep learning solutions on \ac{UAV} platforms and contrasted them on the basis of their physical and performance specifications. Next, we discuss the safety measures and regulations enforced to prevent risk and/or injury to people and property. \subsection{UAS Safety and Regulations} \subsubsection*{Safety} \label{subsec:safety} \acp{UAV} have recently become increasingly popular for a diverse array of applications including, but not limited to, personal hobbies, photography, aerial survey, precision agriculture, power-line inspection, entertainment, tactical surveillance, and border security. The \ac{FAA} projects even greater adoption of \acp{UAV} in the coming years, with an estimate of nearly 3.5 million units by 2021 \cite{faaest}. The advent of \acp{UAV} has posed significant safety and security challenges. Safety encompasses the physical risks posed to people and infrastructure as well as UAV cyber-security risks. The \ac{FAA} reported over 4889 incidents causing serious harm to people and infrastructure between 2014 and 2017 \cite{droneincident}. UAV risk factors such as obstacle collision, human factors, rogue \acp{UAV}, untimely battery depletion, and sensor errors must be carefully assessed prior to any \ac{UAV} mission. Such risk assessment becomes all the more essential when opting for self-designed \acp{UAV} as opposed to commercial drones. As discussed in Section \ref{sec:flightstack}, most commercial drones incorporate general safety measures as part of the flight controller software, such as obstacle avoidance, returning home or landing when the battery is low or a sensor fails, and geofencing. Hence, a strict \ac{UAV} safety assessment must be conducted in a systematic and regulation-compliant manner to mitigate risks to the mission as well as to people and infrastructure. 
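To make the safety features discussed above concrete, the sketch below shows the kind of geofence and altitude pre-flight check that flight controller software implements internally. The home position, geofence radius, and the 400 ft ($\approx$122 m) ceiling are illustrative constants only; in practice such enforcement is left to the flight stack itself.

\begin{verbatim}
import math

MAX_ALT_M = 121.9            # ~400 ft above ground level
GEOFENCE_RADIUS_M = 500.0    # illustrative mission boundary

def haversine_m(lat1, lon1, lat2, lon2):
    """Great-circle distance in meters between two GPS fixes."""
    r = 6371000.0
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp = math.radians(lat2 - lat1)
    dl = math.radians(lon2 - lon1)
    a = (math.sin(dp / 2) ** 2
         + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2)
    return 2 * r * math.asin(math.sqrt(a))

def waypoint_is_safe(home, wp):
    """home and wp are (lat, lon, alt_m) tuples; True if wp stays
    inside the geofence and under the altitude ceiling."""
    dist = haversine_m(home[0], home[1], wp[0], wp[1])
    return dist <= GEOFENCE_RADIUS_M and wp[2] <= MAX_ALT_M
\end{verbatim}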
\subsubsection*{Regulations} In the United States, the \ac{FAA} is the regulatory body that enforces aviation rules for air traffic control. Commercial as well as hobbyist use of \acp{UAV} must abide by the regulations enforced by the \ac{FAA}, as detailed in \cite{faadronezone}. The rules and regulations are enforced based on weight, coverage distance, application, speed, and flight altitude. The regulations restrict operating \acp{UAV} over or near people, in certain airspaces (airports, military facilities, or no-fly zones), and beyond the visual line of sight, so as to avoid accidents and injuries. Commercial \ac{UAV} operation requires pilots to obtain licenses and is restricted to daylight hours. Recreational flying involves similar rules: registering the \ac{UAV}, line-of-sight operation, daylight operation, keeping the altitude below 400 feet above the ground, staying clear of manned aircraft, people, and automobiles, and maintaining mental as well as physical alertness during drone operation. \section{Conclusion} \label{sec:Conclusion} This chapter presented how the modern era of machine learning can overcome challenges and accelerate the realization of truly autonomous \ac{UAS}. We began by presenting a tutorial study of basic deep learning and reinforcement learning techniques to refine the reader's perception and equip them for further research in this realm. Next, recent advances in deep learning and reinforcement learning techniques as applied to various autonomous \ac{UAV} tasks were reviewed in depth. The inherent challenges and open problems pertaining to the application of machine learning techniques to autonomous \ac{UAS} tasks were clearly stated to open doors for future research. Additionally, to bridge the gap between simulations and hardware implementations, we presented a detailed account of the various simulation suites, \ac{UAV} platforms, flight stacks, and regulatory standards. The various challenges and factors to consider while prototyping a \ac{UAV} for machine learning solutions were also discussed. We hope this chapter serves as a comprehensive handbook that paves a clear roadmap for future research and development in pursuing autonomous \ac{UAS} solutions. \bibliographystyle{spmpsci} 
\end{sloppy} \end{refguide} \end{document} \section*{Contents} \contentsline {section}{\numberline {1}Introduction}{4}{section.0.1} \contentsline {section}{\numberline {2}SVMult Class Features --- Contribution-wise}{5}{section.0.2} \contentsline {subsection}{\numberline {2.1}Initializing the SVMult Class}{5}{subsection.0.2.1} \contentsline {subsection}{\numberline {2.2}SVMult Class Options}{6}{subsection.0.2.2} \contentsline {subsection}{\numberline {2.3}Required and Recommended Packages}{9}{subsection.0.2.3} \contentsline {subsection}{\numberline {2.4}SVMult Commands and Environments in Text Mode}{11}{subsection.0.2.4} \contentsline {subsection}{\numberline {2.5}SVMult Commands in Math Mode}{15}{subsection.0.2.5} \contentsline {subsection}{\numberline {2.6}SVMult Theorem-Like Environments}{16}{subsection.0.2.6} \contentsline {subsection}{\numberline {2.7}SVMult Commands for the Figure and}{}{} \contentsline {subsection}{\numberline {2.7}Table Environments}{19}{subsection.0.2.7} \contentsline {subsection}{\numberline {2.8}SVMult Environments for Exercises,}{}{} \contentsline {subsection}{\numberline {}Problems and Solutions}{21}{subsection.0.2.8} \contentsline {subsection}{\numberline {2.9}SVMult Commands for Styling References}{22}{subsection.0.2.9} \contentsline {section}{\numberline {3}SVMult Class Features -­ Book-wise}{22}{section.0.3} \contentsline {section}{References}{25}{section*.4} \begin{refguide} \begin{sloppy} \parindent=0pt% \parskip=1em% \section{Introduction}\label{sec:1} This reference guide gives a detailed description of the \LaTeX2$_{\varepsilon}$ \textsc{SVMult} document class Version 5.x and its special features designed to facilitate the preparation of scientific books for Springer Nature. It always comes as part of the \textsc{SVMult} tool package and should not be used on its own. \clearpage The components of the \textsc{SVMult} tool package are: \begin{itemize}\leftskip15pt \item The \textit{Springer Nature} \LaTeX~class \verb|SVMult.cls|, MakeIndex styles \texttt{svind.ist}, \texttt{svindd.ist}, BibTeX styles \texttt{spmpsci.bst}, \texttt{spphys.bst}, \texttt{spbasic.bst}{\break} as well as the \textit{templates} with preset class options, packages and coding{\break} examples; \item[]\textit{Tip}: Copy all these files to your working directory, run \LaTeX2$_{\varepsilon}$, BibTeX and MakeIndex---as is applicable--- and and produce your own example *.dvi file; rename the template files as you see fit and use them for your own input. \item \textit{Author Instructions} with style and coding instructions. \item[]\textit{Tip}: Follow these instructions to set up your files, to type in your text and to obtain a consistent formal style in line with the Springer Nature layout specifications; use these pages as checklists before you submit your manuscript data. \item The \textit{Reference Guide} describing \textsc{SVMult} features with regards to their functionality. \item[]\textit{Tip}: Use it as a reference if you need to alter or enhance the default settings of the \textsc{SVMult} document class and/or the templates. \end{itemize} For \textit{editors} only the \textsc{SVMult} tool package is enhanced by \begin{itemize}\leftskip15pt \item the \textit{Editor Instructions} for compiling multiple contributions to a mutual book. \end{itemize} The documentation in the \textsc{SVMult} tool package is not intended to be a general introduction to \LaTeX2$_{\varepsilon}$ or \TeX. For this we refer you to [1--3]. 
Should we refer in this tool package to standard tools or packages that are not installed on your system, please consult the \textit{Comprehensive \TeX\ Archive Network} (CTAN) at [4--6]. \textsc{SVMult} was derived from the \LaTeX2$_{\varepsilon}$ article.cls. The main differences from the standard article class are the presence of \begin{itemize}\leftskip15pt \item multiple class options, \item a number of newly built-in environments for individual text structures like theorems, exercises, lemmas, proofs, etc., \item enhanced environments for the layout of figures and captions, and \item new declarations, commands and useful enhancements of standard environments to facilitate your math and text input and to ensure their output is in line with the Springer Nature layout standards. \end{itemize}% Nevertheless, text, formulae, figures, and tables are typed using the standard \LaTeX2$_{\varepsilon}$ commands. The standard sectioning commands are also used. Always give a \verb|\label| where possible and use \verb|\ref| for cross-referencing. Such cross-references may then be converted to hyperlinks in any electronic version of your book. The \verb|\cite| and \verb|\bibitem| mechanism for bibliographic references is also obligatory. \section{SVMult Class Features --- Contribution-wise}\label{sec:2} \subsection{Initializing the SVMult Class}\label{subsec:1} To use the document class, enter \cprotect\boxtext{\verb|\documentclass [|$\langle$\textit{options}$\rangle$\verb|] {svmult}|} at the beginning of your input. \subsection{SVMult Class Options}\label{subsec:2} Choose from the following list of class options if you need to alter the default layout settings of the \textsc{SVMult} document class. Please note that the optional features should only be chosen if instructed so by the editor of your book. \textbf{Page Style} \begin{description}[\textit{norunningheads}] \item[\textit{default}] twoside, single-spaced output, contributions starting always on a recto page \item[\textit{referee}] produces double-spaced output for proofreading \item[\textit{footinfo}] generates a footline with name, date, $\ldots$ at the bottom of each page \item[\textit{norunningheads}] suppresses any headers and footers \end{description} \textit{N.B.} If you want to use both options, you must type \texttt{referee} before \texttt{footinfo}. \textbf{Body Font Size} \begin{description}[\textit{11pt, 12pt}] \item[\textit{default}] 10 pt \item[\textit{11pt, 12pt}] are ignored \end{description} \textbf{Language for Fixed \LaTeX\ Texts} In the \textsc{SVMult} class we have changed a few standard \LaTeX\ texts (e.g. Figure to Fig. in figure captions) and assigned names to newly defined theorem-like environments so that they conform with Springer Nature style requirements. \begin{description}[\textit{francais}] \item[\textit{default}] English \item[\textit{deutsch}] translates fixed \LaTeX\ texts into their German equivalent \item[\textit{francais}] same as above for French \end{description} \textbf{Text Style} \begin{description}[\textit{graybox}] \item[\textit{default}] plain text \item[\textit{graybox}] automatically activates the packages \verb|color| and \verb|framed| and places a box with 15 percent gray shade in the background of the text when you use the \textsc{SVMult} environment \verb|\begin{svgraybox}...\end{svgraybox}|, see Sects.~\ref{subsec:3},~\ref{subsec:4}. 
\end{description} \textbf{Equations Style} \begin{description}[\textit{vecarrow}] \item[\textit{default}] centered layout, vectors boldface (\textit{math style}) \item[\textit{vecphys}] produces boldface italic vectors (\textit{physics style}) when \verb|\vec|-command is used \item[\textit{vecarrow}] depicts vectors with an arrow above when \verb|\vec|-command is used \end{description} \textbf{Numbering and Layout of Headings} \begin{description}[\textit{nosecnum}] \item[\textit{default}] all section headings down to subsubsection level are numbered, second and subsequent lines in a multiline numbered heading are indented; Paragraph and Subparagraph headings are displayed but not numbered; figures, tables and equations are numbered chapterwise, individual theorem-like environments are counted consecutively throughout the book. \item[\textit{nosecnum}] suppresses any section numbering; figures, tables and equations are counted chapterwise displaying the chapter counter, if applicable. \end{description} \textbf{Numbering and Counting of Built-in Theorem-Like Environments} \begin{description}[\textit{envcountresetchap}] \item[\textit{default}] each built-in theorem-like environment gets its own counter without any chapter or section prefix and is reset for each unnumbered contribution. \item[\textit{envcountchap}] Each built-in environment gets its own counter and is numbered \textit{chapterwise}. \textit{To be selected as default setting for a volume with numbered contributions}. \item[\textit{envcountsect}] each built-in environment gets its own counter and is numbered \textit{sectionwise} \item[\textit{envcountsame}] all built-in environments follow a \textit{single counter} without any chapter or section prefix, and are counted consecutively throughout the book \item[\textit{envcountresetchap}] each built-in environment gets its own counter without any chapter or section prefix but with the counter \textit{reset for each chapter} \item[\textit{envcountresetsect}] each built-in environment gets its own counter without any chapter or section prefix but with the counter \textit{reset for each section} \end{description} \textit{N.B.1} When the option \textit{envcountsame} is combined with the options \textit{envcount-resetchap} or \textit{envcountresetsect} all predefined environments get the same counter; but the counter is reset for each chapter or section. \textit{N.B.2} When the option \textit{envcountsame} is combined with the options \textit{envcountchap} or \textit{envcountsect} all predefined environments get a common counter with a chapter or section prefix; but the counter is reset for each chapter or section. \textit{N.B.3} We have designed a new easy-to-use mechanism to define your own environments, see Sect.~\ref{subsec:6}. \textit{N.B.4} Be careful not to use layout options that contradict the parameter of the selected environment option and vice versa. \marginpar{\textbf{Warning !}} Use the Springer Nature class option \begin{description}[\textit{nospthms}] \item[\textit{nospthms}] \textit{only} if you want to suppress all defined theorem-like environments and use the theorem environments of original \LaTeX\ package or other theorem packages instead. (Please check this with your editor.) \end{description} \textbf{References} \begin{description}[\textit{chaprefs}] \item[\textit{default}] the list of references is set as an unnumbered section at the end of your contribution, with automatically correct running heads and an entry in the table of contents. 
The list itself is set in small print and numbered with ordinal numbers. \item[\textit{chaprefs}] sets the reference list as an unnumbered chapter e.g. at the end of the book \item[\textit{natbib}] sorts reference entries in the author-year system (make sure that you have the natbib package by Patrick~W. Daly installed. Otherwise it can be found at the \textit{Comprehensive \TeX\ Archive Network} (CTAN...texarchive/macros/latex/contrib/supported/natbib/), see [4-­6] \item[\textit{oribibl}] use the Springer Nature class option \textit{only} if you want to set reference numbers in square brackets without automatic TOC entry etc., as is the case in the original \LaTeX\ bibliography environment. But please note that most page layout features are nevertheless adjusted to Springer Nature requirements. (Please check usage of this option with your editor.) \end{description} \subsection{Required and Recommended Packages}\label{subsec:3} \textsc{SVMult} document class has been tested with a number of Standard \LaTeX\ tools. Below we list and comment on a selection of recommended packages for preparing fully formatted book manuscripts for Springer Nature. If not installed on your system, the source of all standard \LaTeX\ tools and packages is the \textit{Comprehensive \TeX\ Archive Network} (CTAN) at [4-­6]. \textbf{Font Selection} \begin{tabular}{p{7.5pc}@{\qquad}p{18.5pc}} \texttt{default} &Times font family as default text body font together with Helvetica clone as sans serif and Courier as typewriter font.\\ \texttt{newtxtext.sty} and \texttt{newtxmath.sty} & Supports roman text font provided by a Times clone, sans serif based on a Helvetica clone, typewriter faces, plus math symbol fonts whose math italic letters are from a Times Italic clone \end{tabular} If the packages `\texttt{newtxtext.sty} and \texttt{newtxmath.sty}' are not already installed with your \LaTeX\ they can be found at https://ctan.org/tex.archive/ fonts/newtx at the \textit{Comprehensive \TeX\ Archive Network} (CTAN), see [4-­6]. If Times Roman is not available on your system you may revert to CM fonts. However, the \textsc{SVMult} layout requires font sizes which are not part of the default set of the computer modern fonts. \begin{description}[\texttt{type1cm.sty}] \item[\texttt{type1cm.sty}] The \texttt{type1cm} package enhances this default by enabling scalable versions of the (Type 1) CM fonts. If not already installed with your \LaTeX\ it can be found at ../tex-archive/macros/latex/contrib/type1cm/ at the \textit{Comprehensive \TeX\ Archive Network} (CTAN), see [4-­6]. \end{description} \textbf{Body Text} When you select the \textsc{SVMult} class option \texttt{[graybox]} the packages \texttt{framed} and color are required, see Sect. \ref{subsec:2} \begin{description}[\texttt{framed.sty}] \item[\texttt{framed.sty}] makes it possible that framed or shaded regions can break across pages. \item[\texttt{color.sty}] is part of the \texttt{graphics} bundle and makes it possible to selct the color and define the percentage for the background of the box. \end{description} \textbf{Equations} A useful package for subnumbering each line of an equation array can be found at ../tex-archive/macros/latex/contrib/supported/subeqnarray/ at the \textit{Comprehensive \TeX\ Archive Network}(CTAN), see [4-­6]. 
\begin{description}[\texttt{subeqnarray.sty}] \item[\texttt{subeqnarray.sty}] defines the \texttt{subeqnarray} and \texttt{subeqnarray*} environments, which behave like the equivalent \texttt{eqnarray} and \texttt{eqnarray*} environments, except that the individual lines are numbered as 1a, 1b, 1c, etc. \end{description} \textbf{Footnotes} \begin{description}[\texttt{footmisc.sty}] \item[\texttt{footmisc.sty}] used with style option \texttt{[bottom]} places all footnotes at the bottom of the page \end{description} \pagebreak \textbf{Figures} \begin{description}[\texttt{graphicx.sty}] \item[\texttt{graphicx.sty}] tool for including graphics files (preferrably \texttt{eps} files) \end{description} \textbf{References} \begin{description}[\texttt{natbib.sty}] \item[\textit{default}] Reference lists are numbered with the references being cited in the text by their reference number \item[\texttt{natbib.sty}] sorts reference entries in the author­year system (among other features). \textit{N.B.} This style must be installed when the class option \textit{natbib} is used, see Sect. \ref{subsec:2} \item[\texttt{cite.sty}] generates compressed, sorted lists of numerical citations: e.g. [8,{\break}11-­16]; preferred style for books published in a print version only \end{description} \textbf{Index} \begin{description}[\texttt{multicol.sty}] \item[\texttt{makeidx.sty}] provides and interprets the command \verb|\printindex| which ``prints'' the externally generated index file *.ind. \item[\texttt{multicol.sty}] balances out multiple columns on the last page of your subject index, glossary or the like \end{description} \textit{N.B.} Use the \textit{MakeIndex} program together with one of the folllowing styles \begin{description}[\texttt{svindd.ist}] \item[\texttt{svind.ist}] for English texts \item[\texttt{svindd.ist}] for German texts \end{description} to generate a subject index automatically in accordance with Springer Nature layout requirements. For a detailed documentation of the program and its usage we refer you to [1]. \subsection{SVMult Commands and Environments in Text Mode}\label{subsec:4} Use the command \cprotect\boxtext{% \verb|\title*{}|} to typeset an unnumbered heading of your contribution. \cprotect\boxtext{\verb|\title{}|} to typeset a numbered heading of your contribution. Use the new command \cprotect\boxtext{\verb|\subtitle[|$\langle$\textit{subtitle}$\rangle$\verb|]|} to typeset a possible subtitle to your contribution title. Beware that this subtitle is not tranferred automatically to the table of contents. Alternatively use the \verb|\title|-command to typeset your subtitle together with the contribution title and separate the two titles by a period or an en-dash . \marginpar{\textbf{Alternative !}} Use the command \cprotect\boxtext{\verb|\toctitle{}|} if you want to alter the line break of your heading for the table of content. Use the command \cprotect\boxtext{\verb|\titlerunning{}|} if you need to abbreviate your heading to fit into the running head. Use the command \cprotect\boxtext{\verb|\author{}|} for your name(s). If there is more than one author, the names should be separated by \verb|\and|. The author names will appear beneath the contribution's title. Use the command \cprotect\boxtext{\verb|\tocauthor{}|} to change manually the list of authors to appear in the table of contents. Use the command \cprotect\boxtext{\verb|\authorrunning{}|} if there are more than two authors; abbreviate the list of authors to the main author's name and add ``et al.'' for the running head. 
Use the command \cprotect\boxtext{\verb|\institute[|$\langle$\textit{author name}$\rangle$\verb|\at|$\langle$\textit{affiliation details separated by commas}$\rangle$\verb|\email|$\langle$\textit{email address}$\rangle$\verb|]|} when the authors' names and affiliations shall appear at the bottom of the contribution's first page. Please list multiple authors and/or affiliations by using the command \verb|\and|, cf. the example below: \verb|\institute{J.B. Doe|\\ \verb|\at Doe Institute, 281 Prime Street, Daisy Town, NA 02467,USA\\|\\ \verb|Tel.: +127-47-678901, Fax: +127-47-678907|\\ \verb|\and|\\ \verb|J.B. Doe|\\ \verb|\and|\\ \verb|S.Q. Public|\\ \verb|\at Public-Enterprises|\\ \verb|\and|\\ \verb|J.A. Smith|\\ \verb|\at Smith University,\email{[email protected]}}| Use the command \cprotect\boxtext{\verb|\maketitle|} to compile the header of your contribution. To create and format a short table of contents enter prior to the command \verb|\dominitoc|, \textit{see below} \cprotect\boxtext{\verb|\setcounter{minitocdepth}{|$\langle$$n$$\rangle$\verb|}|} with $n$ depicting the highest sectioning level of your short table of content (default is 0) and then enter \cprotect\boxtext{\verb|\dominitoc|} Use the new command \cprotect\boxtext{\verb|\motto[|$\langle$\textit{textwidth}$\rangle$\verb|]{|$\langle$\textit{text}$\rangle$\verb|}|} to include \textit{special text}, e.g. mottos, slogans, between the chapter heading and the actual content of the chapter. The default font size is ``small'', the default font shape is ``italic''. In the optional argument \verb|[|$\langle$\textit{textwidth}$\rangle$\verb|]| alternative widths may be indicated. The argument \verb|{|$\langle$\textit{text}$\rangle$\verb|}| contains the text of your inclusion. It may not contain any empty lines. To introduce vertical spaces use \verb|\\[height]|. The command must be placed \textit{before} the \verb|\title| command. Use the new commands \cprotect\boxtext{\begin{tabular}{l} \verb|\abstract{|$\langle$\textit{text}$\rangle$\verb|}|\\ \verb|\abstract*{|$\langle$\textit{text}$\rangle$\verb|}| \end{tabular}} to typeset an abstract at the beginning of a contribution. The text of \verb|\abstract*| will be used for compiling \verb|html| abstracts for the online publication of the individual chapters \verb|www.SpringerLink.com|. Please do not use the standard \LaTeX\ environment \marginpar{\textbf{Warning !!!}} \verb|\begin{abstract}...\end{abstract}| -- it will be ignored when used with the \textsc{SVMult} document class! Use the command \cprotect\boxtext{\verb|\keywords{|$\langle$\textit{keyword list}$\rangle$\verb|}|} \textit{within} the abstract environment to specify your keywords and/or subject classification. Use the new commands \cprotect\boxtext{\begin{tabular}{l} \verb|\runinhead[|$\langle$\textit{title}$\rangle$\verb|]|\\ \verb|\subruninhead[|$\langle$\textit{title}$\rangle$\verb|]| \end{tabular}} when you want to use unnumbered run-in headings to structure your text. Use the new environment command \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{svgraybox}|\\ \verb||$\langle$\textit{text}$\rangle$\verb||\\ \verb|\end{svgraybox}| \end{tabular}} to typeset complete paragraphs within a box showing a 15 percent gray shade. \textit{N.B.} Make sure to select the \textsc{SVMult} class option \verb|[graybox]| in order to have all the required style packages available, see Sects. \ref{subsec:2}, \ref{subsec:3}. 
\marginpar{\textbf{Warning !}} \clearpage Use the new environment command \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{petit}|\\ \verb||$\langle$\textit{text}$\rangle$\verb||\\ \verb|\end{petit}| \end{tabular}} to typeset complete paragraphs in small print. Use the enhanced environment command \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{description}[|$\langle$\textit{largelabel}$\rangle$\verb|]|\\ \verb|\item[|$\langle$\textit{label1}\verb|] |$\langle$\textit{text1}$\rangle$\verb||\\ \verb|\item[|$\langle$\textit{label2}\verb|] |$\langle$\textit{text2}$\rangle$\verb||\\ \verb|\end{description}| \end{tabular}} for your individual itemized lists. The new optional parameter \verb|[|$\langle$\textit{largelabel}$\rangle$\verb|]| lets you specify the largest item label to appear within the list. The texts of all items are indented by the width of \verb||$\langle$\textit{largelabel}$\rangle$\verb|| and the item labels are typeset flush left within this space. Note, the optional parameter will work only two levels deep. Use the commands \cprotect\boxtext{\begin{tabular}{l} \verb|\setitemindent{|$\langle$\textit{largelabel}$\rangle$\verb|}|\\ \verb|\setitemitemindent{|$\langle$\textit{largelabel}$\rangle$\verb|}| \end{tabular}} if you need to customize the indention of your ``itemized'' or ``enumerated'' environments. \subsection{SVMult Commands in Math Mode}\label{subsec:5} Use the new or enhanced symbol commands provided by the \textsc{SVMult} document class: \cprotect\boxtext{\begin{tabular}{ll} \verb|\D| &upright d for differential d\\ \verb|\I| &upright i for imaginary unit\\ \verb|\E| &upright e for exponential function\\ \verb|\tens| &depicts tensors as sans serif upright\\ \verb|\vec| &depicts vectors as boldface characters instead of the arrow accent\\ \end{tabular}} \textit{N.B.} By default the \textsc{SVMult} document class depicts Greek letters as italics because they are mostly used to symbolize variables. However, when used as operators, abbreviations, physical units, etc. they should be set upright. All \textit{upright} upper-case Greek letters have been defined in the \textsc{SVMult} document class and are taken from the \TeX\ alphabet. Use the command prefix \cprotect\boxtext{\verb|\var...|} with the upper-case name of the Greek letter to set it upright, e.g. \verb|\varDelta|. Many \textit{upright} lower-case Greek letters have been defined in the \textsc{SVMult} document class and are taken from the PostScript Symbol font. Use the command prefix \cprotect\boxtext{\verb|\u...|} with the lower-case name of the Greek letter to set it upright, e.g. \verb|\umu|. If you need to define further commands use the syntax below as an example: \cprotect\boxtext{\verb|\newcommand{\allmodesymb{\greeksym}{a}}|}{\allmodesymb{\greeksym}{a}}|} \subsection{SVMult Theorem-Like Environments}\label{subsec:6} For individual text structures such as theorems, definitions, and examples, the \textsc{SVMult} document class provides a number of \textit{pre-defined} environments which conform with the specific Springer Nature layout requirements. 
Use the environment command \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{|$\langle$\textit{name of environment}$\rangle$\verb|}[|$\langle$\textit{optional material}$\rangle$\verb|]|\\ \verb||$\langle$\textit{text for that environment}$\rangle$\verb||\\ \verb|\end{|$\langle$\textit{name of environment}$\rangle$\verb|}| \end{tabular}} for the newly defined \textit{environments}.\\ \textit{Unnumbered environments} will be produced by \verb|claim| and \verb|proof|. \clearpage \textit{Numbered environments} will be produced by \verb|case, conjecture, corollary, definition, example, exercise|, \verb|lemma, note, problem, property, proposition, question, remark|, \verb|solution|, and \verb|theorem|. The optional argument \verb|[|$\langle$\textit{optional material}$\rangle$\verb|]| lets you specify additional text which will follow the environment caption and counter. Use the new symbol command \cprotect\boxtext{\verb|\qed|} to produce an empty square at the end of your proof. In addition, use the new declaration \cprotect\boxtext{\verb|\smartqed|} to move the position of the predefined qed symbol to be flush right (in text mode). If you want to use this feature throughout your book the declaration must be set in the \textit{preamble}, otherwise it should be used individually in the relevant environment, i.e. proof. \section*{Example} \verb|\begin{proof}|\\ \verb|\smartqed|\\ \verb|Text|\\ \verb|\qed|\\ \verb|\end{proof}| Furthermore the functions of the standard \verb|\newtheorem| command have been \textit{enhanced} to allow a more flexible font selection. All standard functions though remain intact (e.g. adding an optional argument specifying additional text after the environment counter). Use the mechanism \cprotect\boxtext{\verb|\spdefaulttheorem{|$\langle$\textit{env name}$\rangle$\verb|}{|$\langle$\textit{caption}$\rangle$\verb|}{|$\langle$\textit{cap font}$\rangle$\verb|}{|$\langle$\textit{body font}$\rangle$\verb|}|} to define an environment compliant with the selected class options (see Sect.\ref{subsec:2}) and designed as the predefined theorem-like environments. The argument \verb|{|$\langle$\textit{env name}$\rangle$\verb|}| specifies the environment name; \verb|{|$\langle$\textit{caption}$\rangle$\verb|}| specifies the environment's heading; \verb|{|$\langle$\textit{cap font}$\rangle$\verb|}| and \verb|{|$\langle$\textit{body font}$\rangle$\verb|}| specify the font shape of the caption and the text body. \textit{N.B.} If you want to use optional arguments in your definition of a theorem-like environment as done in the standard \verb|\newtheorem| command, see below. Use the mechanism \cprotect\boxtext{\verb|\spnewtheorem{|$\langle$\textit{env name}$\rangle$\verb|}[|$\langle$\textit{numbered like}$\rangle$\verb|]{|$\langle$\textit{caption}$\rangle$\verb|}{|$\langle$\textit{cap font}$\rangle$\verb|}{|$\langle$\textit{body font}$\rangle$\verb|}|} to define an environment that shares its counter with another predefined environment \verb|[|$\langle$\textit{numbered like}$\rangle$\verb|]|. The optional argument \verb|[|$\langle$\textit{numbered like}$\rangle$\verb|]| specifies the environment with which to share the counter. \textit{N.B.} If you select the class option ``envcountsame'' the only valid ``numbered like'' argument is \verb|[theorem]|. 
Use the defined mechanism \cprotect\boxtext{\verb|\spnewtheorem{|$\langle$\textit{env name}$\rangle$\verb|}{|$\langle$\textit{caption}$\rangle$\verb|}[|$\langle\langle$\textit{within}$\rangle\rangle$\verb|]{|$\langle$\textit{cap font}$\rangle$\verb|}{|$\langle$\textit{body font}$\rangle$\verb|}|} to define an environment whose counter is prefixed by either the chapter or section number (use \verb|[chapter]| or \verb|[section]| for \verb|[|$\langle$\textit{within}$\rangle$\verb|]|). Use the defined mechanism \cprotect\boxtext{\verb|\spnewtheorem*{|$\langle$\textit{env name}$\rangle$\verb|}{|$\langle$\textit{caption}$\rangle$\verb|}{|$\langle$\textit{cap font}$\rangle$\verb|}{|$\langle$\textit{body font}$\rangle$\verb|}|} to define an \textit{unnumbered} environment such as the pre-defined unnumbered environments \textit{claim} and \textit{proof}. Use the newly defined declaration \cprotect\boxtext{\verb|\nocaption|} in the argument \verb|{|$\langle$\textit{caption}$\rangle$\verb|}| if you want to skip the environment caption and use an environment counter only. Use the defined environment \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{theopargself}|\\ ...\\ \verb|\end{theopargself}| \end{tabular}} as a wrapper to any theorem-like environment defined with the mechanism. It suppresses the brackets of the optional argument specifying additional text after the environment counter. \subsection{SVMult Commands for the Figure and Table Environments}\label{subsec:7} Use the new declaration \cprotect\boxtext{\verb|\sidecaption[|$\langle$\textit{pos}$\rangle$\verb|]|} to move the figure caption from beneath the figure (\textit{default}) to the lower left-hand side of the figure. The optional parameter \verb|[t]| moves the figure caption to the upper left-hand side of the figure \textit{N.B.1} (1) Make sure the declaration \verb|\sidecaption| follows the \verb|\begin{figure}| command, and (2) remember to use the standard \verb|\caption{}| command for your caption text. \textit{N.B.2} This declaration works only if the figure width is less than 7.8~cm. The caption text will be set raggedright if the width of the caption is less than 3.4~cm. Use the new declaration \cprotect\boxtext{\verb|\samenumber|} \textit{within} the figure and table environment -- directly after the \verb|\begin{|$\langle$\textit{environment}$\rangle$\verb|}| command -- to give the caption concerned the same counter as its predecessor (useful for long tables or figures spanning more than one page, see also the declaration \verb|\subfigures| below. To arrange multiple figures in a single environment use the newly defined commands \cprotect\boxtext{\verb|\leftfigure[|$\langle$\textit{pos}$\rangle$\verb|]| and \verb|\rightfigure[|$\langle$\textit{pos}$\rangle$\verb|]|} \textit{within} a \verb|{minipage}{\textwidth}| environment. To allow enough space between two horizontally arranged figures use \verb|\hspace{\fill}| to separate the corresponding \verb|\includegraphics{}| commands . The required space between vertically arranged figures can be controlled with \verb|\\[12pt]|, for example. The default position of the figures within their predefined space is flush left. The optional parameter \verb|[c]| centers the figure, whereas \verb|[r]| positions it flush right -­ use the optional parameter \textit{only} if you need to specify a position other than flush left. 
Use the newly defined commands \cprotect\boxtext{\verb|\leftcaption{}| and \verb|\rightcaption{}|} \textit{outside} the \verb|minipage| environment to put two figure captions next to each other. Use the newly defined command \cprotect\boxtext{\verb|\twocaptionwidth{|$\langle$\textit{width}$\rangle$\verb|}{|$\langle$\textit{width}$\rangle$\verb|}|} to overrule the default horizontal space of 5.4~cm provided for each of the abovedescribed caption commands. The first argument corresponds to \verb|\leftcaption| and the latter to \verb|\rightcaption|. Use the new declaration \cprotect\boxtext{\verb|\subfigures|} \textit{within} the figure environment -- directly after the \verb|\begin{figure}| command -- to subnumber multiple captions alphabetically within a single figure-environment. \textit{N.B.}: When used in combination with \verb|\samenumber| the main counter remains the same and the alphabetical subnumbering is continued. It works properly only when you stick to the sequence \verb|\samenumber\subfigures|. If you do not include your figures as electronic files use the defined command \cprotect\boxtext{\verb|\mpicplace{|$\langle$\textit{width}$\rangle$\verb|}{|$\langle$\textit{height}$\rangle$\verb|}|} to leave the desired amount of space for each figure. This command draws a vertical line of the height you specified. Use the new command \cprotect\boxtext{\verb|\svhline|} for setting in tables the horizontal line that separates the table header from the table content. \subsection{SVMult Environments for Exercises, Problems and Solutions}\label{subsec:8} Use the environment command \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{prob}|\\ \verb|\label{|$\langle$\textit{problem:key}$\rangle$\verb|}|\\ \verb||$\langle$\textit{problem text}$\rangle$\verb||\\ \verb|\end{prob}| \end{tabular}} to typeset and number each problem individually. To facilitate the correct numbering of the solutions we have also defined a \textit{solution environment}, which takes the problem's key, i.e. \verb||$\langle$\textit{problem:key}$\rangle$\verb|| (see above) as argument. Use the environment syntax \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{sol}{|$\langle$\textit{problem:key}$\rangle$\verb|}|\\ \verb||$\langle$\textit{solution text}$\rangle$\verb||\\ \verb|\end{sol}| \end{tabular}} to get the correct (i.e. problem $=$) solution number automatically. \subsection{SVMult Commands for Styling References}\label{subsec:9} The command \cprotect\boxtext{\verb|\biblstarthook{|$\langle$\textit{text}$\rangle$\verb|}|} allows the inclusion of explanatory \textit{text} between the bibliography heading and the actual list of references. The command must be placed before the \verb|thebibliography| environment. \section{SVMult Class Features -­ Book-wise}\label{sec:3} In addition to the \textit{Editor Instructions} and the details described in the previous sections of this \textit{Reference Guide} you find below a list of further \textsc{SVMult} class options, declarations and commands which you may find especially useful when compiling all contributions to a single book. Use the environment syntax \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{dedication}|\\ \verb||$\langle$\textit{text}$\rangle$\verb||\\ \verb|\end{dedication}| \end{tabular}} to typeset a dedication or quotation at the very beginning of the in book. 
Use the new commands \cprotect\boxtext{\begin{tabular}{l} \verb|\foreword|\\ \verb|\preface|\\ \verb|\contributors| \end{tabular}} to typeset a \textit{Foreword, Preface, or List of Contributors} with automatically generated runnings heads. Use the environment syntax \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{thecontriblist}|\\ \verb||$\langle$\textit{author name}$\rangle$\verb||\\ \verb|\at |$\langle$\textit{affiliation details separated by commas}$\rangle$\verb||\\ \verb|\email{|$\langle$\textit{email address}$\rangle$\verb|}|\\ \verb|\and|\\ \verb||$\langle$\textit{author name}$\rangle$\verb||\\ \verb|\at |$\langle$\textit{XYZ Institute, Technical University, Albert-Schweitzer-Str. 34, 1000 Berlin, Germany}$\rangle$\verb||\\ \verb|\email{|$\langle$\textit{[email protected]}$\rangle$\verb|}|\\ \verb|\end{thecontriblist}| \end{tabular}} to list and style the names and affiliation details of the contributors. Use the commands \cprotect\boxtext{\begin{tabular}{l} \verb|\extrachap{|$\langle$\textit{heading}$\rangle$\verb|}|\\ \verb|\Extrachap{|$\langle$\textit{heading}$\rangle$\verb|}| \end{tabular}} to typeset---in the front or back matter of the book---an extra unnumbered chapter with your preferred heading and automatically generated runnings heads. \verb|\Extrachap| furthermore generates an automated TOC entry. Use the new command \cprotect\boxtext{\verb|\partbacktext[|$\langle$\textit{text}$\rangle$\verb|]|} to typeset a text on the back side of a part title page. N.B. The command must be placed \textit{before} the \verb|part|-command. Use the new command \cprotect\boxtext{\verb|\motto{|$\langle$\textit{text}$\rangle$\verb|}|} to include \textit{special text}, e.g. mottos, slogans, between the chapter heading and the actual content of the chapter. The argument \verb|{|$\langle$\textit{text}$\rangle$\verb|}| contains the text of your inclusion. It may not contain any empty lines. To introduce vertical spaces use \verb|\\[height]|. If needed, the you may indicate an alternative widths in the optional argument. N.B. The command must be placed \textit{before} the relevant \verb|heading|-command. Use the commands \cprotect\boxtext{\begin{tabular}{l} \verb|\abstract{|$\langle$\textit{text}$\rangle$\verb|}|\\ \verb|\abstract*{|$\langle$\textit{text}$\rangle$\verb|}| \end{tabular}} to typeset an abstract at the beginning of a contribution. The text of \verb|\abstract*| will be used for compiling \verb|html| abstracts for the online publication of the individual chapters \verb|www.SpringerLink.com|. Please do not use the standard \LaTeX\ environment \marginpar{\textbf{Warning !!!}} \verb|\begin{abstract}...\end{abstract}| -- it will be ignored when used with the \textsc{SVMult} document class! Use the commands \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{trailer}{|$\langle$\textit{Trailer Head}$\rangle$\verb|}|\\ \verb|...|\\ \verb|\end{trailer}| \end{tabular}} If you want to emphasize complete paragraphs of texts in an \verb|Trailer Head|. Use the commands \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{question}{|$\langle$\textit{Questions}$\rangle$\verb|}|\\ \verb|...|\\ \verb|\end{question}| \end{tabular}} If you want to emphasize complete paragraphs of texts in an \verb|Questions|. \eject Use the commands \cprotect\boxtext{\begin{tabular}{l} \verb|\begin{important}{|$\langle$\textit{Important}$\rangle$\verb|}|\\ \verb|...|\\ \verb|\end{important}| \end{tabular}} If you want to emphasize complete paragraphs of texts in an \verb|Important|. 
The \ac{UAS} market is growing at a \ac{CAGR} of 15.5\% and is projected to reach USD 45.8 billion by 2025 \cite{UAV_Market}. While this growth is extremely promising, several challenges must be overcome before UAS can achieve their full potential. The majority of today's UASs are predominantly controlled by an operator and depend on reliable wireless communication links to maintain control and accomplish their tasks. As the number of these systems increases and mission complexity escalates, autonomy will play a crucial role in the next generation of UAS. In the next decade, we will see a strong push towards autonomy for UAS, much as autonomy has evolved in markets like manufacturing, the automotive industry, and other robotics-related areas. When it comes to autonomy, there are several definitions and levels of autonomy claimed by manufacturers; similarly, several definitions of and requirements for the various levels of autonomy exist in the literature.
According to \cite{Level_autonomy}, autonomy in UAS can be divided into five levels as follows:
\begin{itemize}
\item \textbf{Level 1 - Pilot Assistance:} At this initial level, the UAS operator maintains control of the overall operation and safety of the UAS. Meanwhile, the UAS can take over at least one function (to support navigation or maintain flight stability) for a limited period of time. Therefore, at this level, the UAS is never in control of both speed and direction of flight simultaneously; these controls always remain with the operator.
\item \textbf{Level 2 - Partial Automation:} Here, the UAS is capable of taking control of altitude, heading, and speed in some limited scenarios. It is important to understand that the operator is still responsible for the safe operation of the UAS and hence needs to keep monitoring the environment and flight path so as to take control when needed. This type of automation is predominantly used for applications with pre-planned paths and schedules. At this level, the UAS is said to be capable of \textit{sensing}.
\item \textbf{Level 3 - Conditional Automation:} This case is similar to Level 2 with the exception that the UAS can notify the operator, using its onboard sensors, if intervention is needed. This means the operator can be somewhat more disengaged than at Level 2 and acts as the backup controller. It is important to understand that at this level the scenarios of operation are relatively static; if any change in operating conditions is detected, the UAS alerts the operator to take over control. At this level, the UAS is said to be capable of \textit{sense and avoid}.
\item \textbf{Level 4 - High Automation:} At this level, the UAS is designed to operate without operator involvement in several circumstances, with the capability to detect and avoid obstacles using built-in functionalities, rule sets, or machine learning-based algorithms deployed on its embedded computers. While the operator can take control of the UAS, this is not necessary, since several backup systems are in place to ensure safety in case one system fails. This is where an ideal system is expected to adapt to highly dynamic environments using powerful techniques like machine learning. At this level, the UAS is said to have achieved complete \textit{sense and navigate} capability.
\item \textbf{Level 5 - Full Automation:} At this final level, the UAS operates fully autonomously, without any intervention from operators, regardless of the operating scenario. This includes not only sense and navigate capability but also the ability to learn, adapt its objectives and goals, or even optimize its operational objectives and make the necessary changes on-the-fly.
\end{itemize}
Several of today's UASs have limited semi-autonomous modes (Levels 1 to 3) that allow the UAS to perform some autonomous actions such as returning to the initial location, following a pre-determined flight path, performing maneuvers, and recovering from some standard instabilities, among others. Completely autonomous systems (Levels 4 and 5) that can interact with and survive in a dynamic environment without a human-in-the-loop are still far from being realized or deployed in a safe and effective manner.
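For readers who like a compact reference, the five levels can also be summarized programmatically. The sketch below is purely illustrative: the \texttt{AutonomyLevel} enum, the member names, and the helper function are our own shorthand for the taxonomy of \cite{Level_autonomy}, not part of any standard or library.
\begin{verbatim}
from enum import IntEnum

class AutonomyLevel(IntEnum):
    """Illustrative encoding of the five-level UAS autonomy taxonomy."""
    PILOT_ASSISTANCE = 1        # operator flies; UAS assists one function
    PARTIAL_AUTOMATION = 2      # UAS holds altitude/heading/speed; "sensing"
    CONDITIONAL_AUTOMATION = 3  # UAS alerts operator; "sense and avoid"
    HIGH_AUTOMATION = 4         # operator optional; "sense and navigate"
    FULL_AUTOMATION = 5         # no operator; learns and adapts objectives

def needs_operator_monitoring(level: AutonomyLevel) -> bool:
    # At Levels 1-3 the operator remains responsible for safe operation.
    return level <= AutonomyLevel.CONDITIONAL_AUTOMATION
\end{verbatim}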
Machine learning, a subset of artificial intelligence, has seen a spike in its application across various domains. This resurgence from its last winter is attributed to two main reasons: (i) the exponential growth in computing resources over the last decade, and (ii) the digitization of the modern era, which has provided access to a huge quantity of data that can be used to train machine learning models. Today, we see machine learning algorithms successfully applied to computer vision \cite{sebe2005machine,DL_CV,alexnet}, natural language processing \cite{NLP,NLP_2}, medical applications \cite{litjens2017survey}, wireless communication \cite{JagannathAdHoc2019,Ajagannath6G2020}, signal intelligence \cite{Jagannath19MLBook}, robotics \cite{polydoros2017survey}, and speech recognition \cite{DL_speech}, among others. These advancements have rendered machine learning a perfect candidate for realizing autonomy in UAS. To this end, in this chapter, we discuss the advances made in the field of machine learning, specifically deep learning and reinforcement learning, to facilitate UAS autonomy. We also look at the key challenges and open research problems that need to be addressed for UAS autonomy. We hope this chapter serves as a useful guide for beginners as well as seasoned researchers to take larger strides in these areas of research.

\subsection{Applications of UAS}
\begin{figure}[h]
\centering
\includegraphics[width=4.7 in]{editor/Application.pdf}
\caption{Various applications of UAS}
\label{fig:App}
\end{figure}
The applications of UAS can be broadly divided into three categories, as shown in Figure \ref{fig:App}: (i) \ac{ISR}, (ii) payload/product delivery, and (iii) maintenance and repair. Presently, ISR is the most common application for which \acp{UAS} are employed in both the commercial and military realms. UASs are used for surveillance and remote sensing, mapping areas of interest using traditional cameras or other sensors such as acoustic, \ac{IR}, and radar. UASs are also used to monitor and survey oilfields, crops, power grids, and other areas that are remote or difficult for operators to access. Such surveys are also used for education, environment and climate studies, tourism, mapping, crop assessment, weather, traffic monitoring, and border management. Similarly, UASs are used for humanitarian aid and rescue operations by first responders during disasters like floods and earthquakes, where road access does not exist or has been rendered inaccessible. UASs are also being actively designed and developed to become efficient agents for payload delivery. Such payloads include packages from online retailers, medical supplies for hospitals or disaster areas, and maintenance parts for remote locations in the commercial and civilian domains. As one can imagine, the delivery of different kinds of payloads will also be critical for several military missions, and UAS may provide a safer alternative for accomplishing such deliveries in hostile areas with limited accessibility. Though not prevalent yet, it is envisioned that UAS will open up the market for several maintenance and repair tasks for the aerospace industry, the power grid, wind farms, and other operations that are not easy to access. Currently, UASs are already being deployed to monitor and detect faults as well as to provide maintenance alerts that reduce operational expenses. It is envisioned that, in the near future, robotics-enabled UASs will also be able to intervene when faults are detected or repairs are needed.
\subsection{Classification of UAS}
\label{subsec:classUAS}
\begin{figure}[h]
\centering
\hspace{-0.5 cm}
\includegraphics[width=4.8 in]{editor/Classification.pdf}
\caption{Classification of Unmanned Aerial Systems}
\label{fig:Class}
\end{figure}
The preceding discussion of UAS applications makes clear the need for versatility in UAS design. For these reasons, and because of the immense prospects that UAS holds for the future, UASs have evolved into different forms and sizes. While most of the discussion in this chapter is not specific to any particular UAS platform, we provide a succinct classification of UASs in Figure \ref{fig:Class}. Several characteristics are used to classify different types of UASs; here, we present the three most prevalent ones. The first is the classification adopted by the \ac{DoD}, which divides these systems into five groups based on weight, flight altitude, and velocity. The other two classifications are based on wing type, and on the takeoff and landing mechanism. All of these are summarized in Figure \ref{fig:Class}.

\subsection{Chapter Organization}
\begin{figure}[h]
\centering
\includegraphics[width=4.6 in]{editor/Chapter_Organisation.pdf}
\caption{Organization of the Chapter}
\label{fig:Organization}
\end{figure}
This chapter is written for the benefit of a broad array of readers with different levels of understanding and experience in this area of research. Therefore, for the benefit of readers who are relatively new to machine learning, we start by providing an overview of the specific machine learning techniques explored in this chapter. The detailed explanation of these techniques ensures that even a beginner in machine learning can grasp them and benefit from the rest of the discussion. The core contribution of this chapter is presented in the next four sections. Among these, two sections are dedicated to the discussion of the various deep learning and reinforcement learning techniques that have been explored for UAS. In each of these sections, we also discuss the open problems and challenges to motivate researchers to explore these areas further. Since the goal of every research endeavor is to ensure that novel algorithms and solutions are effectively deployed on target platforms, in the following two sections we look at simulation suites and hardware platforms that can help expedite this process. Finally, we conclude the chapter in the final section. The overall chapter organization is depicted in Figure \ref{fig:Organization}.

\subsection{Notations}
Here, we introduce some standard notations that will be used throughout this chapter. Matrices and vectors are denoted by boldface upper- and lower-case letters, respectively. For a vector $\mathbf{x}$, $x_i$ denotes the $i$-th element, $\norm{\mathbf{x}}$ indicates the Euclidean norm, $\mathbf{x}^\intercal$ represents its transpose, and $\mathbf{x} \cdot \mathbf{y}$ the Euclidean inner product of $\mathbf{x}$ and $\mathbf{y}$. For a matrix $\mathbf{H}$, $H_{ij}$ indicates the element at row $i$ and column $j$. The notations $\mathbb{R}$ and $\mathbb{C}$ indicate the sets of real and complex numbers, respectively. The notation $\mathbb{E}_{x\sim p(x)}\left[f(x)\right]$ denotes the expected value, or average, of the function $f(x)$, where the random variable $x$ is drawn from the distribution $p(x)$.
When a probability distribution of a random variable, $x$, is conditioned on a set of parameters, $\boldsymbol{\theta}$, we write $p(x;\boldsymbol{\theta})$ to emphasize the fact that $\boldsymbol{\theta}$ parameterizes the distribution and reserve the typical conditional distribution notation, $p(x|y)$, for the distribution of the random variable $x$ conditioned on the random variable $y$. We use the standard notation for operations on sets where $\cup$ and $\cap$ are the infix operators denoting the union and intersection of two sets, respectively. We use $S_k \subseteq S$ to say that $S_k$ is either a strict subset of or equal to the set $S$ and $x \in S$ to denote that $x$ is an element of the set $S$. $\varnothing$ is used to denote the empty set and $|S|$ represents the cardinality of a set $S$. Lastly, the convolution operator is denoted as $*$. \section{Overview of Machine Learning Techniques} \label{sec:Overview} Machine learning is a branch of artificial intelligence in which computer systems acquire knowledge by learning patterns from raw data and/or from observations sampled from the environment. Machine learning is broadly classified into supervised, unsupervised, and reinforcement learning, which are further subdivided into subcategories as shown in Fig.\ref{fig:1} (a limited representation of this vast field, restricted to the techniques relevant here). In this section, we elaborate on the key machine learning techniques (indicated as gray boxes in Fig.\ref{fig:1}) prominently used in this chapter to help readers understand the deep learning approaches for UAS autonomy. \begin{figure}[h] \centering \includegraphics[width=4.6 in]{editor/ML_overview.pdf} \caption{Machine Learning Techniques} \label{fig:1} \end{figure} \subsection{Feedforward Neural Networks} \label{sec:fnn} Feedforward neural networks (FNNs), also referred to as multilayer perceptrons, are directed layered neural networks with no internal feedback connections. Mathematically, an FNN performs a mapping, i.e., $f:X\longrightarrow Y$. An $N$-layered FNN is a composite function $y = f(\mathbf{x};\theta)=f_N(f_{N-1}(\cdots f_1(\mathbf{x})))$ mapping input vector $\mathbf{x}\in \mathbb{R}^m$ to a scalar output $y \in \mathbb{R}$. Here, $\theta$ represents the neural network parameters. The number of layers in the neural network dictates the \emph{depth} whereas the number of neurons in the layers defines the \emph{width} of the network. The layers in between the input and output layers, whose outputs are not directly observed, are called \emph{hidden} layers. Figure \ref{fig:fnn} shows a 3-layered FNN accepting a two-dimensional input vector $\mathbf{x}\in \mathbb{R}^2$ and mapping it to a scalar output $y \in \mathbb{R}$. \begin{figure}[h] \centering \includegraphics[width=3.5 in]{editor/fnn.pdf} \caption{Three-layered FNN} \label{fig:fnn} \end{figure} In the figure, each node represents a neuron and each link between nodes $i$ and $j$ is assigned a weight $w_{ij}$. The composite function of the 3-layered FNN is \begin{equation} y = f(\mathbf{x};\theta) = f_3(f_2(f_1(\mathbf{x}))) \label{eq:fnn} \end{equation} In other words, the 3-layer FNN in Fig.\ref{fig:fnn} is the directed acyclic graph equivalent of the composite function in equation (\ref{eq:fnn}).
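Before detailing each layer's mapping, it is instructive to view equation (\ref{eq:fnn}) as a program. The following minimal NumPy sketch evaluates the composite function; the 2-2-2-1 layer sizes mirror Fig.\ref{fig:fnn}, while the ReLU activation and the random toy parameters are illustrative assumptions, not choices made in the text.
\begin{verbatim}
import numpy as np

def relu(z):
    # One common choice for the generic activations A_i(.)
    return np.maximum(0.0, z)

def fnn_forward(x, params):
    # Composite mapping y = f3(f2(f1(x))) of the 3-layered FNN
    (W1, b1), (W2, b2), (W3, b3) = params
    h1 = relu(W1 @ x + b1)    # first-layer mapping f1
    h2 = relu(W2 @ h1 + b2)   # second-layer mapping f2
    return (W3 @ h2 + b3)[0]  # output mapping f3 (linear here)

rng = np.random.default_rng(0)   # toy parameters theta
params = [(rng.standard_normal((2, 2)), rng.standard_normal(2)),
          (rng.standard_normal((2, 2)), rng.standard_normal(2)),
          (rng.standard_normal((1, 2)), rng.standard_normal(1))]
y = fnn_forward(np.array([0.5, -1.0]), params)
\end{verbatim}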
The mapping in the first layer is \begin{equation} \mathbf{h}_1 = f_1(\mathbf{x}) = \mathcal{A}_1(\mathbf{W}_1\mathbf{x} + \mathbf{b}_1) \end{equation} where $\mathcal{A}_1(\circ)$ is the activation function, $\mathbf{b}_1$ is the bias vector, and $\mathbf{W}_1$ represents the weight matrix between the neurons in the first and second layers. Here, the weight matrix $\mathbf{W}_1$ is defined as the link weights between the neurons in the input and second layer \begin{equation} \mathbf{W}_1 = \begin{bmatrix} w_{ab} & w_{db}\\w_{ae} & w_{de} \end{bmatrix}. \end{equation} Similarly, the second layer mapping can be represented as \begin{equation} \mathbf{h}_2 = f_2(\mathbf{h}_1) = \mathcal{A}_2(\mathbf{W}_2\mathbf{h}_1 + \mathbf{b}_2) \end{equation} Finally, the output is \begin{equation} y = f_3(\mathbf{h}_2) = \mathcal{A}_3(\mathbf{W}_3\mathbf{h}_2 + \mathbf{b}_3) \end{equation} The weight matrices in the second and final layers are \begin{equation*} \mathbf{W}_2 = \begin{bmatrix} w_{bc} & w_{ec}\\w_{bf} & w_{ef} \end{bmatrix} \text{ and } \mathbf{W}_3 = \begin{bmatrix} w_{co} & w_{fo} \end{bmatrix}. \end{equation*} The neural network parameters $\theta = \{\mathbf{W}_1,\mathbf{W}_2,\mathbf{W}_3,\mathbf{b}_1,\mathbf{b}_2,\mathbf{b}_3 \}$ comprise the weight matrices and bias vectors across the layers. The objective of the training algorithm is to learn the optimal $\theta^*$ that yields the target composite function $f^*$ from the available samples of $\mathbf{x}$. \subsection{Convolutional Neural Networks} \label{sec:cnn} Convolutional networks or convolutional neural networks (CNNs) are a specialized type of feedforward neural network that performs the convolution operation in at least one of its layers. The \emph{feature extraction} capability of CNNs mimics the neural activity of the animal visual cortex \cite{CNNcortex}. The visual cortex comprises a complex arrangement of cells that are sensitive to sub-regions of the perceived scene. The convolution operation in CNNs emulates this characteristic of the brain's visual cortex. Consequently, CNNs have been abundantly applied in the field of computer vision \cite{googlenet_inception,alexnet,LeNet5,vgg16,squeezenet,cnn4vision,fastRCNN,CNNface,resnet}. Convolution is an efficient method of feature extraction that reduces the data dimension and consequently reduces the parameters of the network. Hence, CNNs are more efficient and easier to train than their fully connected feedforward counterparts (Section \ref{sec:fnn}). A typical CNN architecture involves convolution, pooling, and output layers. CNNs operate on an input tensor $\mathbf{X}\in \mathbb{R}^{W\times H \times D}$ of width $W$, height $H$, and depth $D$, which is operated on by a kernel (filter) $\mathbf{K}\in \mathbb{R}^{w\times h\times D}$ of width $w$, height $h$, and the same depth as the input tensor, to generate an output feature map $\mathbf{M}\in \mathbb{R}^{W_1\times H_1\times D_1}$. The dimension of the feature map is a function of the input as well as kernel dimensions, the number of kernels $N$, the stride $S$, and the amount of zero padding $P$. Specifically, the feature map dimensions can be derived as $W_1 = \left(W-w+2P\right)/S + 1, \; H_1 = \left(H-h+2P\right)/S + 1,\; D_1 = N$. Each kernel slice extracts a specific feature from its input region of operation. A kernel refers to a set of weights and biases. The kernel operates on the input slice in a sliding window manner based on the stride.
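As a concrete illustration of this sliding-window computation, the following minimal NumPy sketch convolves a single input slice with a single kernel slice and reproduces the feature map dimension formula above; the function name and interface are ours, chosen for illustration.
\begin{verbatim}
import numpy as np

def conv2d_slice(x, k, b, stride=1, pad=0):
    # Convolve one input slice x (W x H) with one kernel slice
    # k (w x h) plus bias b, in a sliding-window manner.
    x = np.pad(x, pad)                  # zero padding P
    (W, H), (w, h) = x.shape, k.shape
    W1 = (W - w) // stride + 1          # = (W - w + 2P)/S + 1
    H1 = (H - h) // stride + 1
    m = np.zeros((W1, H1))              # feature map M
    for i in range(W1):
        for j in range(H1):
            window = x[i*stride:i*stride+w, j*stride:j*stride+h]
            m[i, j] = np.sum(window * k) + b
    return m  # a non-linear activation A(.) is applied elementwise
\end{verbatim}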
Stride refers to the number of steps by which the kernel slides along the input slice. Each depth slice of the input is treated with the same kernel or, in other words, shares the same weights and biases - \emph{parameter sharing}. The convolution operation on an input slice $\mathbf{x}$ by a kernel $\mathbf{k}$ is demonstrated in Fig.\ref{fig:cnn_conv}. Here, $b$ represents the bias associated with the kernel slice and $\mathcal{A}\left(\circ\right)$ denotes a non-linear activation function. \begin{figure}[h] \centering \includegraphics[width=4.7 in]{editor/cnn_conv.pdf} \caption{Convolution of input slice with kernel} \label{fig:cnn_conv} \end{figure} The resulting output from the convolution operation is referred to as the \emph{feature map}. Each element of the feature map can be visualized as the output of a neuron which focuses on a small region of the input - its \emph{receptive field}. The neural depiction of the convolution interaction is shown in Fig.\ref{fig:neural}. \begin{figure}[h] \centering \hspace{-1 cm} \includegraphics[width=2.8 in]{editor/neural.pdf} \caption{Neural representation of convolution} \label{fig:neural} \end{figure} It is evident that each neuron in a layer is connected locally to the neurons in the adjacent layer - \emph{sparse connectivity}. Hence, each neuron is unaffected by variations outside of its receptive field while producing the strongest response for spatially local input patterns. The feature maps are propagated to subsequent layers until they reach the output layer for a regression or classification task. \emph{Pooling} is a typical operation in CNNs that significantly reduces dimensionality. It operates on a subregion of the input to map it to a single summary statistic depending on the type of pooling operation - max, mean, $L_2$-norm, weighted average, etc. In this way, pooling downsamples its input. A typical pooling dimension is $2\times2$; larger pooling dimensions risk losing significant information. Figure \ref{fig:pool} shows max and mean pooling operations. \begin{figure}[h] \centering \includegraphics[width=2.8 in]{editor/pooling.pdf} \caption{Max and mean pooling on input slice with stride 1} \label{fig:pool} \end{figure} A pooling layer of dimensions $W_p\times H_p$, upon operating over an input volume of size $W_1\times H_1\times D_1$ with a stride of $S_1$, will yield an output of volume $W_2 = \left( W_1-W_p\right)/S_1 + 1, \;H_2 = \left( H_1-H_p\right)/S_1 + 1, \; D_2 = D_1$. Pooling imparts invariance to translation, i.e., if the input to the pooling layer is shifted by a small amount, the pooled output will remain largely unaffected \cite{Goodfellow-et-al-2016}. As we have discussed, the three essential characteristics of CNNs that contribute to their statistical efficiency and trainability are parameter sharing, sparse connectivity, and dimensionality reduction. CNNs have demonstrated superior performance in computer vision tasks such as image classification, object detection, and semantic scene classification. Consequently, CNNs are increasingly used for UAS imagery and navigation applications \cite{uavapps}. The most notable CNN architectures are LeNet-5 \cite{LeNet5}, AlexNet \cite{alexnet}, VGG-16 \cite{vgg16}, ResNet \cite{resnet}, Inception \cite{googlenet_inception}, and SqueezeNet \cite{squeezenet}. \subsection{Recurrent Neural Networks} \label{sec:rnn} \ac{RNN} \cite{Rumelhart1986} is a type of neural network specialized to capture temporal dependencies in sequential data.
An RNN holds internal memory states and recurrent connections between them to capture the sequence history. This characteristic enables an RNN to exploit the temporal correlation of data, rendering it suitable for image captioning, video processing, speech recognition, and natural language processing applications. Unlike CNNs and traditional feedforward neural networks, an RNN can handle variable-length input sequences with the same model. RNNs operate on input sequence vectors at varying time steps $\mathbf{x}^{t}$ and map them to output sequence vectors $\mathbf{y}^{t}$. The recurrence relation in an RNN parameterized by $\mathbf{\theta}$ can be expressed as \begin{equation} \mathbf{h}^t = \mathcal{F}\Big(\mathbf{h}^{t-1},\mathbf{x}^{t};\mathbf{\theta} \Big) \label{eq:recursive} \end{equation} where $\mathbf{h}^t$ represents the hidden state vector at time $t$. The recurrence relation represents a recursive dynamic system. By this comparison, an RNN can be defined as \emph{a recursive dynamic system that is driven by an external signal, i.e., the input sequence $\mathbf{x}^{t}$}. Equation (\ref{eq:recursive}) can be unfolded twice as \begin{align} \mathbf{h}^t &= \mathcal{F}\Big(\mathbf{h}^{t-1},\mathbf{x}^{t};\mathbf{\theta} \Big)\\ &= \mathcal{F}\Big(\mathcal{F}\Big(\mathbf{h}^{t-2},\mathbf{x}^{t-1};\mathbf{\theta} \Big),\mathbf{x}^{t};\mathbf{\theta} \Big)\\ &= \mathcal{F}\Big(\mathcal{F}\Big(\mathcal{F}\Big(\mathbf{h}^{t-3},\mathbf{x}^{t-2};\mathbf{\theta} \Big),\mathbf{x}^{t-1};\mathbf{\theta} \Big),\mathbf{x}^{t};\mathbf{\theta} \Big) \end{align} The unfolded equations show how the RNN processes the whole past sequence $\mathbf{x}^{t}, \mathbf{x}^{t-1},$ $\cdots, \mathbf{x}^{1}$ to produce the current hidden state $\mathbf{h}^{t}$. Another notable inference from the unfolded representation is \emph{parameter sharing}. Unlike a CNN, where the parameters of a spatial locality are shared, in an RNN the parameters are shared across different positions in time. For this reason, an RNN can operate on variable-length sequences, allowing the model to learn and generalize well to inputs of varying forms. In contrast, a traditional feedforward network does not share parameters and has a specific parameter per input feature, preventing it from generalizing to an input form not seen during training. A CNN, in turn, shares parameters across a small spatial locality but does not generalize to variable-length inputs as well as an RNN. A simple many-to-many RNN architecture, which maps a multi-step input sequence to a multi-step output sequence, is shown in Fig.\ref{fig:mmrnn}. \begin{figure}[h] \centering \includegraphics[width=2 in]{editor/many-to-many_rnn.pdf} \caption{Many-to-many RNN architecture} \label{fig:mmrnn} \end{figure} For a simple representation, let us assume the RNN is parameterized by $\mathbf{\theta}$ and $\mathbf{\phi}$ with input-to-hidden, hidden-to-hidden, and hidden-to-output weight matrices being $\mathbf{W}_{ih}, \mathbf{W}_{hh},$ and $\mathbf{W}_{ho}$ respectively. The hidden state at time $t$ can be expressed as \begin{align} \mathbf{h}^t &= \mathcal{F}\Big(\mathbf{h}^{t-1},\mathbf{x}^{t};\mathbf{\theta} \Big)\\ &= \mathcal{A}_h\Big(\mathbf{W}_{hh}\mathbf{h}^{t-1} + \mathbf{W}_{ih}\mathbf{x}^{t} + \mathbf{b}_h\Big) \end{align} where $\mathcal{A}_h(\circ)$ is the activation function of the hidden unit and $\mathbf{b}_h$ is the bias vector.
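This hidden-state recurrence can be made concrete with a short sketch. The following NumPy example (tanh is one common choice for the generic activation $\mathcal{A}_h$; the function interface is ours) unrolls the recurrence over a variable-length sequence, with the same parameters shared at every time step:
\begin{verbatim}
import numpy as np

def rnn_hidden_states(xs, W_ih, W_hh, b_h, h0):
    # Unrolls h^t = A_h(W_hh h^{t-1} + W_ih x^t + b_h) over the
    # input sequence xs; parameters are shared across time steps.
    h, hs = h0, []
    for x in xs:
        h = np.tanh(W_hh @ h + W_ih @ x + b_h)
        hs.append(h)
    return hs  # one hidden state per input vector
\end{verbatim}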
The output at time $t$ can be obtained as a function of the hidden state at time $t$, \begin{align} \mathbf{y}^t &= \mathcal{G}\Big(\mathbf{h}^{t};\mathbf{\phi} \Big)\\ &= \mathcal{A}_o\Big(\mathbf{W}_{ho}\mathbf{h}^t + \mathbf{b}_o\Big) \end{align} where $\mathcal{A}_o(\circ)$ is the activation function of the output unit and $\mathbf{b}_o$ is the bias vector. Other typical RNN architectures are shown in Fig.\ref{fig:allrnn}. \begin{figure}[h] \centering \includegraphics[width=4.8 in]{editor/allrnn.pdf} \caption{RNN architectures. (a) Many-to-one, (b) One-to-many, and (c) One-to-one} \label{fig:allrnn} \end{figure} The RNN architectures discussed so far capture only hidden states from the past. Some applications also require future states in addition to past ones. This is accomplished by a bidirectional RNN \cite{biRNN}. In simple words, a bidirectional RNN combines an RNN that depends on past states (\emph{i.e.,} from $\mathbf{h}^{1}, \mathbf{h}^{2}, \mathbf{h}^{3}, \cdots, \mathbf{h}^{t}$) with an RNN that looks at future states (\emph{i.e.,} from $\mathbf{h}^{t}, \mathbf{h}^{t-1}, \mathbf{h}^{t-2}, \cdots, \mathbf{h}^{1}$). \subsection{Reinforcement Learning} \label{sec:ReinforcementLearning} Reinforcement learning is focused on the idea of a goal-directed agent interacting with an environment based on its observations of the environment \cite{RL_book}. The main goal of reinforcement learning is for the agent to learn how to act, i.e., what action to perform in a given environmental state, such that a reward signal is maximized. The agent repeatedly interacts with the environment in a series of discrete time steps by observing the environmental state, then choosing and executing an action. The action chosen by the agent may affect the state of the environment in the next time step. The agent receives a reward signal from the environment and transitions to a new state. The agent has some capability to sense the environmental state; informally, the state can be thought of as any information about the environment that is made available to the agent. The agent selects which of the possible actions it can take by following a policy, which is a function, in general stochastic, that maps states to actions. A reward signal is used to define the goal of the problem. The reward received by the agent at each time step specifies the immediate desirability of the current state. The objective of the reinforcement learning agent is to maximize the cumulative reward, typically defined by a value function, which captures the long-term desirability of states. The agent aims at achieving a goal by continuously interacting with the environment. This interaction, which involves taking actions while trading off short- and long-term rewards, renders reinforcement learning a potentially well-suited solution to many autonomous problems \cite{RL_Robotics_Survey_Kober}. The reinforcement learning problem is usually represented mathematically using a finite \ac{MDP}. A finite \ac{MDP} is defined by the tuple $(S, A, P, R)$, where $S$, $A$, $P$, and $R$ are the state space, action space, transition function, and reward function, respectively. Note that in finite \acp{MDP}, the state, action, and reward spaces consist of a finite number of elements. At each time step, the agent observes state $s \in S$, selects and takes action $a \in A$, receives a reward $r$, and transitions to a new state $s' \in S$.
The transition function specifies the probability of transitioning from state $s$ to state $s'$ as a consequence of choosing action $a$ as, \begin{equation} P(s,a,s')=Pr(S_{t+1}=s'|S_{t}=s,A_{t}=a). \end{equation} The reward function $R$ defines the expected reward received by the agent after taking action $a$ in state $s$, i.e., \begin{equation} R(s,a)=\mathbb{E}[R_t|S_t=s,A_t=a]. \end{equation} It can be seen that the functions $P$ and $R$ define the dynamics of the \ac{MDP}. A reinforcement learning agent uses a policy to select actions in a given state. The policy, denoted $\pi(s,a)$, provides a probabilistic mapping of states to actions as, \begin{equation} \pi(s,a)=Pr(A_t=a|S_t=s). \end{equation} As discussed earlier, value functions are used to define the long-term goodness of states for the agent. Mathematically, the \emph{state-value function} is denoted as \begin{equation} v_{\pi}(s)=\mathbb{E}_{\pi}\Bigg[\sum_{k=0}^{\infty}\gamma^kR_{t+k+1}|S_t=s\Bigg], \forall s\in S \end{equation} The \emph{state-value function} specifies the expected return, i.e., the sum of discounted rewards, if the agent follows policy $\pi$ starting from state $s$. The discount rate $\gamma$, $0 \leq \gamma \leq 1$, is used to weight future rewards progressively less. For example, as $\gamma$ approaches zero, the agent is concerned only with immediate rewards, whereas when $\gamma$ approaches unity, the agent favors future rewards. The expected discounted return is denoted by $G_t$, i.e., \begin{equation} G_t=\sum_{k=0}^{\infty}\gamma^kR_{t+k+1}. \end{equation} Additionally, the \emph{action-value function} for policy $\pi$ is mathematically represented as \begin{equation} q_{\pi}(s,a)=\mathbb{E}_{\pi}\Bigg[\sum_{k=0}^{\infty}\gamma^kR_{t+k+1}|S_t=s,A_t=a\Bigg] \end{equation} The \emph{action-value function} specifies the expected return if the agent takes action $a$ in state $s$ under policy $\pi$. The \ac{MDP} dynamics of the environment and the notion of value functions have been exploited to develop multiple algorithms. In the case where the \ac{MDP} is fully known, i.e., the agent has knowledge of $P$ and $R$, dynamic programming methods (planning algorithms), such as policy iteration and value iteration, can be used to solve the \ac{MDP} for the optimal policy or optimal value function. However, in reinforcement learning, knowledge of the \ac{MDP} dynamics is not usually assumed. Both model-based and model-free approaches exist for solving reinforcement learning problems. In model-based reinforcement learning, the agent attempts to learn a model of the environment directly, by learning $P$ and $R$, and then uses the environmental model to plan actions using algorithms similar to policy iteration and value iteration. In model-free reinforcement learning, the agent does not attempt to directly learn a model of the environment but rather attempts to learn an optimal value function or policy. The discussion in this chapter is primarily focused on model-free methods. Generally speaking, model-free reinforcement learning algorithms fall into value function based and policy gradient based methods. In value function based methods, the agent attempts to learn an optimal value function, usually an action-value function, from which an optimal policy can be found. Value function methods include Monte Carlo, \ac{SARSA}, and Q-Learning. Policy gradient based methods attempt to learn an optimal parameterized policy directly via the gradient of a scalar performance measure with respect to the policy parameters.
The REINFORCE algorithm is an example of a policy gradient method. \subsubsection*{Monte Carlo} Monte Carlo methods can be utilized to learn value functions and optimal policies through direct experience with the environment. In particular, sequences of states, actions, and rewards can be obtained by the agent interacting with the environment, either directly or in simulation, and the value function can be estimated by averaging the returns beginning from a state-action pair. Monte Carlo methods are typically used for episodic tasks. An episode (a sequence of states, actions, and rewards) is generated by the agent following policy $\pi$ in the environment, and the value function estimate is updated at the conclusion of each episode. Monte Carlo methods can be used for control, i.e., finding the optimal policy, by performing policy improvement. Policy improvement updates the policy such that it is greedy with respect to the current action-value function estimate. The greedy policy for an action-value function is defined such that for each state $s$ the action with the maximum action-value is taken, i.e., \begin{equation} \pi(s)\doteq \operatorname*{argmax}_{a \in A} q(s,a). \end{equation} An important consideration when using Monte Carlo methods for value function prediction, and in reinforcement learning in general, is that of maintaining exploration. In order to learn the action-value function, all state-action pairs need to be explored. One way to achieve this is known as exploring starts, whereby each episode begins in a particular state-action pair and all state-action pairs have a non-zero probability of being selected at the start of an episode. Exploring starts guarantees that every state-action pair will be visited an infinite number of times in the limit of an infinite number of episodes \cite{RL_book}. An alternative approach is to utilize a policy that allows for continued exploration. An example is the $\epsilon$-greedy policy in which, most of the time (with probability $1-\epsilon$), an action is selected that maximizes the action-value function, while occasionally a random action is chosen with probability $\epsilon$, i.e., \begin{equation} \pi(s,a)=\begin{cases} 1-\epsilon+\frac{\epsilon}{|A|}, & \text{if } a = a^*\\ \frac{\epsilon}{|A|}, & \text{otherwise} \end{cases} \end{equation} There are two approaches to ensure continued exploration: on-policy and off-policy methods. In on-policy methods, the algorithm attempts to evaluate and improve the policy that is being used to select actions in the environment, whereas off-policy methods improve a policy different from the one used to select actions. In off-policy methods, the agent attempts to learn an optimal policy, called the target policy, by generating actions using another policy that allows for exploration, called the behavior policy. Since the policy learning is from data collected ``off'' the target policy, the methods are called off-policy. Both on-policy and off-policy Monte Carlo control methods exist. \subsubsection*{Temporal Difference Learning} \ac{TD} learning defines another family of value function based reinforcement learning methods. Similar to Monte Carlo methods, \ac{TD} learns a value function via interaction with the environment. The main difference between \ac{TD} and Monte Carlo is that \ac{TD} updates its estimate of the value function at each time step rather than at the end of the episode. In other words, the value function update is based on the value function estimate of the subsequent state.
The idea of updating the value function based on the estimated return $(R_{t+1}+\gamma V(S_{t+1}))$ rather than the actual (complete) return as in Monte Carlo is known as bootstrapping. A simple \ac{TD} update equation for the value function is \begin{equation} V(S_t)=V(S_t)+\alpha[R_{t+1}+\gamma V(S_{t+1})-V(S_t)] \end{equation} where $\alpha$ is a step size parameter. In the above equation, it is seen that the \ac{TD} method updates the value function estimate at the next time step. The target value for the \ac{TD} update is $R_{t+1}+\gamma V(S_{t+1})$, which is compared to the current value function estimate $(V(S_t))$. The difference between the target and the current estimate is known as the \ac{TD} error, i.e., \begin{equation} \delta_{t}\doteq R_{t+1}+\gamma V(S_{t+1})-V(S_t) \end{equation} An advantage of \ac{TD} methods is their ability to update value function predictions at each time step, which enables online learning. \textbf{SARSA:} \ac{SARSA} is an on-policy \ac{TD} control algorithm. The \ac{TD} update equation presented above is extended to action-value function prediction, yielding the \ac{SARSA} action-value update rule, \begin{equation} Q(S_t,A_t) \leftarrow Q(S_t,A_t)+\alpha[R_{t+1}+\gamma Q(S_{t+1},A_{t+1})-Q(S_t,A_t)].\label{eq:sarsa} \end{equation} As shown in equation (\ref{eq:sarsa}), the update is performed after each sequence of $(\cdots,S_t,A_t,R_{t+1},S_{t+1},A_{t+1},\cdots)$, which leads to the name \ac{SARSA}. It is to be noted that the $Q$ estimate is updated based on the sample data generated from the behavior policy $(R_{t+1}+\gamma Q(S_{t+1},A_{t+1}))$. From the control perspective, an $\epsilon$-greedy policy is often used. \textbf{Q-Learning:} Q-Learning is an off-policy \ac{TD} control algorithm and its update rule is given below. \begin{equation} Q(S_t,A_t) \leftarrow Q(S_t,A_t)+\alpha[R_{t+1}+\gamma\max_{a}Q(S_{t+1},a)-Q(S_t,A_t)] \label{eq:qlearn} \end{equation} As an off-policy method, the learned action-value function estimate $Q$ attempts to approximate the optimal action-value function $Q^*$ directly. This can be seen in the update equation (\ref{eq:qlearn}), where the target value is $R_{t+1}+\gamma\max_{a}Q(S_{t+1},a)$ compared to $R_{t+1}+\gamma Q(S_{t+1},A_{t+1})$ of \ac{SARSA}. Unlike in SARSA, the $Q$ value is updated based on the greedy policy for action selection rather than the behavior policy. SARSA does not learn the optimal policy but rather learns the action-values resulting from the $\epsilon$-greedy action selections. Q-learning, in contrast, learns the optimal policy while following the $\epsilon$-greedy action selections, which causes its online performance to drop occasionally \cite{RL_book}. The \ac{TD} methods can be further generalized with $n$-step bootstrapping methods, which are intermediate between Monte Carlo and \ac{TD} approaches. The $n$-step methods generalize the \ac{TD} methods discussed earlier by utilizing the next $n$ rewards, states, and actions in the value or action-value function updates. The value function based approaches discussed so far have been presented as tabular methods. The algorithms are tabular because the state-value or action-value function is represented as a table or an array. In many practical problems of interest, the state spaces are very large and it becomes intractable to learn optimal policies using tabular methods due to the time, data, and memory requirements to populate the tables.
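Before moving past the tabular setting, the updates above are easy to state concretely. The following sketch of tabular Q-learning with an $\epsilon$-greedy behavior policy assumes a Gym-style environment interface (reset() returning a state index and step(a) returning the next state, reward, and a done flag); the interface and hyperparameter values are illustrative assumptions, not taken from a specific library.
\begin{verbatim}
import numpy as np

def q_learning(env, n_states, n_actions, episodes,
               alpha=0.1, gamma=0.99, eps=0.1):
    # Tabular Q-learning with an eps-greedy behavior policy.
    Q = np.zeros((n_states, n_actions))
    rng = np.random.default_rng(0)
    for _ in range(episodes):
        s, done = env.reset(), False
        while not done:
            # eps-greedy action selection (behavior policy)
            if rng.random() < eps:
                a = int(rng.integers(n_actions))
            else:
                a = int(np.argmax(Q[s]))
            s_next, r, done = env.step(a)
            # off-policy update toward the greedy target
            Q[s, a] += alpha * (r + gamma * np.max(Q[s_next])
                                - Q[s, a])
            s = s_next
    return Q
\end{verbatim}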
Additionally, with massive state spaces, it is typical that the agent will enter previously unseen states, requiring it to generalize from experiences in similar states. An example of an overwhelmingly large state space occurs when the environmental state is represented as a camera image; for example, an 8-bit, $200\times200$ pixel RGB image results in $256^{3\times200\times200}$ possible states. To cope with these challenges, optimal policies can be approximated by utilizing function approximation techniques to represent value functions and policies. The different function approximation techniques used in supervised learning can be applied to reinforcement learning. The specific use of deep neural networks as a means for function approximation is known as \ac{DRL} and is discussed later in this section. When using function approximation techniques, parameterized state-value or action-value functions are used to approximate value functions. A state-value estimate can be denoted as $\hat{v}(s;\boldsymbol{w}) \approx v_{\pi}(s)$ and an action-value estimate as $\hat{q}(s,a;\boldsymbol{w}) \approx q_{\pi}(s,a)$, where $\boldsymbol{w} \in \mathbb{R}^{d}$ is the parameter vector. In principle, any supervised learning method could be used for function approximation. For example, a value function estimate could be computed using techniques ranging from a linear function of the state and weights to nonlinear methods such as an \ac{ANN}. \ac{SGD} and its variants are often used to learn the values of the parameter vectors. \subsubsection*{REINFORCE} In contrast to value function based approaches, policy gradient methods attempt to learn an optimal parameterized policy directly without the requirement of learning the action-value function explicitly. The policy that is learned is defined as \begin{equation} \pi(a|s,\boldsymbol{\theta})=Pr(A_t=a|S_t=s,\boldsymbol{\theta}_t=\boldsymbol{\theta}) \end{equation} which specifies the probability that action $a$ is taken at step $t$ in state $s$, and is parameterized by the vector $\boldsymbol{\theta} \in \mathbb{R}^m$. Policy gradient methods learn the value of the policy parameters based on the gradient of a performance measure $J(\boldsymbol{\theta})$ with respect to the parameters. In the episodic case, the performance measure can be defined in terms of the state-value function, assuming the episode starts from an initial state $s_0$, as \begin{equation} J(\boldsymbol{\theta})\doteq v_{\pi_{\boldsymbol{\theta}}}(s_0) \end{equation} REINFORCE is an example of a policy gradient algorithm and is derived from the policy gradient theorem \begin{equation} \nabla J(\boldsymbol{\theta}) \propto \sum_{s}\mu(s)\sum_{a}q_{\pi}(s,a)\mathbf{\nabla_{\theta}}\pi(a|s,\boldsymbol{\theta}) \end{equation} where $\mu(s)$ is a distribution over states and the gradients are column vectors with respect to the parameter vector $\boldsymbol{\theta}$. The policy gradient theorem provides an expression for the gradient of the performance measure with respect to the parameter vector.
From the policy gradient theorem, the following equation is derived for the gradient of $J(\boldsymbol{\theta})$ \begin{equation} \nabla J(\boldsymbol{\theta}) = \mathbb{E}_{\pi}\Bigg[G_t\frac{\mathbf{\nabla_{\theta}} \pi(A_t|S_t,\boldsymbol{\theta})}{\pi(A_t|S_t,\boldsymbol{\theta})}\Bigg] \end{equation} Using \ac{SGD}, the REINFORCE update rule for the policy parameter vector $\boldsymbol{\theta}$ can be derived as \begin{equation} \boldsymbol{\theta}_{t+1} = \boldsymbol{\theta}_{t} + \alpha G_t \frac{\mathbf{\nabla_{\theta}} \pi(A_t|S_t,\boldsymbol{\theta}_t)}{\pi(A_t|S_t,\boldsymbol{\theta}_t)}.\label{eq:rein_up} \end{equation} The update equation (\ref{eq:rein_up}) moves the parameters in a direction that increases the probability of taking action $A_t$ during future visits to the state $S_t$, in proportion to the return $G_t$. This causes the parameters to favor actions that produce the highest return. The division by the action probability corrects for the fact that frequently selected actions would otherwise be favored even when they do not produce the highest return. It is possible to generalize the policy gradient theorem and REINFORCE update rule with the addition of a baseline for comparison to the action values or returns. The baseline can be an arbitrary function or random variable. The motivation behind the use of a baseline is to reduce the variance in policy parameter updates. The update rule for the REINFORCE algorithm with a baseline is given as \begin{equation} \boldsymbol{\theta}_{t+1} = \boldsymbol{\theta}_{t} + \alpha (G_t-b(S_t)) \frac{\mathbf{\nabla_{\theta}} \pi(A_t|S_t,\boldsymbol{\theta}_t)}{\pi(A_t|S_t,\boldsymbol{\theta}_t)} \end{equation} where $b(S_t)$ is the baseline. A common baseline is an estimate of the state-value $\hat{v}(S_t,\mathbf{w})$ parameterized by the weight vector $\mathbf{w} \in \mathbb{R}^l$. The idea of using a state-value function as a baseline can be extended with actor-critic methods. In actor-critic methods, a state-value function, called a critic, is utilized to assess the performance of a policy, called an actor. The critic introduces a bias to the actor's gradient estimates, which can substantially reduce variance. Two more recent policy gradient methods are \ac{TRPO} and \ac{PPO}. \ac{TRPO} was introduced in \cite{schulman2015trust} to prevent drastic policy changes by introducing an optimization constraint based on the Kullback-Leibler (KL) divergence. The policy is updated within a trust region, and the KL constraint ensures that the policy update is not too far away from the original policy. The inclusion of the KL constraint in the optimization problem introduces computational and implementation difficulty. \ac{PPO}, introduced in \cite{schulman2017proximal}, mitigates this implementation hurdle by incorporating the constraint term within the objective function. PPO computes the probability ratio between the new and old policies. There are two variants of PPO - PPO with KL penalty and PPO with clipped objective. In the first variant, the KL constraint is introduced as a penalty term in the objective function such that the policy update does not deviate much from the previous policy while minimizing the cost function. In the second variant, the KL divergence is replaced with a clipped objective function such that the surrogate objective is clipped if the probability ratio lies outside a range, say $1\pm\phi$. In contrast to TRPO, PPO is simpler to implement and tune.
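As a concrete illustration of the clipped-objective variant, the following minimal sketch computes the per-sample clipped surrogate; the function name and the choice $\phi = 0.2$, a commonly used value, are illustrative assumptions rather than prescriptions from \cite{schulman2017proximal}.
\begin{verbatim}
import numpy as np

def ppo_clip_objective(ratio, advantage, phi=0.2):
    # ratio: probability ratio pi_new(a|s) / pi_old(a|s)
    # advantage: estimated advantage of the taken action
    unclipped = ratio * advantage
    clipped = np.clip(ratio, 1.0 - phi, 1.0 + phi) * advantage
    # The policy is updated to maximize the mean of this objective
    return np.minimum(unclipped, clipped)
\end{verbatim}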
\subsubsection*{Deep Reinforcement Learning} Deep reinforcement learning is a popular area of current research that combines techniques from deep learning and reinforcement learning \cite{Arulkumaran_2017}. In particular, deep neural networks are used as function approximators to represent the action-value functions and policies used in traditional reinforcement learning algorithms. This is of particular interest for problems that involve large state and action spaces that become intractable to represent using tabular methods or traditional supervised learning function approximators. A key capability of deep learning architectures is the ability to automatically learn representations (features) from raw data. For example, a deep neural network trained for image classification will automatically learn to recognize features such as edges, corners, etc. The use of deep learning enables policies to be learned in an end-to-end fashion, for example, learning control policies directly from raw sensor values. A famous example of a deep reinforcement learning algorithm is the deep Q-Network, which pairs Q-Learning with a deep \ac{CNN} to represent the action-value function \cite{mnih2013playing}. The deep Q-Network was able to achieve superhuman performance on several Atari games by using only visual information, the reward signal, and the available actions, i.e., no game-specific information was given to the agent. The deep Q-Network employs two methods to address the known convergence issues \cite{Tsitsiklis_analysis_td} that can arise when using neural networks to approximate the $Q$ function. These methods are experience replay and the use of a separate target network for $Q$ updates. The experience replay mechanism stores sequences of past experiences, $(s_t,a_t,s_{t+1},r_{t+1})$, over many episodes in replay memory. The past experiences are used in subsequent $Q$ function updates, which improves data efficiency, removes correlations between samples, and reduces the variance of updates. The separate target network $\hat Q$ is used for generating targets in the Q-Learning updates. The target network is updated every $C$ time steps as a clone of the current $Q$ network; the use of the target network reduces the chances of oscillations and divergence. A variation of the deep Q-network, known as the Deep Recurrent Q-Network \cite{hausknecht2015deep}, adds an \ac{LSTM} layer to help learn temporal patterns. Additional variations include the double deep Q-network and \ac{D3QN}. Furthermore, deep reinforcement learning has also been applied to problems with continuous action spaces. In \cite{lillicrap2015continuous}, an actor-critic algorithm known as \ac{DDPG} is presented; it is based on the \ac{DPG} algorithm and exploits the ideas of experience replay and target networks from the \ac{DQN}, as well as batch normalization. \ac{DDPG} has been applied successfully to many continuous control problems. In \cite{heess2015memorybased}, \ac{RDPG} is introduced as an extension of \ac{DDPG} through the addition of recurrent \ac{LSTM} layers. The characteristics and capabilities of deep reinforcement learning warrant further investigation of its application to autonomous \ac{UAV} systems. A summary of the different model-free reinforcement learning algorithms is shown in Figure \ref{fig:rloverview}.
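As a concrete illustration of the experience replay mechanism described above, the following minimal sketch stores transitions and samples uncorrelated minibatches; the class and its interface are ours, not the implementation of \cite{mnih2013playing}.
\begin{verbatim}
import random
from collections import deque

class ReplayMemory:
    # Stores past experiences (s_t, a_t, s_{t+1}, r_{t+1}, done)
    # and samples random minibatches for Q-function updates.
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)  # drops oldest first

    def push(self, s, a, s_next, r, done):
        self.buffer.append((s, a, s_next, r, done))

    def sample(self, batch_size):
        # Random sampling breaks the correlations between
        # consecutive experiences.
        return random.sample(self.buffer, batch_size)
\end{verbatim}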
\begin{figure}[h] \centering \includegraphics[width=4.5 in]{editor/rl_tree.pdf} \caption{Model-free reinforcement learning algorithms} \label{fig:rloverview} \end{figure} \section{Deep Learning for UAS Autonomy} \label{sec:DeepLearning} \emph{Deep learning} has shown great potential in learning complex representations from real environmental data. Its excellent learning capability has produced outstanding results in autonomous robotic tasks such as gait analysis, scene perception, and navigation \cite{DL_scene,DL_gait}. The same capabilities can be applied to enable autonomy in UAS. The UAS focus areas where deep learning can be applied are scene perception, navigation, obstacle and collision avoidance, swarm operation, and situational awareness, as exemplified in Fig.\ref{fig:dluas}. \begin{figure}[h] \centering \includegraphics[width=4.6in]{editor/dl_classification.pdf} \caption{Deep learning for UAS autonomy discussed in this section.} \label{fig:dluas} \end{figure} Deep learning has been applied as a feature extraction system to learn high-dimensional data representations from raw sensor output. Planning and situational awareness, on the other hand, involve several sub-tasks such as querying or surveying aerial images, navigation control/guidance, collision avoidance, and position-dependent control actions. Accordingly, we divide this section into two broad categories: (i) feature extraction from sensor data and (ii) \ac{UAS} path planning and situational awareness. \subsection{Feature Extraction from Sensor Data} \label{sec:fe} The authors of \cite{Imagery_1} demonstrated the accuracy of a supervised deep learning image classifier operating on monocular images. The classifier predicted the forest trail direction (left, right, or straight) and claimed an accuracy comparable to that of humans tested on the same image classification task. This scene perception task requires the \ac{MAV} to perceive the trail and react (take actions) to stay on the trail. The authors adopted a typical CNN architecture to accomplish the supervised image classification task. The CNN involved four convolutional layers interlaced with max pooling layers, concluding with two fully connected layers. The output fully connected layer adopted softmax classification, which yields the probability that the input image belongs to a particular class. The network was trained using \ac{SGD}. The direction estimates from the CNN were extended to provide navigation control. The navigation control for autonomous trail following was tested on a Parrot AR.Drone interfaced with a laptop as well as on a standalone quadrotor. The paper reported lower classification accuracy under real-world testing conditions as opposed to the good-quality GoPro images in the training dataset. The AlexNet \cite{alexnet} architecture was employed for palm tree detection and counting from aerial images in \cite{Imagery_3}. The images were collected from the QuickBird satellite. A sliding window technique with a window size of $17\times17$ pixels and a stride of 3 pixels was adopted to collect the image dataset. Only a sample with a palm tree located in the center was classified as a positive palm tree detection. Spatial coordinates of the detected palm tree classes are obtained, and those corresponding to the same palm tree samples are merged. Those spatial coordinates with a Euclidean distance below a certain threshold are grouped into one coordinate.
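This merging step can be sketched as a simple greedy grouping; the function below is our illustrative reading of the procedure, not code from \cite{Imagery_3}.
\begin{verbatim}
import numpy as np

def merge_detections(coords, threshold):
    # Greedily groups coordinates whose Euclidean distance to a
    # reference detection is below `threshold`, replacing each
    # group with its mean coordinate.
    coords = [np.asarray(c, dtype=float) for c in coords]
    merged = []
    while coords:
        ref, rest, group = coords.pop(0), [], []
        group.append(ref)
        for c in coords:
            if np.linalg.norm(c - ref) < threshold:
                group.append(c)
            else:
                rest.append(c)
        coords = rest
        merged.append(np.mean(group, axis=0))
    return merged
\end{verbatim}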
The remaining coordinates represent the actual coordinates of the detected palm trees. The work reported accurate detection of 96\% of the palm trees in the study area. The faster R-CNN \cite{fastRCNN} architecture was employed for car detection from low-altitude UAV imagery in \cite{Imagery_4}. Faster R-CNN comprises a region proposal network (RPN) module and a fast R-CNN detector. The RPN module is a deep convolutional architecture that generates region proposals of varying scales and aspect ratios. Region proposals may not necessarily contain the target object. These region proposals are further refined by the fast R-CNN detector. The RPN and fast R-CNN detector modules share their convolutional layers and are jointly trained for object detection. For the car detection task, the VGG-16 model \cite{vgg16} was adopted to form the shared convolutional network. The RPN generates $k$ region proposals in the form of $2k$ box classification and $4k$ box regression outputs. The box regression outputs correspond to the coordinates of the $k$ region proposals while the box classification represents the objectness score, \emph{i.e.,} the probability that each proposal contains the target object (car) or not. The faster R-CNN is trained with a multitask loss function comprising classification and regression components \begin{equation} L\{p_i,t_i\} = \frac{1}{N_{cls}}\sum_i \Big(L_{cls}(p_i,p_i^*) \Big) + \lambda \frac{1}{N_{reg}} \sum_i \Big(p_i^*L_{reg}(t_i,t_i^*) \Big) \label{eq:rcnn} \end{equation} where $N_{cls}$ is the mini-batch size, $N_{reg}$ refers to the number of anchor locations, $p_i$ is the predicted probability of anchor $i$ being an object, $p_i^*$ is the ground truth probability, $t_i$ is the predicted bounding box vector, $t_i^*$ is the actual bounding box vector, and $\lambda$ is the balancing parameter. The car detection imagery was collected with a GoPro Hero Black Edition-3 mounted on a DJI Phantom-2 quadcopter. The paper reported a car detection accuracy of 94.94\% and demonstrated the robustness of the method to scale, orientation, and illumination variations. For a simple exposition, the faster R-CNN architecture is shown in Fig.\ref{fig:fasterrcnn}. \begin{figure}[h] \centering \includegraphics[width=4.6in]{editor/fasterRCNN.pdf} \caption{Faster R-CNN architecture} \label{fig:fasterrcnn} \end{figure} In \cite{Imagery_5}, the faster R-CNN architecture is applied to maize tassel detection from UAV RGB imagery. Here, different CNN architectures were experimented with to form the shared layers between the RPN and fast R-CNN detector modules. The paper reported higher accuracy with ResNet \cite{resnet} than with VGGNet for an image resolution of $600\times600$ and a UAV altitude of 15 m. The faster R-CNN architecture was compared with You Only Look Once (YOLOv3) \cite{YOLOv3} for car detection from UAV imagery in \cite{Imagery_6}. YOLOv3 is an advancement over its predecessors YOLOv1 \cite{YOLOv1} and YOLOv2 \cite{YOLOv2}. Unlike its predecessors, YOLOv3 can perform multi-label classification of the detected object. Secondly, the bounding box prediction assigns an objectness score of 1 to the predicted box that overlaps the ground truth box by more than a predefined threshold. In this way, YOLOv3 assigns one bounding box per ground truth object. Additionally, YOLOv3 predicts bounding boxes at 3 different scales. Lastly, it adopts a 53-layered CNN feature extractor named Darknet-53.
The study found that both YOLOv3 and faster R-CNN performed comparably well in classifying cars in the imagery, although YOLOv3 outperformed faster R-CNN in processing time and sensitivity, \emph{i.e.,} the ability to identify all the cars in the image. In \cite{Acoustic_1}, a \ac{PS-DNN} is used for voice identification of people in emergency rescue missions. The microphone array embedded onboard a Parrot Bebop UAV is used for collecting acoustic data. The PS-DNN is posed as a multitask learning framework to achieve two simultaneous tasks - sound source separation and sound source identification. The PS-DNN for multitask learning is a feedforward neural network with partially shared hidden layers between the two sub-networks. Mel filter bank feature vectors, obtained by applying a windowed \ac{STFT} to the acoustic signals, are fed as input to the PS-DNN. The network was trained with the Adam optimizer \cite{adam} at a learning rate of $2\times10^{-4}$. The study demonstrated promising accuracy when a partially annotated dataset was employed. Three \ac{ESC} models - CNN, RNN, and \ac{GMM} - were evaluated in \cite{Acoustic_2} to detect commercial drones in real noisy environments. The dataset consisted of ordinary real-life noises and sounds from commercial drones such as the 3DR Solo, DJI Phantom-3, DJI Phantom-4, and DJI Inspire. The study demonstrated the RNN outperforming the CNN and GMM models. The RNN architecture is a bidirectional \ac{LSTM} with 3 layers and 300 LSTM units. An early-stopping strategy was adopted in the training phase such that if the accuracy and loss do not improve after 3 epochs, the training is stopped. The RNN exhibited good generalization over unseen data types with an F-score of 0.6984 and balanced precision and recall, while the CNN produced false positives. The GMM, on the other hand, exhibited better detection performance than the CNN but low F-scores, deterring practical use. Drone identification based on acoustic fingerprints using a \ac{CNN}, \ac{RNN}, and \ac{CRNN} is presented in \cite{Acoustic_3}. The CRNN \cite{CRNN} exploits the advantages of both CNNs and RNNs to extract spatio-temporal features. The three different architectures were utilized to extract unique acoustic signatures of the flying drones. The authors collected the drone acoustic dataset by recording the sound produced by the drones' propellers while flying them in an indoor environment. Two types of UAVs from the Parrot family, Bebop and Mambo, were utilized in this study. The neural networks classify the audio input as drone or not drone. The work showed the CNN outperforming both the RNN and CRNN in terms of accuracy, precision, F1-score, and recall, while the RNN required less training time. However, the performance of the RNN was very poor on all counts, which could be attributed to the short-duration audio clips as opposed to long sequential data. The CRNN, however, outperformed the RNN and exhibited performance comparable to that of the CNN with the added benefit of less training time. The authors also extended their work to multi-label classification to identify the audio clips as Bebop, Mambo, and Unknown. In this task again, a performance trend similar to that of the binary classification was observed. \subsection{UAS Path Planning and Situational Awareness} \label{sec:sa} A CNN-based controller strategy for autonomous indoor UAV navigation is considered in \cite{PSA_1}.
The limited precision of \ac{GPS} in indoor environments and the inability to carry heavyweight sensors render indoor navigation a challenging task. The CNN aims to learn a controller strategy that mimics an expert pilot's navigation decisions. A dataset of seven unique indoor locations was collected with a single forward-facing camera onboard a Parrot Bebop drone. The classifier is trained to return flight commands - Move Left, Move Right, Move Forward, Spin Left, Spin Right, and Stop - by training with manually labeled expert flight commands. The CNN classifier followed the CaffeNet \cite{caffenet} architecture with five convolutional layers and three fully connected layers. The classifier was trained on an NVIDIA GTX 970M \ac{GPU} with NVIDIA cuDNN \cite{cudnn}. The trained classifier was tested on a combination of familiar and unseen test environments with different objects, lighting, and geometry. The classifier achieved success rates in the range of 60\%-80\% for the test locations, implying acceptable robustness in flying autonomously through buildings with different objects and geometry. An interesting approach to UAV navigation is adopted in \cite{PSA_2}, where the UAV is taught to fly by crashing. Here, the authors create a crash dataset by crashing the UAV $11500$ times under different scenarios, in addition to non-crash data sampled from the same trajectories. In other words, the drone is allowed to learn not to collide with objects by crashing. The collision data is collected by placing the Parrot AR.Drone 2.0 in a random location, from which it is allowed to take off in a random direction and follow a straight-line path until the collision. This way, the model is allowed to learn whether going straight in a specific direction is good or not. The network architecture adopted AlexNet \cite{alexnet} pre-trained on ImageNet \cite{imagenet}. The pre-trained weights act as initialization for the network weights, rather than random initialization, except for the last fully connected layer. The AlexNet architecture involves five convolutional layers and three fully connected layers. The final layer adopts the binary softmax activation function, which classifies the navigational actions for the drone. Given an input image, the network decides whether to go left, right, or straight. Experimental demonstrations showed the efficacy of this supervised learning approach in avoiding glass walls/doors, corridors, and hallways, in contrast to an image depth estimation method. A regression CNN for indoor navigation is proposed in \cite{regCNN}. Autonomous indoor navigation is enabled by predicting the distance to collision based on the visual input from the onboard monocular camera. The authors adopt a self-supervised approach to collect an indoor flight dataset annotated with the distance to the nearest obstacle in three different diverging directions. The automated annotation is enabled with the help of three pairs of infrared and ultrasonic sensors mounted on the UAV, pointing in different directions with respect to the camera's field of view. The regression CNN follows a two-stream architecture with the first two layers of the streams similar to those of the AlexNet CNN. The two streams are fused to concatenate the feature maps from the streams, followed by processing with a convolutional layer similar to the third convolutional layer of AlexNet.
The two subsequent convolutional layers in the single-stream section also adopt the last two convolutional layers of AlexNet, except that the classifier unit of AlexNet is replaced by a single fully-connected regression layer. The training of the regression CNN was performed with \ac{SGD} with momentum over 30 epochs with a mini-batch size of 128. The implementation and training were performed in MATLAB on a desktop server with an Intel Xeon E5-2630 processor, 64GB of RAM, and a GTX1080 \ac{GPU}. The UAV is a Parrot AR-Drone 2.0 with a 720p forward-facing camera onboard. During the experiments, a WiFi connection is established between the UAV and a laptop with an Intel Core i7-6700HQ, 16GB of RAM, and a GTX1070 \ac{GPU} to perform the CNN inference and motion planning. The authors compared the performance of the proposed regression CNN against the two previously discussed state-of-the-art schemes \cite{PSA_1} and \cite{PSA_2}. The regression CNN demonstrated 4.6$\times$ and 1.7$\times$ longer continuous navigation time without collision compared to \cite{PSA_1} and \cite{PSA_2}, respectively. A \ac{MAV}-assisted supervised deep learning approach for ground robot path planning to perform search and rescue operations is proposed in \cite{aerialSearchCNN}. The path planning is executed in three stages. The initial stage involves a human operator flying the MAV in vision-assisted mode to localize a goal location such as a ground robot or a victim. During this initial flight, the camera imagery from the MAV is collected for initial terrain classification. The terrain is mapped to obtain a precise elevation map by monocular 3D reconstruction. The CNN classifier is trained on-the-spot without any \emph{a priori} information. The on-the-spot classifier training involves an operator flying the MAV and labeling a few regions of interest from the live camera imagery. Many training patches are gathered from the few labeled regions by cropping patches that fall on previously labeled areas. The authors of \cite{aerialSearchCNN} report an on-the-spot CNN training time of 10--15 min. After training, the patches are classified and projected onto the terrain map. Once the goal location is found, the second stage involves an autonomous vision-guided flight to a series of waypoints. The path exploration follows an exhaustive search over the candidate paths in order to effectively reduce the response time. The authors demonstrated the efficacy of their approach via simulation as well as field trials. The MAV for the field trials was a custom-built quadrotor with an onboard \ac{IMU}, a downward-facing camera, an onboard Odroid U3 quad-core computer, and PIXHAWK autopilot software. The ground robot for the experiment was a Bluebotics Absolem, which is capable of driving over rough terrain. The field trials with canyon and driveway scenarios demonstrated feasible and efficient path exploration over multiple terrain classes, elevation changes, and untraversable terrain. Another CNN architecture - whereCNN - was proposed in \cite{ground2aerialCNN} to perform ground-to-aerial geolocalization. The method aims at mapping a street-view query image to its corresponding location on a city-scale aerial-view image. The CNN architecture for cross-view image matching is inspired by the Siamese network \cite{siamese} and comprises two identical CNNs that learn a shared deep representation across pairs of street-view and aerial-view images.
A contrastive loss function is used as the overall loss to train the whereCNN, such that matched pairs are penalized by their squared Euclidean distance, while mismatched pairs are penalized by the squared difference between a margin and their distance whenever that distance is smaller than the margin. A smaller margin causes the network to be influenced by harder negatives. The dataset comprises 78k pairs of Google street view images along with their corresponding aerial views. The whereCNN was trained for 4 days on an NVIDIA Grid K520 \ac{GPU}. The authors demonstrated that the whereCNN, trained without sharing parameters between the Siamese network entities, generalizes reasonably well to unseen data. The method exhibited a cross-view matching accuracy of over 22\% for Charleston, San Diego, and San Francisco. In Table \ref{tab:dluav}, we summarize the deep learning techniques that enable autonomous UAV applications. \begin{table*}[!h] \caption{Deep learning for UAV autonomy} \centering \def\arraystretch{1.5}% \begin{tabular}{|p{3.8 cm}|p{2.6 cm}|p{4cm}|} \hline \textbf{Proposed solution} & \textbf{Architecture} & \textbf{Application}\\ \hline Giusti et al. \cite{Imagery_1} &CNN &Outdoor UAV navigation \\ \hline Li et al. \cite{Imagery_3} &AlexNet &Palm tree detection\newline and counting \\ \hline Xu et al. \cite{Imagery_4} &Faster R-CNN &Car detection from\newline low-altitude UAV imagery \\ \hline Liu et al. \cite{Imagery_5} &Faster R-CNN &Maize tassel detection \\ \hline Benjdira et al. \cite{Imagery_6} &Faster R-CNN, \newline YOLOv3 &Car detection from\newline UAV imagery \\ \hline Morito et al. \cite{Acoustic_1} &PS-DNN &Emergency rescue mission \\ \hline Jeon et al. \cite{Acoustic_2} &RNN &Drone identification \\ \hline S. Al-Emadi et al. \cite{Acoustic_3} &CNN, RNN, CRNN &Drone identification \\ \hline D. K. Kim and T. Chen \cite{PSA_1} &CaffeNet &Indoor UAV navigation \\ \hline Gandhi et al. \cite{PSA_2} &AlexNet &Indoor UAV navigation \\ \hline A. Kouris and C. Bouganis \cite{regCNN} &CNN &Indoor UAV navigation \\ \hline Delmerico et al. \cite{aerialSearchCNN} &CNN &UAV-assisted ground \newline robot navigation \\ \hline Lin et al. \cite{ground2aerialCNN} &whereCNN &Ground-to-aerial geolocalization \\ \hline \end{tabular} \\ \label{tab:dluav} \end{table*} \subsection{Open Problems and Challenges} In Section \ref{sec:DeepLearning}, we discussed state-of-the-art deep learning techniques for achieving various \ac{UAS} tasks. Specifically, we discussed how deep learning can be leveraged to accomplish feature extraction from sensor data, planning, and situational awareness. However, several open research challenges remain on the road to achieving complete autonomy of \ac{UAS} tasks. A few of these are listed below: \begin{enumerate} \item \emph{Lack of realistic datasets}: The gap between simulated and actual deployment scenarios poses a severe challenge to deployed deep learning solutions. The diverse scenarios that can confront a UAV in a realistic setting - varied obstacles in the traversed path, occluded or visually artifacted targets in an object detection task, the effects caused by the onboard sensors, etc. - are hard to model in a virtual setting. In addition, generating such a realistic dataset from actual UAVs and annotating it is a laborious task. \item \emph{Fast deep learning:} Generalizing a supervised deep learning solution to unseen data, i.e., data not represented by the training dataset, is an open research challenge.
On-the-spot learning, i.e., training the neural network on-the-fly with limited snapshots of the scenario, would allow the model to continue learning new scenarios without forgetting past knowledge. The recently introduced model agnostic meta learning (MAML) \cite{maml} opens the door to developing such fast learning techniques. \item \emph{Resource-heavy deep learning techniques:} The computational complexity of deep learning architectures is another significant hurdle that poses severe constraints on the latency, weight, flight time, power consumption, and cost. Denser architectures require powerful computational platforms such as \acp{GPU} that often exceed the prebuilt onboard computational capacity of UAVs, requiring auxiliary computational units. Such additional computational platforms increase the cost, weight, and power consumption of the UAVs while reducing flight time. \item \emph{Vulnerability to cyberattacks:} The vulnerability of deployed deep learning techniques to various security attacks is a cause of serious concern. Spoofing attacks, signal jamming, and identity forging, among others, can disrupt the intended UAV operation, leading to asset loss and damage. Integrating adversarial learning techniques into the application-specific deep learning approaches can be one way to tackle such security threats. \end{enumerate} \section{Reinforcement Learning for UAS Autonomy} \label{sec:RL} Reinforcement learning provides a learning framework allowing agents to act optimally via sequential interactions with their environments. In comparison to supervised or unsupervised learning, reinforcement learning allows the agent to leverage its own experiences derived from environmental interactions. Additionally, reinforcement learning provides a means to specify goals for the agent by means of a reward and penalty scheme. These characteristics of reinforcement learning have led to many research efforts on its application to autonomous \ac{UAS} applications. Reinforcement learning has been applied primarily to lower-level control tasks that regulate the \ac{UAV}'s velocity and attitude, as well as to navigation and other higher-level tasks. \subsection{UAS Control System} Stable control of a \ac{UAS} is a complex task due to nonlinear flight dynamics. Traditional control approaches such as \ac{PID} controllers have been successfully used on \ac{UAS} for attitude and velocity control in stable environments. However, the performance of these controllers can deteriorate in dynamic or harsh environments. The main disadvantages of \ac{PID} control, being a constant-parameter feedback controller, are that the control efforts are purely reactive and that the controller has no \emph{a priori} knowledge of, or ability to learn about, the environment. Techniques from adaptive and robust control can provide insights on designing controllers that can adapt to dynamic environments and operate effectively in the presence of uncertainties. However, a shortcoming of these traditional control techniques is that they typically require a mathematical model of the environmental dynamics and do not explicitly learn from past experiences. Reinforcement learning algorithms present a potential solution to the problem of \ac{UAS} control due to their ability to adapt to unknown environments. There have been many research efforts focusing on the application of reinforcement learning to control systems on a \ac{UAS} \cite{Waslander_2005,BouAmmar_2010,dosSantos_2012,Zhang_2016_MPC,Hwangbo_2017,Lambert_2019,Koch_2019,Bohn_2019}.
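For concreteness, the sketch below shows the constant-parameter \ac{PID} update that such learned controllers are typically benchmarked against; the gains and interface are illustrative. Note that the gains are fixed at design time, which is precisely the lack of adaptation discussed above.
\begin{verbatim}
class PID:
    """Constant-parameter PID controller (illustrative baseline)."""
    def __init__(self, kp, ki, kd, dt):
        self.kp, self.ki, self.kd, self.dt = kp, ki, kd, dt
        self.integral = 0.0
        self.prev_error = 0.0

    def update(self, setpoint, measurement):
        error = setpoint - measurement
        self.integral += error * self.dt                  # accumulated error
        derivative = (error - self.prev_error) / self.dt  # error rate of change
        self.prev_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative
\end{verbatim}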
Much of the research has focused on quadrotor \acp{UAV}; however, some of the early works involved autonomous helicopters. Many of the reinforcement learning based control systems discussed in this section are for attitude control of the UAV, but some of the works consider trajectory tracking and maneuvering as well. Additionally, several algorithmic approaches have been studied, including both online and offline methods operating in conjunction with traditional control algorithms as well as \ac{DRL} based approaches. Early works applying reinforcement learning to \ac{UAV} control problems focused on autonomous helicopters \cite{Bagnell_2001, Kim_nips_2004_autonomous_helicopter, Ng_autonomous_inverted_helicopter_2004, Abbeel_2006}. In these works, data was collected from a human pilot flying a remote control helicopter and the dynamics were learned offline. From the learned dynamics, reinforcement learning algorithms were used to design controllers for various maneuvers including hovering, trajectory tracking, and several advanced maneuvers including inverted hovering, flips, rolls, tunnels, and others from the \ac{AMA} remote control helicopter competition. The first work that used reinforcement learning for quadrotor UAV control did so for altitude control \cite{Waslander_2005}. A model-based reinforcement learning algorithm that rewards accurate tracking and good damping performance was utilized to find an optimal control policy. To benchmark against a traditional approach, an integral sliding mode controller was also implemented. Tests conducted on \ac{STARMAC} quadrotors showed both the reinforcement learning and integral sliding mode controllers to have comparable performance, both significantly exceeding that of traditional linear control strategies. In \cite{BouAmmar_2010}, \ac{FVI} is used to design a velocity control system for a quadrotor UAV. The reinforcement learning FVI controller was compared to a cascaded velocity and attitude controller designed using nonlinear control techniques. The performance of each controller was compared using numerical simulations in MATLAB/SIMULINK. While both controllers produced satisfactory results, the reinforcement learning controller was outperformed in terms of settling time but had a lower percent overshoot. The authors stated that a non-parametric approach to value function estimation, such as the use of a wavelet network, may have resulted in better performance for the reinforcement learning controller. The authors emphasized that an advantage of the reinforcement learning controller is that it does not require any prior mathematical knowledge of quadrotor dynamics to yield satisfactory behavior. In \cite{dosSantos_2012}, a Learning Automata reinforcement learning algorithm called \ac{FALA} was used to learn the optimal parameters of nonlinear controllers for trajectory tracking and attitude control. Traditional approaches such as \ac{PID}, sliding mode, and backstepping controllers were used to benchmark against \ac{FALA}. The performance of the controllers was analyzed in simulation under varying nonlinear disturbances including wind and ground effects. The reinforcement learning tuned controllers outperformed the mathematically tuned controllers in terms of tracking errors. In \cite{Zhang_2016_MPC}, \ac{MPC} is used in an off-policy guided policy search to train a deep neural network policy for UAV obstacle avoidance.
During training, MPC is used to generate control actions for the UAV using knowledge of the full state; these actions, together with the state observations, are used to train the policy network in a supervised learning setting. During testing, only the state observations are available to the policy neural network. Simulations were conducted that demonstrated that the proposed approach was able to successfully generalize to new environments. In \cite{Hwangbo_2017}, a neural network based policy trained using reinforcement learning is used for trajectory tracking and recovery maneuvers. The authors proposed a new reinforcement learning method that uses deterministic policy optimization with natural gradient descent. Experiments were conducted both in simulation and on a real quadrotor UAV, the Ascending Technologies Hummingbird, demonstrating the effectiveness of the proposed approach. In simulations, the proposed method outperformed the popular algorithms \ac{TRPO} and \ac{DDPG}. The trajectory tracking test resulted in a small but acceptable steady-state error. Additionally, a recovery test where the quadrotor was manually thrown upside down demonstrated autonomous UAV stabilization. A benefit of the proposed algorithm is low computation time; an average time of 6 $\mu$s was reported. In \cite{Lambert_2019}, deep \ac{MBRL} is used for low-level control of a quadrotor UAV. Deep \ac{MBRL} is used to learn a forward dynamics model of the quadrotor and then \ac{MPC} is used as a framework for control. The algorithms were evaluated using a Crazyflie nano quadrotor. Stable hovering for 6 seconds using 3 minutes of training data was achieved, emphasizing the ability to generate a functional controller with limited data and without assuming any \emph{a priori} dynamics model. In \cite{Koch_2019}, multiple neural network based reinforcement learning algorithms are evaluated for attitude control of UAVs. The algorithms that were evaluated include \ac{DDPG}, \ac{TRPO}, and \ac{PPO}. The reinforcement learning algorithms were compared against \ac{PID} control systems for attitude control of UAVs in a simulation environment. The authors also developed an open-source training environment built on OpenAI Gym and evaluated it using the Gazebo simulator. The simulations indicated that the agents trained with PPO outperformed a tuned PID controller in terms of rise time, overshoot, and average tracking error. In \cite{Bohn_2019}, \ac{PPO} is applied for attitude control of fixed-wing UAVs. The \ac{PPO} method was chosen largely due to the success reported in \cite{Koch_2019}. The PPO controller was trained in a simulation environment to control the attitude (pitch, roll) and airspeed of the UAV to the specified setpoints. The results showed that the DRL controller was able to generalize well to environments with turbulence. The advantages of the DRL controller were emphasized in the high turbulence scenarios, where it outperformed the PID controller in multiple performance metrics including success percentage, rise time, settling time, and percent overshoot. A DRL robust control algorithm for quadrotor \acp{UAV} is presented in \cite{Wang_2019_DPG_IC}. The algorithm uses \ac{DPG}, which is an actor-critic method. Furthermore, similar to classical control design, DPG is augmented with an integral compensator to eliminate steady-state errors. Additionally, a two-phase learning protocol consisting of an offline and an online learning phase is defined for training the model.
The offline training is completed using a simplified quadrotor model, but the robust generalization capabilities are validated in simulation by changing model parameters and adding disturbances. The capability of the model to learn an improved policy online is demonstrated with faster response time and less overshoot compared to the original policy learned offline. \subsection{Navigation and Higher Level Tasks} In this section, the use of reinforcement learning for higher level planning tasks such as navigation, obstacle avoidance, and landing maneuvers is studied. In \cite{Imanberdiyev_2016}, a model-based reinforcement learning algorithm is used as a high level control method for autonomous navigation of quadrotor \acp{UAV} in an unknown environment. A reinforcement learning algorithm called TEXPLORE \cite{texplore_paper} is utilized to perform a targeted exploration of states that are both uncertain in the model and promising for the final policy. This is in contrast to an algorithm such as Q-learning that attempts to exhaustively explore the state space. TEXPLORE uses decision trees and random forests to learn the environmental model. In particular, the decision trees are used to predict the relative state transitions and transition effects. A random forest is used to learn several models of the environment, since a single decision tree may learn an inaccurate model. The final model is averaged over the decision trees in the random forest. TEXPLORE then performs its targeted exploration using an algorithm called \ac{UCT}. The authors implement and compare the TEXPLORE algorithm to Q-Learning for a navigation task. The navigation task involves the UAV traveling from a start to an end state under battery constraints, i.e., the UAV requires a recharge during the mission in order to make it to the goal. The navigation task is performed in a simulated grid environment implemented using ROS and Gazebo. It is shown that the TEXPLORE algorithm learns effective navigation policies and outperforms the Q-Learning algorithm considerably. In \cite{pham2018autonomous}, a combined \ac{PID} and Q-Learning algorithm for navigation of a \ac{UAV} in an unknown environment is presented. The problem is modeled as a finite \ac{MDP}. The environment is modeled as a finite set of spheres with the centers forming a grid, the state of the \ac{UAV} is its approximate position, i.e., one of the points on the grid, and the actions available to the agent are to head North, South, East, or West. In this work, a constant flight altitude is assumed and thus the state space is two dimensional. The objective of the agent is to navigate to a goal position following the shortest path in an unknown environment. The \ac{PID} and Q-Learning algorithms are used in conjunction to navigate the \ac{UAV} to the goal position in the unknown environment. The Q-Learning algorithm and an $\epsilon$-greedy policy are used by the agent to select the next action given the current state. The action is then translated to a desired position and input to the \ac{PID} controller, which outputs control commands to the \ac{UAV} to complete the desired action. The proposed algorithm was implemented and tested both in simulation and on a Parrot AR Drone 2.0. In both simulation and experimentation, the \ac{UAV} was able to learn the shortest path to the goal after 38 episodes.
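A tabular Q-Learning loop with an $\epsilon$-greedy policy of this kind can be sketched as follows; the grid size, rewards, and hyperparameters are illustrative and not those of \cite{pham2018autonomous}, where the selected action is additionally handed to the \ac{PID} controller rather than applied directly.
\begin{verbatim}
import random
import numpy as np

ACTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0)]  # North, South, East, West
GRID, GOAL = 10, (9, 9)
Q = np.zeros((GRID, GRID, len(ACTIONS)))
alpha, gamma, epsilon = 0.1, 0.9, 0.1

def step(state, a):
    x = min(max(state[0] + ACTIONS[a][0], 0), GRID - 1)
    y = min(max(state[1] + ACTIONS[a][1], 0), GRID - 1)
    return (x, y), (100.0 if (x, y) == GOAL else -1.0)  # -1 favors short paths

for episode in range(500):
    s = (0, 0)
    for _ in range(1000):                      # step cap per episode
        if random.random() < epsilon:          # explore
            a = random.randrange(len(ACTIONS))
        else:                                  # exploit
            a = int(np.argmax(Q[s[0], s[1]]))
        s2, r = step(s, a)
        # one-step Q-Learning update
        Q[s[0], s[1], a] += alpha * (r + gamma * np.max(Q[s2[0], s2[1]])
                                     - Q[s[0], s[1], a])
        s = s2
        if s == GOAL:
            break
\end{verbatim}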
In \cite{pham2018autonomous_functionapprox}, the authors of \cite{pham2018autonomous} employ Q-Learning with function approximation in conjunction with the previously described \ac{PID} and Q-Learning control algorithm for \ac{UAV} navigation tasks. Function approximation is used to handle the large state space and to provide faster convergence. Fixed sparse representation is used to represent the Q table as a parameter vector. Compared to the work in \cite{pham2018autonomous}, the state representation consists of the relative distance of the \ac{UAV} to the goal and the relative distances to obstacles in four directions obtained using onboard radar. Both simulated and real tests demonstrated faster convergence and \ac{UAV} navigation to the goal position. In \cite{Wang_2019_Navigation}, the authors introduce a DRL algorithm, a variant of \ac{RDPG} called Fast-RDPG, for autonomous UAV navigation in large complex environments. Fast-RDPG differs from RDPG in that it uses non-sparse rewards, allowing the agent to learn online and speeding up the convergence rate. The reward function design, which includes a transition reward (i.e., progress towards the goal), an obstacle proximity penalty, a free-space reward, and a time-step penalty, is discussed as well. The Fast-RDPG algorithm outperforms RDPG and DDPG in terms of success, crash, and stray rates. Generalization of the Fast-RDPG algorithm to environments of different sizes, different target altitudes, and 3D navigation is discussed as well. In \cite{singla_drl_oa_2019}, a Deep Recurrent Q-Network with temporal attention is proposed as a \ac{UAV} controller for obstacle avoidance tasks. The model uses a conditional generative adversarial network to predict a depth map from monocular RGB images. The predicted depth map is then used to select the optimal control action. The temporal attention mechanism is used to weight the importance of a sequence of observations over time, which is important for obstacle avoidance tasks. The performance of the proposed approach was compared in simulations against the Deep Q-Network, \ac{D3QN}, and Deep Recurrent Q-Network without temporal attention algorithms and showed superior performance. In \cite{Ramos_2018}, a \ac{DRL} algorithm called Deep \ac{DPG} is used to enable advanced autonomous UAV maneuvering and landing on a moving platform. The authors integrate the Deep \ac{DPG} algorithm into their reinforcement learning simulation framework implemented using Gazebo and \ac{ROS}. The training phase of the proposed approach was conducted in simulation and the testing phases were conducted in both simulation and real flight. The experiments demonstrated the feasibility of the proposed algorithm in completing the autonomous landing task. Additionally, this work showed that agents trained in simulation are capable of performing effectively in real flights. In \cite{uav_auto_land_drl_polvara_2018}, a \ac{DRL} based approach to perform autonomous landing maneuvers is presented. The approach relies on a single downward-facing camera as the sole sensor. The landing maneuver is considered as a three-phase problem: landmark detection, descent maneuver, and touchdown. A hierarchy of two independent \acp{DQN} is proposed as a solution for the landmark detection and descent maneuver problems. The touchdown maneuver is not considered in the research; however, the authors indicated that it may be solved using a closed loop \ac{PID} controller. A \ac{DQN} is employed for the landmark detection component and a double \ac{DQN} is used for the descent. Additionally, the authors propose a new form of prioritized experience replay called \emph{partitioned buffer replay} to handle sparse rewards. Various simulations were conducted that indicated that the proposed \ac{DRL} approach was capable of performing the landing maneuver and could effectively generalize to new scenarios.
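The target computation at the heart of a double \ac{DQN}, as used for the descent phase, can be sketched generically as follows (assuming PyTorch; the network definitions, exploration policy, and the paper's partitioned buffer replay are omitted). The online network selects the next action while the target network evaluates it, decoupling selection from evaluation.
\begin{verbatim}
import torch

def double_dqn_targets(online, target, batch, gamma=0.99):
    """Generic double-DQN targets; `batch` holds replay-buffer tensors."""
    s, a, r, s2, done = batch
    with torch.no_grad():
        a_star = online(s2).argmax(dim=1, keepdim=True)   # action selection
        q_next = target(s2).gather(1, a_star).squeeze(1)  # action evaluation
        y = r + gamma * (1.0 - done) * q_next
    q = online(s).gather(1, a.unsqueeze(1)).squeeze(1)
    return q, y  # train with, e.g., a Huber loss between q and y
\end{verbatim}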
In Table \ref{tab:rluav}, we summarize the reinforcement learning techniques that enable autonomous UAV applications. \begin{table*}[!h] \caption{Reinforcement learning for UAS autonomy} \centering \def\arraystretch{1.5}% \begin{tabular}{|p{3.4 cm}|p{3.4 cm}|p{4 cm}|} \hline \textbf{Proposed Solution} & \textbf{Reinforcement Learning Technique} & \textbf{Application}\\ \hline J. A. Bagnell and J. G. Schneider \cite{Bagnell_2001} &Model-based, PEGASUS &Helicopter control \\ \hline Kim et al. \cite{Kim_nips_2004_autonomous_helicopter} &Model-based, PEGASUS &Helicopter hovering and maneuvers \\ \hline Ng et al. \cite{Ng_autonomous_inverted_helicopter_2004} &Model-based, PEGASUS &Helicopter inverted hovering \\ \hline Abbeel et al. \cite{Abbeel_2006} &Differential Dynamic Programming &Helicopter aerobatic maneuvers \\ \hline S. L. Waslander and G. Hoffmann \cite{Waslander_2005} &Model-based; \newline \ac{LWLR},\newline Policy Iteration &Quadrotor altitude control \\ \hline Bou-Ammar et al. \cite{BouAmmar_2010} &Fitted Value Iteration &Quadrotor velocity control \\ \hline S. R. B. dos Santos et al. \cite{dosSantos_2012} &Finite Action-set Learning Automata &Quadrotor trajectory tracking and attitude control \\ \hline Zhang et al. \cite{Zhang_2016_MPC} &MPC Guided Policy Search &Quadrotor obstacle avoidance \\ \hline Hwangbo et al. \cite{Hwangbo_2017} &Neural network policy &Waypoint tracking and recovery tests \\ \hline Lambert et al. \cite{Lambert_2019} &Deep model-based &Hovering \\ \hline Koch et al. \cite{Koch_2019} &DDPG, TRPO, PPO &Attitude control \\ \hline Bøhn et al. \cite{Bohn_2019} &PPO &Attitude control \\ \hline Y. Wang et al. \cite{Wang_2019_DPG_IC} &DPG &UAV control \\ \hline Imanberdiyev et al. \cite{Imanberdiyev_2016} &Model-based, TEXPLORE &UAV navigation \\ \hline Pham et al. \cite{pham2018autonomous} &Q-Learning &UAV navigation \\ \hline Pham et al. \cite{pham2018autonomous_functionapprox} &Q-Learning with function approximation &UAV navigation \\ \hline C. Wang et al. \cite{Wang_2019_Navigation} &Fast-RDPG &UAV navigation \\ \hline Singla et al. \cite{singla_drl_oa_2019} &Deep recurrent Q network with temporal attention &Obstacle avoidance \\ \hline A. Rodriguez-Ramos et al. \cite{Ramos_2018} &DDPG &Landing on a moving platform \\ \hline Polvara et al. \cite{uav_auto_land_drl_polvara_2018} &DQN &Autonomous landing \\ \hline \end{tabular} \\ \label{tab:rluav} \end{table*} \subsection{Open Problems and Challenges} There are still several open problems and challenges associated with reinforcement learning based autonomous UAV solutions. Many problems and challenges are associated with the transition from simulation to hardware. This is evidenced by the limited results on the performance of reinforcement learning solutions performing high-complexity planning tasks in real-life tests. A challenge associated with this transition is managing the reality gap between simulation and real-life testing.
Additionally, as deep reinforcement learning solutions are utilized for autonomy, integration onto an embedded UAV platform can become challenging due to the computational requirements of the algorithms and the \ac{SWaP} constraints of the UAV. Other challenge areas include developing algorithmic solutions that enable higher degrees of autonomy. For example, more complex tasks and missions may require the UAV to cooperate with other autonomous systems and/or humans via \acp{NUI}. Also, the majority of the published works consider scenarios with a static mission objective in dynamic environments; however, in general, the autonomous agent will need to be able to operate in scenarios where both mission objectives and the environment are dynamic. It is also possible that the mission will consist of multiple objectives that need to be completed simultaneously. \section{Simulation Platforms for UAS} \label{sec:Simulation} The ability to accurately simulate \ac{UAS} in realistic operational environments is an invaluable capability. This is largely due to the fact that real hardware-based testing of \ac{UAS} is both a time-consuming and expensive process. The potential for injuries, damages, or losses is among the main challenges associated with hardware-based testing. Additional challenges and constraints include limited battery life and the laws and regulations of outdoor flight. These challenges are exacerbated in the context of deep learning and reinforcement learning based autonomy solutions, as they require large amounts of training data and experiences in order to learn effective behaviors and are also often unstable during their training phases. Additionally, it can be challenging and/or costly to collect ample training data for machine learning based autonomous \ac{UAS} algorithms. Physically and visually realistic \ac{UAS} simulations are potential solutions to several of these challenges. For example, a realistic visual simulation of an operational environment could be used to create a dataset for a deep learning algorithm. Furthermore, simulation provides a means to test \ac{UAS} in scenarios that can be hard to create in real life, e.g., failure modes, harsh environmental conditions, etc. Simulation also provides a means for establishing easily repeatable environments for algorithm comparisons and software regression testing. \subsection{Simulation Suites} This section presents a survey of popular simulation software platforms for \ac{UAS}. Previous surveys conducted in \cite{UAV_flight_controller_simulator_survey, Simulation_Hentati, Mairaj_2019} introduced the majority of available \ac{UAS} simulation platforms for various applications. The discussion in this section focuses primarily on open-source simulators that appear useful for research and development of autonomous \ac{UAS} applications. Gazebo \cite{Gazebo_website, Gazebo_paper} is an open-source robotics simulator capable of simulating multiple robots in both indoor and outdoor environments. This is enabled by its integration with high-performance physics engines, e.g., \ac{ODE}, Bullet, Simbody, and \ac{DART}, as well as its ability to model various sensors, noise, and environmental effects. The Gazebo architecture is modular: worlds and objects are defined using \ac{SDF} files, while sensor and environmental effect modules are added as plugins.
\ac{OGRE} \cite{Ogre_website} is utilized by Gazebo for high-fidelity visual rendering of the environment that captures different textures and lighting. Gazebo is also one of the default simulators integrated with the popular robotics middleware package \ac{ROS}. By itself, Gazebo does not provide the capability to simulate \acp{UAV}; however, multiple works have defined the necessary model, sensor, and controller plugins to facilitate \ac{UAV} simulation, as discussed herein. An example of \ac{UAV} simulation using Gazebo is shown in Figure \ref{fig:gazebo}. In \cite{hector_quadrotor_paper,hector_quadrotor_wiki}, simulation of quadrotor \acp{UAV} using Gazebo and \ac{ROS} is implemented as an open-source package called hector\_quadrotor. The hector\_quadrotor package provides the geometry, dynamics, and sensor models for quadrotor \acp{UAV}. Sensor models for the \ac{IMU}, barometer, ultrasonic sensor, magnetometer, and \ac{GPS} are provided in addition to the default sensor models of Gazebo such as LIDAR and cameras. \acp{EKF} and cascaded \ac{PID} controllers are implemented and utilized for state estimation and control, respectively. A tutorial example of integrating a LIDAR based \ac{SLAM} algorithm with the simulated \acp{UAV} is included in the package's documentation. RotorS is another open-source \ac{MAV} simulator using Gazebo and \ac{ROS} \cite{RotorS_chapter,rotorS_wiki}. Models of various multirotor \acp{UAV} including the AscTec Hummingbird, AscTec Pelican, and AscTec Firefly are included with the simulator. Default simulator sensors include an \ac{IMU}, a generic odometry sensor, and a visual-inertial sensor. Similar to hector\_quadrotor, RotorS provides a baseline UAV simulation using Gazebo by defining the required UAV, sensor, and controller configuration files and plugins. The RotorS package provides a well documented and functional UAV simulator that a researcher can use for rapid prototyping of new autonomous UAV control algorithms. \begin{figure}[h] \centering \hspace{-1 cm} \includegraphics[width=3 in]{editor/figures/gazebo.png} \caption{UAV simulation in Gazebo \cite{px4_gazebo_pic}} \label{fig:gazebo} \end{figure} In \cite{Koch_2019}, a framework called GymFC for tuning \ac{UAV} flight control systems was introduced. The framework integrates the popular reinforcement learning toolkit OpenAI Gym \cite{OpenAI_Gym} and the Gazebo simulator to facilitate research and development of attitude flight control systems using \ac{DRL}. GymFC defines three layers to provide seamless integration of reinforcement learning based \ac{UAV} control algorithms: the Digital Twin Layer, the Communication Layer, and the Environment Interface Layer. The Digital Twin Layer consists of the simulated \ac{UAV} and environment as well as interfaces to the Communication Layer. The Communication Layer is the interface between the Digital Twin and Environment Interface Layers that implements lower-level functionality to enable control of the \ac{UAV} and the simulation. The Environment Interface Layer implements the environment interface defined by the OpenAI Gym API that the reinforcement learning agent interacts with. In the original work \cite{Koch_2019}, the proposed \ac{DRL} based attitude controllers were only evaluated in simulation.
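The environment interface follows the standard OpenAI Gym reset/step pattern. A minimal sketch of an attitude-control environment in that style is shown below; the observation, action, and surrogate dynamics are stand-ins for illustration, not GymFC's digital twin.
\begin{verbatim}
import gym
import numpy as np
from gym import spaces

class AttitudeEnvSketch(gym.Env):
    """Toy Gym-style attitude environment (illustrative dynamics only)."""
    def __init__(self):
        # observation: angular-rate error per axis; action: four motor commands
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(3,),
                                            dtype=np.float32)
        self.action_space = spaces.Box(-1.0, 1.0, shape=(4,), dtype=np.float32)
        self.error = np.zeros(3, dtype=np.float32)

    def reset(self):
        self.error = np.random.uniform(-1, 1, 3).astype(np.float32)
        return self.error.copy()

    def step(self, action):
        # crude surrogate dynamics: motor commands damp the rate error
        self.error *= np.float32(0.9 - 0.05 * np.tanh(np.mean(action)))
        reward = -float(np.sum(np.abs(self.error)))  # penalize tracking error
        done = bool(np.all(np.abs(self.error) < 1e-2))
        return self.error.copy(), reward, done, {}
\end{verbatim}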
The open-source Neuroflight framework \cite{koch2019neuroflight} has since been introduced for deploying neural network based low-level flight control firmware on real \acp{UAV}. Neuroflight utilizes GymFC for initial training and testing of controllers in a simulation environment and then deploys the trained models to the \ac{UAV} platform. Initial tests of Neuroflight have demonstrated stable flight and maneuver execution while the neural network based controller runs on an embedded processor onboard the \ac{UAV}. The Aerostack software framework \cite{Aerostack_Paper, aerostack_git, Simulation_Sanchez-Lopez} defines an architectural design to enable advanced \ac{UAV} autonomy. Additionally, Aerostack has been used for autonomous \ac{UAV} research and development both in simulation (utilizing the RotorS simulator \cite{RotorS_chapter}) and on hardware such as the Parrot AR Drone. Microsoft AirSim \cite{airsim2017fsr, airsim_git} is an open-source simulator for both aerial and ground vehicles. AirSim provides realistic visual rendering of simulated environments using the Unreal Engine, as shown in Figure \ref{fig:airsim}. AirSim was designed as a simulation platform to facilitate research and development of \ac{AI} enabled autonomous ground and aerial vehicles, which motivates its use when developing deep learning and reinforcement learning \ac{UAS} solutions. The software is cross-platform and can be used on Linux, Windows, and Macintosh operating systems. The AirSim software comes with extensive documentation, tutorials, and \acp{API} for interfacing with vehicles, sensors, and the environment, enabling programmatic control and data collection for model training. Recently, AirSim was used as a platform to host a simulation-based drone racing competition called Game of Drones \cite{madaan2020airsim}. \begin{figure}[h] \centering \hspace{-1 cm} \includegraphics[width=3 in]{editor/figures/AirSimDemo.png} \caption{UAV simulation in AirSim \cite{airsim_pic}} \label{fig:airsim} \end{figure}
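As an example of the vehicle \ac{API}, the sketch below connects to a running simulator instance, commands a takeoff and a waypoint, and saves a camera frame of the kind that could feed a training dataset. It assumes the \texttt{airsim} Python package; the coordinates and camera name are illustrative.
\begin{verbatim}
import airsim

# Connect to a running AirSim instance.
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)

# Take off and fly to a waypoint (NED frame: z = -5 is 5 m altitude).
client.takeoffAsync().join()
client.moveToPositionAsync(10, 0, -5, 3).join()

# Grab a compressed scene image from camera "0" for dataset collection.
png = client.simGetImage("0", airsim.ImageType.Scene)
with open("frame.png", "wb") as f:
    f.write(png)
\end{verbatim}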
A final consideration is that the popular flight control stacks, PX4 and ArduPilot (discussed in detail in section \ref{sec:flightstack}), can both integrate with Gazebo and AirSim for software-in-the-loop and hardware-in-the-loop simulations. The Gazebo interfaces maintained by PX4 are derived from the RotorS project. \subsection{Open Problems and Challenges} Even with the advances made in the realm of UAS simulation, there are still multiple problems and challenges associated with it. The first problem is typical of open-source platforms in any domain: there is no official or industry-accepted standard platform. For example, the two most popular open-source flight control stacks, ArduPilot and PX4, both support multiple simulators, but there is no specified official/default simulator common to them. At this time, it appears that both Gazebo and AirSim have the potential for use in autonomous \ac{UAS} research and development. A challenge associated with the Gazebo simulator is that although it is widely used in \ac{UAS} simulation, it technically does not provide native \ac{UAS} simulation support. Works such as \cite{hector_quadrotor_paper, RotorS_chapter} implement the required plugins, configuration, and baseline controllers to enable \ac{UAV} simulation using Gazebo. Additionally, as is common with open-source software, there is often limited software maintenance, development support, and documentation of the open-source simulators. An additional challenge associated with \ac{UAS} simulation is that there can be steep learning curves associated with advanced usage and software development. It appears to be straightforward to install and run the examples provided by the simulators; however, it may take time to become familiar with simulator configurations, development workflows, and software APIs. For example, a developer may be required to add support for a new \ac{UAV} platform, sensor type, or environment tailored to the research application. This problem could be mitigated to an extent as the use of these platforms becomes widespread and if there is a uniform standard for adding new features that can be made available to the community. An open problem is assessing the reality gap between simulation and real-life deployment. This problem will be further studied as research and development of algorithms for autonomous \ac{UAS} continues. Other open problems are associated with the seemingly limited consideration of \ac{UAV} swarm operation, human interaction via \acp{NUI} or ground control stations, and the communication systems utilized by the UAS. \section{UAV Hardware for Rapid Prototyping} Rapid \ac{UAS} hardware-based prototyping is an essential step in deploying and validating machine learning solutions. Factors such as the unique requirements of the deep learning solution and the cost of \ac{COTS} \ac{UAS} in the commercial market drive the choice of the custom prototyping route. The requirements of deep learning solutions are unique to the problem under consideration, and consequently the needs vary. For instance, an object detection task might require a stable flight platform with a good-quality image sensor, whereas a target tracking or an acoustic-based search and rescue mission might require a maneuverable platform with an image sensor or acoustic sensors onboard, respectively. UAS prototyping for testing deep learning solutions involves several steps, such as choosing the appropriate hardware platform, sensors, computational resources, memory unit, and flight controller software, among others, which depend on the size, weight, and onboard carrying capacity of the UAS platform. This section will serve as a comprehensive guide to choosing the appropriate UAS platform, flight stack software, and computational resources, as well as to the various challenges incurred in UAS prototyping. \subsection{Classification Choice} \label{sec:uasclass} \acp{UAV} are classified based on their wings, size, landing, etc., as seen in the beginning of the chapter (section \ref{subsec:classUAS}). In this section, however, we will focus on fixed-wing and rotary-wing \acp{UAV}. The various UAV classifications will guide the reader in understanding the nuances of the platforms in terms of their hovering, maneuvering, and payload capabilities, among others, allowing application-specific selection. A fixed-wing UAV has rigid wings with an airfoil profile, allowing it to produce the desired lift and aerodynamics by deflecting the oncoming air. Although fixed-wing platforms cannot hover in place or maintain low speeds, they support long-endurance flights. Further, they require an obstruction-free runway to take off and land. However, in comparison to rotary-wing platforms, they carry heavier payloads and are energy-efficient owing to their gliding characteristic. The MQ-9 Reaper is an example of a fixed-wing UAV, as in Fig. \ref{fig:fixed_wing}. A rotary-wing UAV possesses two or more rotary blades positioned around a fixed mast to achieve the desired aerodynamic thrust. Rotary-wing platforms are capable of hovering, low-altitude flight, and \ac{VTOL}.
In contrast to fixed-wing platforms, they present flexible maneuverability advantages owing to the rotary blades. Rotary-wing \acp{UAV} are further classified into single-rotor, multi-rotor, and fixed-wing hybrid \cite{chapman_2019}. \begin{figure*}[h] \minipage[b]{0.5\textwidth} \centering \includegraphics[width=2.3 in]{editor/figures/reapermq9.JPG} \caption{Fixed Wing \ac{UAV} \cite{airForce_2015} } \label{fig:fixed_wing} \endminipage\hfill \minipage[b]{0.49\textwidth} \centering \includegraphics[width=2.3 in]{editor/figures/single-rotor-drone-compressor.jpg} \caption{Single-Rotor \ac{UAV} \cite{shan2018} } \label{fig:single_rotor} \endminipage\hfill \end{figure*} Single-rotor \acp{UAV} rely on a single main rotor to stay airborne. Although they possess a tail rotor to control the heading, as in Fig. \ref{fig:single_rotor}, it does not count towards the rotor count. The required airflow to move forward is generated by the rotor blades. They are also capable of \ac{VTOL} and hovering tasks. Since they rely on a single rotor to stay elevated, the blades are usually longer. In contrast to multi-rotor \acp{UAV}, they can carry heavier payloads and are energy-efficient owing to the lower power requirement of a single rotor. The energy-efficient operation enables longer flight times when compared to multi-rotor platforms. Therefore, single-rotor platforms are beneficial for aerial surveying applications that require carrying heavier payloads and extended flight times. Helicopters are an example of single-rotor \acp{UAV}. \begin{figure*}[h] \minipage[b]{0.5\textwidth} \centering \hspace{-1 cm} \includegraphics[width=1.95 in]{editor/figures/muti-rotor-uav.JPG} \caption{Multi-Rotor \ac{UAV}} \label{fig:multi_rotor} \endminipage\hfill \minipage[b]{0.49\textwidth}\hspace{-0.5cm} \centering \includegraphics[width=2.4 in]{editor/figures/fixed-wing-hybrid-uav.jpg} \caption{Fixed Wing Hybrid \ac{UAV} \cite{nasa} } \label{fig:fixed_wing_hybrid} \endminipage\hfill \end{figure*} Multi-rotor \acp{UAV}, on the other hand, use multiple rotors to achieve the desired aerodynamic thrust for lifting and propelling, as in Fig. \ref{fig:multi_rotor}. The most common examples of this category are the tricopter, quadcopter (quadrotor), hexacopter, and octocopter. Multi-rotor platforms can perform complex maneuvering and hovering tasks but have limited payload capability and flight endurance. They also provide a stable platform for aerial inspection, photography, and precision agriculture applications. Fixed-wing hybrid UAV platforms combine the aerodynamic benefits of the fixed-wing and rotary-wing UAV classes (Fig. \ref{fig:fixed_wing_hybrid}). This coupling adds \ac{VTOL}, hovering, increased flight speed, and long-endurance capabilities. Owing to the fairly recent arrival of the hybrid class, there are still very few developmental resources available for it. The discussion in this section should enable the developer to choose the appropriate UAV platform tailored to the requirements of their machine learning solution. \subsubsection*{Build or Buy} Here, we weigh the pros and cons of buying versus building a UAV. Commercial \acp{UAV} available in the market serve as an easier and more cost-friendly option to rapidly test deep learning solutions. However, specific mission requirements might call for building a custom model. Commercial \acp{UAV} are often preprogrammed and tested for stability.
Most of them come in a \ac{RTF} state requiring minimal setup out of the box. Prebuilt \acp{UAV} offer limited customization, and it can be difficult to repair and/or replace their components. An essential requirement for deep learning solutions is computational power; however, prebuilt UAV platforms have limited onboard computational resources, requiring external processors. A costlier option could be purpose-built commercial \acp{UAV} with custom attachments to fit the mission requirements. UAV prototyping, on the contrary, offers several benefits. Developers can often add custom sensors, batteries, and computational units to a flight-ready UAV platform for rapid deployment and testing. The lift and payload capacity of the UAV determine its flight endurance and stability. Achieving flight stability is guided by several factors such as the right component balance and the ground controller's pilot skills. Building a flight-ready UAV demands considerable electrical and mechanical skills, which can be seen as a pro as well as a con: the prototyping procedure can be time-consuming, while acquiring the electro-mechanical skills is instructive. Another major requirement when building custom prototypes is the flight controller software needed to control and navigate the \acp{UAV}. To conclude, we have listed a few commercial drones and their specifications in Table \ref{table:drones}. The next subsection sheds light on the flight stack software. \begin{table}[h!] \caption{Commercially available drones} \centering \def\arraystretch{1.4}% \resizebox{\textwidth}{!}{% \begin{tabular}{|l|c|c|c|c|} \hline \multicolumn{1}{|c|}{{\textbf{UAV platform}}} & \textbf{Specifications} & \begin{tabular}[c]{@{}c@{}}\textbf{Onboard/}\\ \textbf{External} \\\textbf{DL Processing}\end{tabular} & \textbf{SDK} & \textbf{Estimated Cost} \\ \hline Ryze Tello EDU \cite{ryze_tello} & \begin{tabular}[c]{@{}c@{}}87 g weight, 13 min flight time, \\ WiFi 802.11n, Range Finder, Barometer, LED, Camera\end{tabular} & External via SDK & Tello-Python & \$129.00 \\ \hline DJI Inspire 2 \cite{dji_inspire_2} & \begin{tabular}[c]{@{}c@{}}3.44 kg weight, 4.25 kg payload, 27 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz, GPS, GLONASS, \\ GALILEO, Camera, Vision systems for obstacle avoidance\end{tabular} & External via SDK & Mobile SDK & \$3,299.00 \\ \hline DJI Matrice 100 \cite{dji_matrice_100} & \begin{tabular}[c]{@{}c@{}}2.355 kg weight, 3.6 kg payload, 13--40 min flight time, \\ 5.725-5.825 GHz, 922.7-927.7 MHz, \\ 2.400-2.483 GHz (Lightbridge)\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK, \\ Mobile SDK\end{tabular} & N/A \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Matrice\\ 200 Series V2 \cite{dji_matrice_200}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}4.91 kg weight, 1.23 kg payload, 33 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ Different payload configurations\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK\\ Payload SDK\\ Mobile SDK\end{tabular} & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Matrice\\ 300 RTK \cite{dji_matrice_300}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}6.3 kg weight, 2.7 kg payload, 55 min flight time, \\2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ Camera Gimbal, infrared ToF Sensing System, FPV Camera,
GPS\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK\\ Payload SDK\\ Mobile SDK\end{tabular} & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Matrice\\ 600 Pro \cite{dji_matric_600}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}9.5 kg weight, 15.5 kg payload, 16--38 min flight time, \\ 920.6-928 MHz, 5.725-5.825 GHz, 2.400-2.483 GHz, \\ Camera Gimbal, Collision avoidance system, GPS, GLONASS\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard via \\ Manifold 2-C \\ or Manifold 2-G\end{tabular} & \begin{tabular}[c]{@{}c@{}}Onboard SDK, \\ Mobile SDK\end{tabular} & \$5,699.00 \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Mavic 2 \\ Enterprise \cite{dji_mavic_2_enterprise}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}905 g weight, 1100 g payload, 29 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, GLONASS, Visual Camera, \\ Omnidirectional Obstacle Sensing, Speaker, Beacon, Spotlight\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Mavic 2 \\ Enterprise Dual \cite{dji_mavic_2_enterprise}\end{tabular}}& \begin{tabular}[c]{@{}c@{}}899 g weight, 1100 g payload, 29 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, GLONASS, \\Thermal Camera, Visual Camera, Camera, Speaker,\\ Omnidirectional Obstacle Sensing, Beacon, Spotlight\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & Request Quote \\ \hline DJI Mavic 2 Pro \cite{dji_mavic_2} & \begin{tabular}[c]{@{}c@{}}905 g weight, 31 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, \\ GLONASS, Pro Camera, Omnidirectional Obstacle Sensing\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & \$1,599.00 \\ \hline DJI Mavic 2 Zoom \cite{dji_mavic_2} & \begin{tabular}[c]{@{}c@{}}905 g weight, 31 min flight time, \\ 2.400-2.4835 GHz, 5.725-5.850 GHz, GPS, \\ GLONASS, Zoom Camera, Omnidirectional Obstacle Sensing\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Mobile SDK, \\ Windows SDK\end{tabular} & \$1,349.00 \\ \hline DJI P4 Multispectral \cite{dji_p4} & \begin{tabular}[c]{@{}c@{}}1487 g weight, 27 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz,\\ GPS, GLONASS, GALILEO, RGB Camera, 5 monochrome sensors\end{tabular} & External via SDK & Mobile SDK & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Phantom 4 \\ Pro V2.0 \cite{dji_phantom_pro_v2}\end{tabular}}& \begin{tabular}[c]{@{}c@{}}1375 g weight, 30 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ GPS, GLONASS, GALILEO, RGB Camera, infrared sensors\end{tabular} & External via SDK & Mobile SDK & \$1,599.00 \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}DJI Phantom 4 \\ RTK \cite{dji_phantom_4_rtk}\end{tabular}}& \begin{tabular}[c]{@{}c@{}}1391 g weight, 30 min flight time, \\ 2.4000-2.4835 GHz, 5.725-5.850 GHz, \\ GPS, GLONASS, GALILEO, RGB Camera, infrared sensors\end{tabular} & External via SDK & Mobile SDK & Request Quote \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[l]{@{}l@{}}Parrot ANAFI \\ ANAFI Thermal \cite{parrot_anafi_thermal}\end{tabular}} & \begin{tabular}[c]{@{}c@{}}315 g weight, 26 min flight time, \\ Wi-Fi 802.11a/b/g/n, GPS, GLONASS, Barometer, magnetometer,\\ vertical camera, ultrasonic sensor, 6-axis IMU, 3-axis accelerometer, \\3-axis gyroscope, thermal imaging camera, 4K camera\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Parrot \\ Ground SDK\end{tabular} & \$1,900.00 \\ \hline Parrot ANAFI USA \cite{parrot_anafi_usa} & \begin{tabular}[c]{@{}c@{}}500 g weight, 32 min flight time, \\ Wi-Fi 802.11a/b/g/n, GPS, GLONASS, GALILEO, Barometer, \\ magnetometer, vertical camera, ultrasonic sensor, 6-axis IMU, 4K camera, \\ 3-axis accelerometer, 3-axis gyroscope, 32x zoom camera\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Parrot \\ Ground SDK\end{tabular} & Coming soon \\ \hline Parrot ANAFI Work \cite{parrot_anafi_work} & \begin{tabular}[c]{@{}c@{}}321 g weight, 25 min flight time, \\ Wi-Fi 802.11a/b/g/n, GPS, GLONASS, Barometer, magnetometer, \\ vertical camera, ultrasonic sensor, 6-axis IMU, 3-axis accelerometer,\\ 3-axis gyroscope, thermal imaging camera, 4K camera\end{tabular} & External via SDK & \begin{tabular}[c]{@{}c@{}}Parrot \\ Ground SDK\end{tabular} & \$999.00 \\ \hline \end{tabular}% } \label{table:drones} \end{table} \subsection{Flight Stack} \label{sec:flightstack} The flight stack is the flight controller software that comprises a set of positional and navigational guidance and control algorithms, interfaces, and communication links that direct the UAV's flight path and maneuverability. A flight stack is typically comprised of firmware, middleware, and interface layers, as in Fig. \ref{fig:flight_stack} \cite{caleberg_2019}, whereby the middleware supports the communication link enabling command and control (C2) and telemetry message passing. The software layer interfaces with the firmware via the communication link protocol and refers to the \ac{GCS} software that performs UAV configuration and monitoring. \begin{figure}[h] \centering \includegraphics[width=4.5 in]{editor/figures/FlightStackDiagram.png} \caption{Flight Stack} \label{fig:flight_stack} \end{figure} \iffalse \subsubsection*{Firmware Layer} The firmware layer in the flight stack is responsible for controlling \acp{UAV} at the hardware level. The firmware layer handles numerous tasks such as auto-leveling or obstacle avoidance. There are two main firmware layers available: \textit{Ardupilot} and \textit{PX4}. Both of these share many of the same features such as simulation tools, analytical tools, and many of the same flight modes. Below are the main differences between them. Ardupilot is an open-source project designed to give developers and hobbyists alike a fully-featured and reliable tool to create autopilot systems. Ardupilot has a large community surrounding it, which makes it a great platform for developers as there are many forums to ask questions on. Ardupilot comes with substantial documentation that provides in-depth and extensive guidance. Ardupilot has been installed on more than one million vehicles including \acp{UAV}, boats, submarines, and other land based vehicles. The main advantage of Ardupilot when compared to the other firmware layer is its \ac{GPL} License. This also may be considered its disadvantage, depending on the point of view. The \ac{GPL} license permits commercial use with one requirement: any change made to the \ac{GPL} licensed software must be made available. This means that if anyone makes a change to the Ardupilot code, that change needs to be added back into the main repository which is publicly available.
This is great for hobbyists since it encourages innovation to be shared and means that Ardupilot's features are always expanding. This is a disadvantage for projects that include proprietary software using Ardupilot, as the proprietary software would need to be made publicly available. The other autopilot system for the firmware layer is known as PX4. PX4 is the flight controller that Dronecode promotes. Dronecode is a nonprofit organization that dedicates its time to promoting open-source components and the communities around them. As part of the Linux Foundation, Dronecode and PX4 have a large community around them. The main benefit of PX4 over Ardupilot comes down to its \ac{BSD} License. The \ac{BSD} License allows developers who use this platform to sell their code commercially without needing to make it publicly available. This is why PX4 has become the industry standard over the past few years. Some consider this a disadvantage, however, because any improvements made to the code-base are not required to be shared, and therefore PX4 may lack some features that have been developed but kept closed-source. \subsubsection*{Middleware Layer} The middleware layer can be thought of as the glue that holds the flight stack together. The middleware handles the communication between the layers of the flight stack. The industry standard choice is a protocol known as \ac{MAVLink}. \ac{MAVLink} is another project hosted under Dronecode and the Linux Foundation. Several libraries and language bindings exist for \ac{MAVLink}, enabling virtually any application to use the \ac{MAVLink} protocol. This means the \ac{UAS} solution can be written in a number of languages such as C/C++, Python, C\verb|#|, Objective-C, Java, JavaScript/TypeScript, Lua, Swift, Clojure, Go, and Haskell. The MAVLink protocol provides a packet structure that uses a 6-byte header. This small overhead is beneficial to prevent overloading of the wireless link between the ground and the \ac{UAV}. A standard set of messages is also provided, which allows applications designed to interface with Ardupilot or PX4 to be interoperable \cite{caleberg_2019}. \subsubsection*{Interface/Software Layer} The interface/software layer is the piece of software that utilizes the MAVLink protocol to interface with the firmware layer. There are many interface layers already available. They are broken up into three categories. There is a command line interface layer known as MAVProxy. It lets users control their \ac{UAV} from a terminal such as bash or PowerShell. There are several \ac{GUI} interface layers. The two most popular are Mission Planner from Ardupilot and QGroundControl from Dronecode. These \acp{GUI} can be used to configure, fly, and view the status of a \ac{UAV}. The last category is an \ac{API} known as Dronekit. Dronekit is a Python library that is used to communicate with a \ac{UAV} via the MAVLink protocol \cite{chapman_2019}. \fi There are many open-source flight controller software packages available today, namely ArduPilot, PX4, and Paparazzi, among others. Flight controller software enables autonomous operation capability for specific UAV platforms (airframes). This comprises fault detection and handling, the C2 link protocol, battery monitoring, obstacle avoidance, landing, return-home features, and data logging, among others. The fault detection and handling supports features such as landing on C2 link loss, returning home on C2 link loss, automatic parachute release, battery voltage warnings, geofencing, landing/returning home on low battery, and safety checks for sensor errors. Some of the C2 link protocols are MAVLink, UAVTalk, XBUS, XBee, FrSky, HoTT, \ac{PPM}, and Lightweight TeleMetry (LTM).
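As an illustration of C2 and telemetry message passing over MAVLink, the sketch below reads an attitude message and sends an arm command. It assumes the \texttt{pymavlink} package and a software-in-the-loop flight stack listening on UDP port 14550; the address is illustrative.
\begin{verbatim}
from pymavlink import mavutil

# Connect to the flight stack and wait for its heartbeat.
master = mavutil.mavlink_connection("udp:127.0.0.1:14550")
master.wait_heartbeat()

# Telemetry: block until an ATTITUDE message arrives.
msg = master.recv_match(type="ATTITUDE", blocking=True)
print(f"roll={msg.roll:.3f} pitch={msg.pitch:.3f} yaw={msg.yaw:.3f}")

# C2: send an arm command (param1 = 1 arms, 0 disarms).
master.mav.command_long_send(
    master.target_system, master.target_component,
    mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
    0,                    # confirmation
    1, 0, 0, 0, 0, 0, 0)  # param1..param7
\end{verbatim}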
ArduPilot is an open-source flight controller software released under the GNU General Public License (GPL) which supports a wide range of vehicles including fixed-wing UAVs, multi-rotor UAVs, single-rotor UAVs, boats, and submarines \cite{ardu}. It can be run on a Linux-based operating system (OS), allowing support from single-board computers to full PC systems. ArduPilot has desktop \ac{GCS} software for mission planning and calibration on Linux, Windows, and Macintosh OS. It also supports the MAVLink, FrSky, and LTM communication protocols. ArduPilot additionally supports the usage of multiple radio control receivers for redundancy, failover, and/or handoffs. The PX4 flight controller \cite{px4} from the DroneCode collaborative project \cite{dronecode} is released under the \ac{BSD} license and supports both fixed-wing and multi-rotor airframes. PX4 enables operation with the QGroundControl GCS software, from where the UAV can be configured as well as monitored. Both ArduPilot and PX4 support satellite waypoint navigation and satellite position hold. ArduPilot and PX4 additionally support a stereovision navigation function and a follow-me autonomous navigation feature, respectively. The Paparazzi flight controller supports fixed-wing, flapping-wing, hybrid, and multi-rotor airframes and is released for public use under the GNU GPL \cite{paparazzi}. The GCS software of Paparazzi enables UAV configuration, monitoring, and custom flight plan configuration for navigational control and guidance. The supported C2 link protocols are MAVLink, XBee, SBus, and \ac{PPM}. Paparazzi supports all autonomous navigation features offered by ArduPilot and PX4 in addition to automatic takeoff and landing. Several other open-source flight controller software packages worth mentioning are OpenPilot \cite{openpilot}, LibrePilot \cite{librepilot}, BetaFlight \cite{beta}, dRonin \cite{dronin}, and INAV \cite{inav}. \subsection{Computational Unit} \iffalse A \ac{UAS} that takes advantage of deep learning ultimately needs computational resources to house and execute the trained model. \acp{UAV} have two main options as to where this can take place: an on-board computer or a \ac{GCS}. There are many parts that make up a \ac{UAV}. One of these parts is known as the flight controller. The purpose of the flight controller is to control the motors of the \ac{UAV} in accordance with any input it is given. This input can be in the form of user input from a wireless remote or from some other origin such as a deep learning algorithm. The problem with flight controllers in the context of deep and reinforcement learning is that flight controllers generally have low computational resources. This is simply because manual flight of a \ac{UAV} requires minimal computation. In order to overcome this challenge, a separate computer, known as a companion computer, can be added to a \ac{UAV}. Companion computers are more powerful than their flight controller counterparts, which means they can be used to execute a trained model and then send the decision to the flight controller as input. The advantage to executing the model on board the \ac{UAV} is that the \ac{UAV} can react in real time.
The companion computer can be connected to any sensors on the \ac{UAV} and can interpret their input to make a decision without needing to off-load all of the sensor data. Having the companion computer on the \ac{UAV} means that if the communication between the \ac{UAV} and the \ac{GCS} is severed, the \ac{UAV} will continue to execute the flight plan or the trained model. The disadvantage to the companion computer approach is the added weight it brings to the \ac{UAV}. Adding more weight will decrease the flight time. This may not be important in the development stage but could be of the utmost importance in a production-ready \ac{UAS}. It is also uncommon to find a commercial \ac{UAV} that has a built-in companion computer or even one that has the functionality to add one. An alternative to having a companion computer on the \ac{UAV} is to utilize a \ac{GCS}. A \ac{GCS} is a computer or mobile device that is on the ground and communicates with the \ac{UAV}. Many commercial \acp{UAV} use \acp{GCS} to display video streams from cameras on-board the \ac{UAV}. But \acp{GCS} can also be used to execute a trained model. The advantage to using a \ac{GCS} as the computation hub is that it can have much more processing power. It also will not add weight to the \ac{UAV}, which can increase the flight time. Using a \ac{GCS} allows many more commercially available \acp{UAV} to be used, as many of them offer \acp{API} to read data from their sensors and control the \ac{UAV} programmatically from the ground. The disadvantage to this option is that there needs to be a reliable low-latency wireless link between the \ac{UAV} and \ac{GCS} in order to offload data from the \ac{UAV} to the \ac{GCS}. If this link is degraded or disconnected during flight, there needs to be a fallback mode in order to make sure the \ac{UAS} operates safely. \fi The computational resources on the UAV are a primary concern when it comes to deploying deep learning solutions. The payload capacity of the UAV and the power consumption of the processors are the two major determinants for onboard UAV processor selection. Further, given two processor platforms of comparable weight, an essential performance metric for selection could be the ratio of the inference speed of the deep learning solution to the power consumption of the processor. Additional selection metrics could be the memory space and physical volume of the processors.
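As a back-of-the-envelope illustration of this selection metric for the platforms surveyed next, the snippet below ranks candidates by inferences per second per watt; the throughput and power figures are placeholders, not measurements.
\begin{verbatim}
# Placeholder figures (not measurements): frames/s from profiling the model,
# watts from the platform datasheet under a comparable load.
platforms = {
    "Jetson TX1":      {"fps": 45.0, "watts": 10.0},
    "Snapdragon":      {"fps": 18.0, "watts": 4.0},
    "Raspberry Pi 4B": {"fps": 6.0,  "watts": 6.4},
}
for name, p in sorted(platforms.items(),
                      key=lambda kv: kv[1]["fps"] / kv[1]["watts"],
                      reverse=True):
    print(f'{name}: {p["fps"] / p["watts"]:.2f} inferences/s per watt')
\end{verbatim}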
The Jetson Tegra K1 (TK1) is a developmental kit from NVIDIA comprising a Kepler \ac{GPU} with 192 CUDA cores and a 4-Plus-1 quad-core ARM Cortex-A15 \ac{CPU}. The TK1 has a very low power footprint while being capable of 300 Giga\ac{FLOPS} of 32-bit floating-point computations. The Jetson TX1, on the other hand, hosts an NVIDIA Maxwell 256 CUDA core GPU and a quad-core ARM Cortex-A57 CPU. The power draw for a typical CUDA load is in the range of 8-10W, which, at its rated 1 Tera\ac{FLOPS}, corresponds to roughly 100-125 Giga\ac{FLOPS} per watt. In contrast to the TK1, the TX1 comes at a much smaller form factor of 50mm $\times$ 87mm. The Snapdragon Flight board based on the Snapdragon 801 processor was introduced by Qualcomm for autonomous vehicle platforms. The board comes with a 2.26GHz Qualcomm Krait quad-core \ac{CPU} and a Qualcomm Adreno 330 \ac{GPU} capable of nearly 148 Giga\ac{FLOPS}, along with 2GB of \ac{RAM}. In contrast to the TX1 and TK1, the Snapdragon Flight board comes at a smaller form factor of 58mm $\times$ 40mm. Such a small form factor (nearly half the size of a credit card) and light weight ($<$13g) make it an ideal payload option for \acp{UAV}. \begin{table*}[!h] \caption{Computational Platforms for \ac{UAV}} \centering \def\arraystretch{1.5}% \begin{tabular}{|p{1.6cm}|p{2.4cm}|p{3cm}|p{2.1cm}|p{2cm}|} \hline \textbf{Platforms} & \textbf{CPU} & \textbf{GPU} & \textbf{Dimensions} & \textbf{Memory}\\ \hline \textbf{Pi 4B} & ARM Cortex-A72 \newline Speed: 1.5GHz & Broadcom VideoCore VI \newline 32 Giga\ac{FLOPS} & 85mm$\times$56mm & RAM Options: 2GB, 4GB, 8GB\\ \hline \textbf{Odroid XU4} & ARM Cortex-A7\newline Speed: 1.4GHz \newline ARM Cortex-A15\newline Speed: 2GHz & ARM Mali-T628\newline 102.4 Giga\ac{FLOPS} & 83mm$\times$59mm & 2GB RAM \newline eMMC5.0 HS400 Flash\\ \hline \textbf{Jetson TK1} & ARM Cortex-A15 \newline Speed: 2.3GHz & Kepler 192 CUDA core\newline 300 Giga\ac{FLOPS} & 127mm$\times$127mm & 2GB RAM \newline 16GB Flash\\ \hline \textbf{Jetson TX1} & ARM Cortex-A57\newline Speed: 2GHz & Maxwell 256 CUDA core\newline 1 Tera\ac{FLOPS} & 50mm$\times$87mm & 4GB RAM \newline 16GB Flash\\ \hline \textbf{Snapdragon Flight} & Qualcomm Krait 400 \newline Speed: 2.26GHz & Qualcomm Adreno 330\newline 148 Giga\ac{FLOPS} & 58mm$\times$40mm & 2GB \ac{RAM}\newline 32GB Flash\\ \hline \end{tabular} \\ \label{tab:comp} \end{table*} Here, we briefly discussed a few computational platforms that could potentially enable deep learning solutions on \ac{UAV} platforms and contrasted them on the basis of their physical and performance specifications. Next, we will discuss UAS safety and the regulations enforced to prevent risk and/or injury to people and property. \subsection{UAS Safety and Regulations} \subsubsection*{Safety} \label{subsec:safety} \acp{UAV} have recently become increasingly popular for a diverse array of applications including but not limited to personal hobby, photography, aerial survey, precision agriculture, power-line inspection, entertainment, tactical surveillance, border security, etc. The \ac{FAA} projects even greater adoption of \acp{UAV} in the coming years, with an estimated 3.5 million units in 2021 \cite{faaest}. The advent of \acp{UAV} has posed significant safety and security challenges. Safety encompasses physical risks posed to people and infrastructure as well as UAV cyber-security risks. The \ac{FAA} has reported over 4889 incidents causing serious harm to people and infrastructure between 2014 and 2017 \cite{droneincident}.
UAV risk factors such as obstacle collision, human factors, rogue \acp{UAV}, untimely battery depletion, sensor errors, etc., must be carefully assessed prior to any \ac{UAV} mission. Such risk assessment becomes increasingly essential when opting for self-designed \acp{UAV} as opposed to commercial drones. As discussed in section \ref{sec:flightstack}, most commercial drones incorporate general safety measures as part of the flight controller software, such as obstacle avoidance, returning home or landing on low battery or sensor error, and geofencing, among others. Hence, a strict \ac{UAV} safety assessment must be conducted in a systematic and regulation-compliant manner to mitigate risks to the mission as well as to people and infrastructure. \subsubsection*{Regulations} In the United States, the \ac{FAA} is the regulatory body that enforces aviation rules for air traffic control. Commercial as well as hobbyist use of \acp{UAV} must abide by the regulations enforced by the \ac{FAA} as detailed in \cite{faadronezone}. The rules and regulations are enforced based on weight, coverage distance, application, speed, and flight altitude. The regulations restrict operating \acp{UAV} over or near people and in certain airspaces (airports, military facilities, or no-fly zones), and prohibit non-line-of-sight operation, in order to avoid accidents and injuries. Commercial \ac{UAV} operation requires pilots to obtain licenses and is restricted to daylight hours. Recreational flying involves similar rules, such as registering the \ac{UAV}, line-of-sight operation, daylight operation, keeping the drone no more than 400 feet above the ground, not operating near manned aircraft, people, or automobiles, and maintaining mental as well as physical alertness during drone operation. \section{Conclusion} \label{sec:Conclusion} This chapter presented how the modern era of machine learning can overcome challenges and accelerate the realization of truly autonomous \ac{UAS}. We began by presenting a tutorial study of the basic deep learning and reinforcement learning techniques to ground the reader's understanding and equip them for further research in this realm. Next, the recent advances in deep learning and reinforcement learning techniques as applied to various autonomous \ac{UAV} tasks were reviewed in depth. The inherent challenges and open problems pertaining to the application of machine learning techniques to autonomous \ac{UAS} tasks were clearly stated to open doors for future research. Additionally, to bridge the gap between simulations and hardware implementations, we presented a detailed account of the various simulation suites, \ac{UAV} platforms, flight stacks, and regulatory standards. The various challenges and factors to consider while prototyping \acp{UAV} for machine learning solutions were also discussed. Furthermore, this chapter is intended to serve as a comprehensive handbook that paves a clear roadmap for future research and development in pursuing autonomous \ac{UAS} solutions. \input{editor/acronym} \bibliographystyle{spmpsci}
\section{Introduction\label{intro}} Relativistic perfect fluids with a linear equation of state on a prescribed spacetime $(M,\gt)$ are governed by the relativistic Euler equations\footnote{Our indexing conventions are as follows: lower case Latin letters, e.g. $i,j,k$, will label spacetime coordinate indices that run from $0$ to $3$ while upper case Latin letters, e.g. $I,J,K$, will label spatial coordinate indices that run from $1$ to $3$.} \begin{equation} \nablat_i \Tt^{ij}=0 \label{relEulA} \end{equation} where \begin{equation*} \Tt^{ij} = (\rho+p)\vt^i \vt^j + p \gt^{ij} \end{equation*} is the stress energy tensor, $\vt^{i}$ is the fluid four-velocity normalized by $\gt_{ij}\vt^i \vt^j=-1$, and the fluid's proper energy density $\rho$ and pressure $p$ are related by \begin{equation*} p = K \rho. \end{equation*} Since $K=\frac{dp}{d\rho}$ is the square of the sound speed, we will always assume\footnote{While this restriction on the sound speed is often taken for granted, it is, strictly speaking, not necessary; see \cite{Geroch:2010} for an extended discussion.} that $0\leq K \leq 1$ in order to ensure that the speed of sound is less than or equal to the speed of light. We further restrict our attention to exponentially expanding Friedmann-Lema\^{i}tre-Robertson-Walker (FLRW) spacetimes $(M,\gt)$ where $M = (0,1]\times \Tbb^3$ and\footnote{By introducing a change of time coordinate via $\tilde{t}=-\ln(t)$, the metric \eqref{FLRW} can be brought into the more recognizable form $\gt = -d\tilde{t}\otimes d\tilde{t} + e^{2\tilde{t}}\delta_{IJ}dx^I \otimes dx^J$, where now $\tilde{t} \in [0,\infty)$. } \begin{equation} \label{FLRW} \gt = \frac{1}{t^2} g \end{equation} with \begin{equation} \label{conformal} g = -dt\otimes dt + \delta_{IJ}dx^I \otimes dx^J. \end{equation} It is important to note that, due to our conventions, the future is located in the direction of \textit{decreasing} $t$ and future timelike infinity is located at $t=0$. Consequently, we require that $\vt^0<0$ holds in order to guarantee that the four-velocity is future directed. For use below, we find it convenient to introduce the \textit{conformal four-velocity} via \begin{equation} \label{c-velocity} v^i = \frac{1}{t}\vt^i. \end{equation} \subsection{Stability for $0\leq K\leq 1/3$} It can be verified by a straightforward calculation that \begin{equation} \label{Hom-A} (\rho_*,v_*^i) = (t^{3(1+K)}\rho_c,-\delta^i_0), \quad t\in (0,1], \end{equation} defines a spatially homogeneous solution of the relativistic Euler equations \eqref{relEulA} on the exponentially expanding FLRW spacetimes $(M,\gt)$ for any choice of the parameter $0\leq K\leq 1$ and constant $\rho_c\in (0,\infty)$. From a cosmological perspective, these solutions are, in a sense, the most natural since they are also spatially isotropic and hence do not determine a preferred direction. The future, nonlinear stability of the solutions \eqref{Hom-A} on the exponentially expanding FLRW spacetimes was first established in the articles\footnote{In these articles, stability was established in the more difficult case where the fluid is coupled to Einstein's equations. However, the techniques used there also work in the simpler setting considered in this article where gravitational effects are neglected.} \cite{RodnianskiSpeck:2013,Speck:2012} for the parameter values $0<K<1/3$.
Stability results for the end points $K=1/3$ and $K=0$ were established later\footnote{Again, stability was established in these articles in the more difficult case where the fluid is coupled to Einstein's equations.} in \cite{LubbeKroon:2013} and \cite{HadzicSpeck:2015}, respectively. See also \cite{Friedrich:2017,LiuOliynyk:2018b,LiuOliynyk:2018a,Oliynyk:CMP_2016} for different proofs and perspectives, the articles \cite{LeFlochWei:2021,LiuWei:2021} for related stability results for fluids with nonlinear equations of state on the exponentially expanding FLRW spacetimes, the articles \cite{FOW:2021,Speck:2013,Wei:2018} for analogous stability results on other classes of expanding cosmological spacetimes, and \cite{Ringstrom:2008} for related, early stability results for the Einstein-scalar field system. One of the important aspects of these works is that they demonstrate that spacetime expansion can suppress shock formation in fluids, which was first discovered in the Newtonian cosmological setting \cite{BrauerRendallReula:1994}. This is in stark contrast to fluids on Minkowski space where arbitrarily small perturbations of a class of homogeneous solutions to the relativistic Euler equations form shocks in finite time \cite{Christodoulou:2007}. A consequence of these stability proofs is that the spatial components of the conformal four-velocity $v^i$ of small, nonlinear perturbations of the homogeneous solution \eqref{Hom-A} decay to zero at future timelike infinity, that is, \begin{equation*} \lim_{t\searrow 0} v^I = 0, \end{equation*} for the parameter values $0\leq K < 1/3$. This behaviour is, of course, consistent with the isotropic homogeneous solutions \eqref{Hom-A}. On the other hand, when $K=1/3$, the spatial components of the conformal four-velocity $v^i$ for perturbed solutions do not, in general, decay to zero at timelike infinity, and instead limit to a spatial vector field $\xi^I$ on $\Tbb^3$, that is, \begin{equation*} \lim_{t\searrow 0} v^I = \xi^I. \end{equation*} This behaviour is consistent with a family of \textit{non-isotropic} homogeneous solutions defined by\footnote{More generally, we could set the spatial components of the conformal four-velocity $v_\bullet^I$ to be any non-zero vector in $\Rbb^3$ and determine $v_\bullet^0$ via the conditions $g_{ij}v^i_\bullet v^j_\bullet =-1$ and $v^0_\bullet <0$. However, for simplicity, we will assume here that $v_\bullet^I$ is chosen so that it is pointing in the direction of the coordinate vector field $\del{1}=\fdel{\;}{x^1}$.} \begin{equation} \label{Hom-B} (\rho_\bullet,v_\bullet^i) = (t^{3(1+K)}\rho_c,-\sqrt{1+\nu_c^2}\delta^i_0 + \nu_c\delta^i_1), \quad t\in (0,1], \end{equation} where $(\rho_c,\nu_c)\in (0,\infty)\times (0,\infty)$, which satisfy the relativistic Euler equations for $K=1/3$; note that these solutions are correctly normalized since $g_{ij}v_\bullet^i v_\bullet^j = -(1+\nu_c^2)+\nu_c^2 = -1$. The known stability results for $K=1/3$ imply the future stability of nonlinear perturbations of these solutions. Noting that solutions of the type \eqref{Hom-B} can be made arbitrarily close to solutions of the type \eqref{Hom-A} for $K=1/3$ by choosing $\nu_c$ sufficiently small, from a stability point of view there seems to be very little difference between the two classes of solutions for small $\nu_c$. Indeed, the future nonlinear stability of both classes of solutions, where $\nu_c$ is sufficiently small, can be achieved via a common proof.
However, as will become clear, the essential difference between these solutions is that, from an initial data point of view, stable perturbations of solutions of the type \eqref{Hom-A} are generated from initial data $(\rho,v^I)|_{t=1}$ that is sufficiently close to $(\rho_*,v_*^I)|_{t=1}$ and satisfies \begin{equation} \label{Hom-A-idata} \min_{x\in \Tbb^3} (g_{IJ}v^I v^J)\bigl|_{t=1}=0, \end{equation} while stable perturbations of solutions of the type \eqref{Hom-B} are generated from initial data $(\rho,v^I)|_{t=1}$ that is sufficiently close to $(\rho_\bullet,v_\bullet^I)|_{t=1}$ and satisfies \begin{equation} \label{Hom-B-idata} \min_{x\in \Tbb^3} (g_{IJ}v^I v^J)\bigl|_{t=1}>0. \end{equation} \subsection{Stability for $1/3<K<1$} Until recently, it was not known whether any solutions of the relativistic Euler equations were stable to the future for the parameter values $1/3< K< 1$. In fact, it was widely believed that solutions to the relativistic Euler equations were not stable for these parameter values. This belief was due, in part, to the influential work of Rendall \cite{Rendall:2004}, who used formal expansions to investigate the asymptotic behaviour of relativistic fluids on exponentially expanding FLRW spacetimes with a linear equation of state. Rendall observed that the formal expansions become inconsistent for $K$ in the range $1/3<K<1$ if the leading order term in the expansion of $g_{IJ}v^I v^J$ at $t=0$ vanished somewhere. He speculated that the inconsistent behaviour in the expansions could be due to inhomogeneous features developing in the fluid density that would ultimately result in the blow-up of the density contrast $\frac{\del{I}\rho}{\rho}$ at future timelike infinity. Speck \cite[\S 1.2.3]{Speck:2013} added further support to Rendall's arguments by presenting a heuristic analysis that suggested uninhibited growth would set in for solutions of the relativistic Euler equations for the parameter values $1/3 < K<1$. Combined, these considerations left the stability of solutions to the relativistic Euler equations in doubt for $K$ in the range $1/3 < K <1$. However, it was established in \cite{Oliynyk:2021} that there exists a class of non-isotropic homogeneous solutions of the relativistic Euler equations that are stable to the future under small, nonlinear perturbations. This class of homogeneous solutions should be viewed as the natural continuation of the solutions \eqref{Hom-B} over the parameter range $1/3 < K <1$, and they are defined by \begin{equation}\label{Hom-C} (\rho_\bullet,v_\bullet^i) = \biggl( \frac{\rho_c t^{\frac{2(1+K)}{1-K}}}{(t^{2\mu}+ e^{2u})^{\frac{1+K}{2}}}, -t^{-\mu}\sqrt{e^{2u}+t^{2 \mu} }\delta_0^i+ t^{-\mu }e^{u}\delta_1^i\biggr), \quad t\in (0,1], \end{equation} where \begin{equation}\label{mu-def} \mu = \frac{3K-1}{1-K} \end{equation} and $u=u(t)$ solves the initial value problem (IVP) \begin{align} u'\!(t) &=\frac{K\mu t^{2 \mu-1}}{t^{2 \mu }+(1-K) e^{2 u(t)}},\quad 0<t\leq 1, \label{HomeqB.1} \\ u(1) &= u_0. \label{HomeqB.2} \end{align} Existence of solutions to this IVP is guaranteed by Proposition 3.1~from \cite{Oliynyk:2021}, which we restate here: \begin{prop} \label{Homprop} Suppose $1/3<K<1$, $\mu = (3K-1)/(1-K)$, and $u_0 \in \Rbb$. Then there exists a unique solution $u \in C^\infty((0,1]) \cap C^0([0,1])$ to the initial value problem \eqref{HomeqB.1}-\eqref{HomeqB.2} that satisfies \begin{equation} |u(t)-u(0)| \lesssim t^{2\mu} \AND |u'\!(t)| \lesssim t^{2\mu-1} \label{Hombounds} \end{equation} for all $t\in (0,1]$.
Moreover, for each $\rho_c\in (0,\infty)$, the solution $u$ determines a homogeneous solution of the relativistic Euler equations \eqref{relEulA} via \eqref{c-velocity} and \eqref{Hom-C}. \end{prop} The main result of \cite{Oliynyk:2021} was a proof of the nonlinear stability to the future of the homogeneous solutions \eqref{Hom-C} for the parameter values $1/3 < K <1/2$. It was also established in \cite{Oliynyk:2021} that under a $\Tbb^2$-symmetry assumption, future stability held for the full parameter range $1/3<K<1$. An important point that is worth emphasising is that the initial data used to generate the perturbed solutions from \cite{Oliynyk:2021} satisfies the condition \eqref{Hom-B-idata} at $t=1$, and furthermore, this positivity property propagates to the future in the sense that the perturbed solutions satisfy \begin{equation*} \inf_{x\in M}(t^{2\mu}g_{IJ}v^I v^J)>0. \end{equation*} It is this property of the perturbed solutions from \cite{Oliynyk:2021} that avoids the problematic scenario identified by Rendall. This article has two main aims: the first is to establish the nonlinear stability to the future of the homogeneous solutions \eqref{Hom-C} for the full parameter range $1/3 < K<1$ without the $\Tbb^2$-symmetry that was required in \cite{Oliynyk:2021}. The second aim is to provide convincing numerical evidence that shows the density contrast blow-up scenario of Rendall is realized if the condition \eqref{Hom-B-idata} on the initial data is violated. Before stating a precise version of our stability result for the homogeneous solutions \eqref{Hom-C}, we first recall two formulations of the relativistic Euler equations from \cite{Oliynyk:2021}. The first formulation, which was introduced in \cite{Oliynyk:CMP_2015} and subsequently employed in \cite{Oliynyk:CMP_2016} to establish stability for the parameter range $0<K\leq 1/3$, involves representing the fluid in terms of the modified fluid density $\zeta$ defined via \begin{equation}\label{mod-den} \rho = t^{3(1+K)}\rho_c e^{(1+K)\zeta} \end{equation} and the spatial components $v_I$ of the conformal fluid four-covelocity\footnote{Here and in the following, all spacetime indices will be raised and lowered with the conformal metric $g_{ij}$. } $v_i=g_{ij}v^j$. In terms of these variables, the relativistic Euler equations \eqref{relEulA} can be formulated as the following symmetric hyperbolic system: \begin{equation} \label{relEulB} B^k \del{k}V = \frac{1}{t}\Bc \pi V \end{equation} where \begin{align} V &= (\zeta, v_J )^{\tr} , \label{Vdef}\\ v_0 & = \sqrt{|v|^2 +1} , \qquad |v|^2 = \delta^{IJ}v_I v_J, \label{v0def}\\ v^i & = \delta^{iJ}v_J - \delta^{i}_0 v_0, \label{viupdef}\\ \Bc &= \frac{-1}{v^0}\begin{pmatrix} 1 & 0 \\ 0 & \frac{1-3K}{v_0}\delta^{JI} \end{pmatrix}, \label{Bcdef}\\ \pi &= \begin{pmatrix} 0 & 0 \\ 0 & \delta_{I}^J \end{pmatrix}, \label{pi-def} \\ L^k_I &= \delta^k_I - \frac{v_I}{v_0} \delta^k_0, \label{Ldef} \\ M_{IJ} &= \delta_{IJ} - \frac{1}{(v_0)^2}v_I v_J, \label{Mdef}\\ B^0 &= \begin{pmatrix} K & \frac{K}{v^0} L^0_M \delta^{MJ} \\ \frac{K}{v^0} \delta^{LI} L^0_L & \delta^{LI} M_{LM} \delta^{MJ} \end{pmatrix} \label{B0def} \intertext{and} B^K &= \frac{1}{v^0}\begin{pmatrix} Kv^K & K L^K_M \delta^{MJ} \\ K \delta^{LI} L^K_L & \delta^{LI} M_{LM} \delta^{MJ} v^K \end{pmatrix}.
\label{BKdef} \end{align} The second formulation of the relativistic Euler equations is obtained by introducing a new density variable $\zetat$ via \begin{equation} \label{zetatdef} \zetat = \zeta + \ln(v_0) \end{equation} and decomposing the spatial components of the conformal fluid four-velocity as \begin{align} v_1 &= \frac{t^{-\mu } e^{u(t)+w_1}}{\sqrt{t^{2 \mu } \left((w_2-w_3)^2+(w_2+w_3)^2\right)+1}}, \label{cov2a} \\ v_2 &= \frac{(w_2+w_3) e^{u(t)+w_1}}{\sqrt{t^{2 \mu } \left((w_2-w_3)^2+(w_2+w_3)^2\right)+1}} \label{cov2b} \intertext{and} v_3 &= \frac{(w_2-w_3) e^{u(t)+w_1}}{\sqrt{t^{2 \mu } \left((w_2-w_3)^2+(w_2+w_3)^2\right)+1}}, \label{cov2c} \end{align} where $u(t)$ solves the IVP \eqref{HomeqB.1}-\eqref{HomeqB.2}. Then setting \begin{align} \wbr_1 &= u+w_1, \label{wbr1def} \\ \psi = &t^{2 \mu }+e^{2 \wbr_1}, \label{psidef}\\ \chi = &t^{2\mu }-(K-1) e^{2 \wbr_1}, \label{chidef}\\ \phi &= 2 t^{2\mu } \left(w_2^2+w_3^2\right)+1, \label{phidef} \\ \eta_\Lambda &= \left(2 w_\Lambda t^{2 \mu } (w_2-w_3)+(-1)^\Lambda\right), \quad \Lambda =2,3, \label{etadef} \intertext{and} \xi_\Lambda &= \left(2 w_\Lambda t^{2 \mu } (w_2+w_3)+1\right), \quad \Lambda =2,3, \label{xidef} \end{align} it was shown in \cite[\S 3.2]{Oliynyk:2021} that in terms of the variables \begin{equation} \label{Wdef} W = (\zetat, w_1, w_2, w_3 )^{\tr} \end{equation} the relativistic Euler equations become \begin{equation} \label{relEulC} \del{t}W + \Ac^I \del{I}W =-\frac{\mu}{t}\Pi W + t^{2\mu-1}\Gc \end{equation} where \begin{equation} \label{Ac1rep} \Ac^1 = \frac{1}{\sqrt{\frac{t^{2\mu}}{e^{2\wbr_1}}+1}}\begin{pmatrix} -\frac{1}{\sqrt{\phi}} & -\frac{t^{2 \mu }}{\psi \sqrt{\phi}} & \frac{2 t^{2\mu } w_2}{\phi^{3/2}} & \frac{2 t^{2\mu} w_3}{\phi^{3/2}} \\ -\frac{K t^{2 \mu } e^{-2 \wbr_1} \psi}{\sqrt{\phi}\chi} & \frac{(2 K-1) t^{2 \mu }+(K-1) e^{2 \wbr_1}}{\sqrt{\phi} \chi} & -\frac{2 K t^{2\mu } \psi w_2}{\phi^{3/2}\chi} & -\frac{2 K t^{2\mu } \psi w_3}{\phi^{3/2}\chi} \\ K t^{2\mu }w_2 e^{-2 \wbr_1} \sqrt{\phi} & -\frac{K t^{2\mu } w_2 \sqrt{\phi}}{\psi} & -\frac{1}{\sqrt{\phi}} & 0 \\ K t^{2\mu }w_3 e^{-2 \wbr_1} \sqrt{\phi} & -\frac{K t^{2\mu } w_3 \sqrt{\phi}}{\psi} & 0 & - \frac{1}{\sqrt{\phi}} \\ \end{pmatrix}, \end{equation} \begin{equation} \label{Ac2rep} \Ac^2 = \frac{1}{\sqrt{\frac{t^{2\mu}}{e^{2\wbr_1}}+1}}\begin{pmatrix} -\frac{t^{\mu } (w_3+w_2)}{\sqrt{\phi}} & -\frac{t^{3 \mu } (w_3+w_2)}{\psi \sqrt{\phi}} & \frac{t^{\mu } \eta_3}{\phi^{3/2}} & -\frac{t^\mu \eta_2}{\phi^{3/2}} \\ -\frac{K t^{3 \mu } (w_2+w_3) e^{-2 \wbr_1} \psi}{\sqrt{\phi}\chi} & \frac{t^{\mu } (w_2+w_3) \left((2 K-1) t^{2 \mu }+(K-1) e^{2 \wbr_1}\right)}{\sqrt{\phi} \chi} & -\frac{K t^{\mu } \psi \eta_3}{\phi^{3/2}\chi} & \frac{K t^{\mu } \psi\eta_2}{\phi^{3/2}\chi} \\ -\frac{1}{2} K t^{\mu } e^{-2 \wbr_1} \sqrt{\phi} & \frac{K t^{\mu } \sqrt{\phi}}{2 \psi} & -\frac{t^{\mu } (w_3+w_2)}{\sqrt{\phi}} & 0 \\ -\frac{1}{2} K t^{\mu } e^{-2 \wbr_1} \sqrt{\phi} & \frac{K t^{\mu } \sqrt{\phi}}{2 \psi} & 0 & -\frac{t^{\mu } (w_3+w_2)}{\sqrt{\phi}} \\ \end{pmatrix}, \end{equation} \begin{equation} \label{Ac3rep} \Ac^3 = \frac{1}{\sqrt{\frac{t^{2\mu}}{e^{2\wbr_1}}+1}}\begin{pmatrix} \frac{t^{\mu } (w_3-w_2)}{\sqrt{\phi}} & \frac{t^{3 \mu } (w_3-w_2)}{\psi \sqrt{\phi}} & -\frac{t^{\mu } \xi_3}{\phi^{3/2}} & \frac{t^\mu \xi_2}{\phi^{3/2}} \\ -\frac{K t^{3 \mu } (w_2-w_3) e^{-2 \wbr_1} \psi}{\sqrt{\phi}\chi} & \frac{t^{\mu } (w_2-w_3) \left((2 K-1) t^{2 \mu }+(K-1) e^{2 \wbr_1}\right)}{\sqrt{\phi} \chi} & \frac{K t^{\mu } \psi \xi_3}{\phi^{3/2}\chi} & -\frac{K t^{\mu } \psi\xi_2}{\phi^{3/2}\chi} \\ -\frac{1}{2} K t^{\mu } e^{-2 \wbr_1} \sqrt{\phi} & \frac{K t^{\mu } \sqrt{\phi}}{2 \psi} & \frac{t^{\mu } (w_3-w_2)}{\sqrt{\phi}} & 0 \\ \frac{1}{2} K t^{\mu } e^{-2 \wbr_1} \sqrt{\phi} & -\frac{K t^{\mu } \sqrt{\phi}}{2 \psi} & 0 & \frac{t^{\mu } (w_3-w_2)}{\sqrt{\phi}} \\ \end{pmatrix}, \end{equation} \begin{equation} \label{Gcdef} \Gc =\begin{pmatrix} 0 \\ -\frac{K (3 K-1) \left(e^{2 w_1}-1\right) e^{2 u}}{\left((K-1) e^{2 u}-t^{2 \mu }\right) \left((K-1) e^{2 \wbr_1}-t^{2 \mu }\right)} \\ 0 \\ 0 \end{pmatrix}, \end{equation} and \begin{equation} \label{Pidef} \Pi = \diag(0,0,1,1). \end{equation} For later use, we also define \begin{equation} \label{Piperpdef} \Pi^\perp = \id - \Pi, \end{equation} and observe that $\Pi$ and $\Pi^\perp$ satisfy the relations \begin{equation} \Pi^2 = \Pi, \quad (\Pi^\perp)^2 = \Pi^\perp, \quad \Pi\Pi^\perp =\Pi^\perp \Pi = 0 \AND \Pi+\Pi^\perp = \id. \label{Pirel} \end{equation} An important point regarding the formulation \eqref{relEulC} is that it is symmetrizable. Indeed, as shown in \cite{Oliynyk:2021}, multiplying \eqref{relEulC} by the positive definite, symmetric matrix \begin{equation} \label{A0rep} A^0 = \begin{pmatrix} K & 0 & 0 & 0 \\ 0 & \frac{t^{2 \mu } e^{2 \wbr_1}-(K-1) e^{4 \wbr_1}}{\psi^2} & 0 & 0 \\ 0 & 0 & \frac{2 e^{2 \wbr_1} \left(2 w_3^2 t^{2 \mu }+1\right)}{\phi^2} & -\frac{4 w_2 w_3 t^{2 \mu } e^{2 \wbr_1}}{\phi^2} \\ 0 & 0 & -\frac{4 w_2 w_3 t^{2 \mu } e^{2 \wbr_1}}{\phi^2} & \frac{2 e^{2 \wbr_1} \left(2 w_2^2 t^{2 \mu }+1\right)}{\phi^2} \\ \end{pmatrix} \end{equation} yields \begin{equation} \label{relEulD} A^0\del{t}W + A^I \del{I}W =-\frac{\mu}{t} A^0\Pi W + t^{2\mu-1}A^0\Gc \end{equation} where it is straightforward to verify from \eqref{Ac1rep}-\eqref{Ac3rep} that the matrices \begin{equation}\label{AIdef} A^I = A^0 \Ac^I \end{equation} are symmetric, that is, \begin{equation}\label{AI-sym} (A^I)^{\tr}=A^I. \end{equation} We are now in a position to state the main stability theorem of this article. The proof is presented in Section \ref{sec:proof}. Before stating the theorem, it is important to note that, due to the change of variables defined via \eqref{zetatdef}-\eqref{cov2c} and \eqref{Wdef}, the homogeneous solutions \eqref{Hom-C} correspond to the trivial solution $W=0$ of \eqref{relEulC}. \begin{thm} \label{mainthm} Suppose $k\in\Zbb_{>3/2+1}$, $1/3<K < 1$, $\mu = (3K-1)/(1-K)$, $\sigma > 0$, $u_0\in \Rbb$, $u \in C^\infty((0,1])\cap C^0([0,1])$ is the unique solution to the IVP \eqref{HomeqB.1}-\eqref{HomeqB.2} from Proposition \ref{Homprop} and $\zetat_0, w^0_J \in H^{k+1}(\Tbb^3)$. Then for $\delta>0$ small enough, there exists a unique solution \begin{equation*} W=(\zetat,w_J)^{\tr} \in C^0\bigl((0,1], H^{k+1}(\Tbb^3,\Rbb^4)\bigr)\cap C^1\bigl((0,1],H^{k}(\Tbb^3,\Rbb^4)\bigr) \end{equation*} to the initial value problem \begin{align} \del{t}W + \Ac^I\del{I}W &= -\frac{\mu}{t}\Pi W + t^{2\mu-1}\Gc \hspace{0.5cm} \text{in $(0,1]\times \Tbb^3$,} \label{relEulE.1}\\ W &= (\zetat_0, w^0_J)^{\tr} \hspace{1.65cm} \text{in $\{1\}\times \Tbb^3$,} \label{relEulE.2} \end{align} provided that \begin{equation*} \biggl(\norm{\zetat_0}_{H^{k+1}}^2+\sum_{J=1}^3\norm{w^0_J}_{H^{k+1}}^2\biggr)^{\frac{1}{2}}\leq \delta.
\end{equation*} Moreover, \begin{enumerate}[(i)] \item $W=(\zetat,w_J)^{\tr}$ satisfies the energy estimate \begin{equation*} \Ec(t) + \int_t^1 \tau^{2\mu-1}\bigl(\norm{D\zetat(\tau)}_{H^k}^2+\norm{Dw_1(\tau)}_{H^k}^2\bigr)\,d\tau \lesssim \norm{\zetat_0}_{H^{k+1}}^2+\sum_{J=1}^3\norm{w^0_J}_{H^{k+1}}^2 \end{equation*} for all $t\in (0,1]$ where\footnote{The norm $\norm{Df}_{H^k}$ is defined by $\norm{Df}^2_{H^k}= \sum_{J=1}^3 \norm{\del{J}f}^2_{H^k}$.} \begin{equation*} \Ec(t)=\norm{\zetat(t)}_{H^k}^2+\norm{w_1(t)}_{H^k}^2+t^{2\mu}\Bigl(\norm{D\zetat(t)}_{H^k}^2+\norm{Dw_1(t)}_{H^k}^2+\norm{w_2(t)}_{H^{k+1}}^2+\norm{w_3(t)}_{H^{k+1}}^2\Bigr), \end{equation*} \item there exist functions $\zetat_*, w_1^* \in H^{k-1}(\Tbb^3)$ and $\wb_2^*,\wb_3^* \in H^{k}(\Tbb^3)$ such that the estimate \begin{align*} \bar{\Ec}(t) \lesssim t^{\mu-\sigma} \end{align*} holds for all $t\in (0,1]$ where \begin{equation*} \bar{\Ec}(t)=\norm{\zetat(t) - \zetat_*}_{H^{k-1}}+\norm{w_1(t) - w_1^*}_{H^{k-1}} +\norm{t^\mu w_2(t) - \wb_2^*}_{H^{k}}+\norm{t^\mu w_3(t) - \wb_3^*}_{H^{k}}, \end{equation*} \item $u$ and $W=(\zetat,w_J)^{\tr}$ determine a unique solution of the relativistic Euler equations \eqref{relEulA} on the spacetime region $M=(0,1]\times \Tbb^3$ via the formulas \begin{align} \rho &= \frac{\rho_c t^{\frac{2(1+K)}{1-K}} e^{(1+K)\zetat}}{(t^{2\mu}+ e^{2(u+w_1)})^{\frac{1+K}{2}}}, \label{relEulsol.1}\\ \vt^0 &= -t^{1-\mu}\sqrt{e^{2 (u+w_1)}+t^{2 \mu} },\label{relEulsol.2}\\ \vt^1 &=t^{1-\mu }\biggl( \frac{e^{u+w_1}}{\sqrt{ (t^{\mu}w_2-t^{\mu}w_3)^2+(t^{\mu}w_2+t^{\mu}w_3)^2+1}} \biggr), \label{relEulsol.3} \\ \vt^2 &= t^{1-\mu }\biggl( \frac{(t^{\mu}w_2+t^{\mu}w_3) e^{u+w_1}}{\sqrt{ (t^{\mu}w_2-t^{\mu}w_3)^2+(t^{\mu}w_2+t^{\mu}w_3)^2+1}}\biggr),\label{relEulsol.4}\\ \vt^3 &= t^{1-\mu }\biggl( \frac{(t^{\mu}w_2-t^{\mu}w_3) e^{u+w_1}}{\sqrt{ (t^{\mu}w_2-t^{\mu}w_3)^2+(t^{\mu}w_2+t^{\mu}w_3)^2+1}}\biggr), \label{relEulsol.5} \end{align} \item and the density contrast $\frac{\del{I}\rho}{\rho}$ satisfies \begin{equation} \label{den-constrast-A} \lim_{t\searrow 0} \Bigl\| \frac{\del{I}\rho}{\rho} - (1+K)\del{I}(\zetat_*-w_1^*) \Bigr\|_{H^{k-2}} = 0. \end{equation} \end{enumerate} \end{thm} \begin{comment} \subsubsection{Proof summary} The proof of Theorem \ref{mainthm} proceeds in three main steps. The first step involves casting the relativistic Euler equations \eqref{relEulC} into a suitable Fuchsian form following closely the approach of \cite{Oliynyk:2021} with one critical modification that is responsible for the improvements in the stability results presented here. The next step involves verifying that the coefficients of the Fuchsian system satisfy the properties required to apply the Fuchsian global existence theory from \cite{BOOS:2021}. In the third and final step, we apply the Fuchsian global existence theory from \cite{BOOS:2021} in conjunction with a uniqueness result and continuation principle for solutions to \eqref{relEulC} in order to deduce the nonlinear stability of the trivial solution $W=0$. Since the trivial solution corresponds, through an appropriate choice of initial condition for $u$, to any of the homogeneous solutions \eqref{Hom-C} via \eqref{zetatdef}-\eqref{cov2c} and \eqref{Wdef}, the stability of the trivial solution implies the stability of the whole family of homogeneous solutions \eqref{Hom-C}, which completes the proof.
\end{comment} \subsection{Instability for $1/3 < K< 1$} It is essential for the stability result stated in Theorem \ref{mainthm} to hold that the initial data used to generate the nonlinear perturbations of homogeneous solutions of the type \eqref{Hom-C} satisfies the condition \eqref{Hom-B-idata}. This leaves the question of what happens when this condition is violated, which is guaranteed to happen for some choice of initial data in any open set of initial data that contains data corresponding to an isotropic homogeneous solution \eqref{Hom-A}. To investigate this situation, we consider a $\Tbb^2$-symmetric reduction of the system \eqref{relEulB} obtained by the ansatz \begin{align} \zetat(t,x^1,x^2,x^3)&=\ztt(t,x^1), \label{zttt-def} \\ v_{I}(t,x^1,x^2,x^3) &= t^{-\mu}\wtt(t,x^1) \delta_I^1, \label{wttt-def} \end{align} where $\zetat$ is as defined above by \eqref{zetatdef}. It is not difficult to verify via a straightforward calculation that the relativistic Euler equations \eqref{relEulB} will be satisfied provided that $\ztt$ and $\wtt$ solve\footnote{Here, we set $x=x^1$.} \begin{align} \del{t}\ztt-\frac{\wtt}{(t^{2 \mu }+\wtt^2)^{\frac{1}{2}}} \partial_{x}\ztt - \frac{t^{2\mu}}{(t^{2 \mu }+\wtt^2)^{\frac{3}{2}}}\partial_{x}\wtt &=0, \label{eqn:dotzeta} \\ \label{eqn:dotw} \del{t}\wtt-\frac{Kt^{2 \mu } (t^{2 \mu }+\wtt^2)^{\frac{1}{2}}}{(t^{2 \mu }-(K-1)\wtt^2)}\partial_{x}\ztt +\frac{\bigl((2 K-1) t^{2 \mu }+(K-1) \wtt^2\bigr)\wtt}{(t^{2 \mu }+\wtt^2)^{\frac{1}{2}} (t^{2 \mu }-(K-1)\wtt^2)}\partial_{x}\wtt &=\frac{t^{2 \mu -1}(-3 K+\mu +1) \wtt}{t^{2 \mu }-(K-1)\wtt^2}. \end{align} In Section \ref{numsol}, we numerically solve this system for specific choices of initial data \begin{equation*} (\ztt,\wtt)|_{t={t_0}} = (\ztt_0,\wtt_0) \quad \text{in $\Tbb^1$.} \end{equation*} Importantly, these choices include initial data for which $\wtt_0$ crosses zero at two points in $\Tbb^1$, and as a consequence, violates the condition \eqref{Hom-B-idata}. From our numerical solutions, we observe the following behaviour: \begin{enumerate}[(1)] \item For all $K\in (1/3,1)$ and all choices of initial data $(\ztt_0,\wtt_0)$ that are sufficiently close to homogeneous initial data of either family of solutions \eqref{Hom-A} and \eqref{Hom-C}, $\ztt$ and $\wtt$ remain bounded and converge pointwise as $t\searrow 0$. \smallskip \item For each $K\in (1/3,1)$ and each choice of initial data $(\ztt_0,\wtt_0)$ that violates \eqref{Hom-B-idata} and is sufficiently close to homogeneous initial data of the family of solutions \eqref{Hom-A}, there exists an $\ell=\ell(K)\in \Zbb_{\geq 0}$ such that \begin{equation*} \sup_{x\in \Tbb^1}\bigl(|\del{x}^{\ell} \ztt(t,x)|+|\del{x}^{\ell}\wtt(t,x)|\bigr) \nearrow \infty \quad \text{as $t\searrow 0$.} \end{equation*} This indicates an instability in the $H^\ell$-spaces for solutions of \eqref{eqn:dotzeta}-\eqref{eqn:dotw} that is not present, cf.~Theorem \ref{mainthm}, in solutions generated from initial data satisfying \eqref{Hom-B-idata}. We also observe that the integer $\ell$ is a monotonically decreasing function of $K$ with a minimum value of $1$. For the initial data we tested, the blow-up at $t=0$ in the derivatives occurs at a finite set of spatial points.
\smallskip \item For all $K\in (1/3,1)$ and all choices of initial data $(\ztt_0,\wtt_0)$ that are sufficiently close to homogeneous initial data of either family of solutions \eqref{Hom-A} and \eqref{Hom-C}, solutions to \eqref{eqn:dotzeta}-\eqref{eqn:dotw} are approximated remarkably well, for times sufficiently close to zero, by solutions to the \textit{asymptotic system}\footnote{Note this system is obtained from \eqref{eqn:dotzeta}-\eqref{eqn:dotw} simply by discarding the terms involving spatial derivatives.} \begin{align} \del{t}\tilde{\ztt}{} &=0, \label{zttt-asympt} \\ \del{t}\tilde{\wtt}{} &=\frac{t^{2 \mu -1}(-3 K+\mu +1) \tilde{\wtt}{}}{t^{2 \mu }-(K-1)\tilde{\wtt}{}^2}, \label{wttt-asympt} \end{align} everywhere except, possibly, at a finite set of points where steep gradients form in $\ztt$, which only happens for $K$ large enough and initial data violating \eqref{Hom-B-idata}. \smallskip \item For each $K\in (1/3,1)$ and each choice of initial data $(\ztt_0,\wtt_0)$ that violates \eqref{Hom-B-idata} and is sufficiently close to homogeneous initial data of the family of solutions \eqref{Hom-A}, the density contrast $\frac{\del{x}\rho}{\rho}$ develops steep gradients near a finite number of spatial points where it becomes unbounded as $t\searrow 0$. This behaviour was anticipated by Rendall in \cite{Rendall:2004}, and it is \textit{not consistent} with either the standard picture for inflation in cosmology where the density contrast remains bounded as $t\searrow 0$, or with the behaviour of the density contrast of solutions generated from initial data satisfying \eqref{Hom-B-idata}, cf.~Theorem \ref{mainthm}. \end{enumerate} \subsection{Stability/instability for $K=1$} When the sound speed is equal to the speed of light, i.e. $K=1$, it is well known that the irrotational relativistic Euler equations coincide, under a change of variables, with the linear wave equation. Even though the future global existence of solutions to linear wave equations on exponentially expanding FLRW spacetimes can be inferred from standard existence results for linear wave equations, a corresponding future global existence result for the irrotational relativistic Euler equations does not automatically follow. This is because the change of variables needed to interpret a wave solution as a solution of the relativistic Euler equations requires the gradient of the wave solution to be timelike. Thus an instability in the irrotational relativistic Euler equations can still occur for $K=1$ if the gradient of the wave solution starts out timelike but becomes spacelike somewhere in finite time. This phenomenon was shown in \cite{Fournodavlos:2022} to occur in the more difficult case where coupling to Einstein's equations with a positive cosmological constant was taken into account. In fact, it was shown in \cite{Fournodavlos:2022} that the gradients of all wave solutions generated from initial data sets that correspond to a sufficiently small perturbation of the FLRW fluid solution (i.e.~\eqref{Hom-A} in our setting) become spacelike in finite time. This proves that the self-gravitating versions of the isotropic homogeneous solutions \eqref{Hom-A} are unstable, and in the irrotational setting at least, characterizes the cause of the instability. What is not known is whether the other family of homogeneous solutions \eqref{Hom-C} or their self-gravitating versions remain stable for $K=1$.
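For the reader's convenience, we sketch the form this correspondence takes; the formulas below follow the standard normalization conventions for the stiff fluid and are included for illustration only. Given a scalar field $\phi$ on $(M,\gt)$ with timelike gradient, the identifications
\begin{equation*}
\vt_i = \frac{\nablat_i \phi}{\sqrt{-\gt^{kl}\nablat_k\phi \nablat_l\phi}} \AND \rho = p = -\frac{1}{2}\gt^{kl}\nablat_k \phi \nablat_l \phi
\end{equation*}
turn the stress energy tensor of the $K=1$ fluid into that of a massless scalar field,
\begin{equation*}
\Tt^{ij} = \nablat^i\phi \nablat^j \phi - \frac{1}{2}\gt^{ij}\gt^{kl}\nablat_k\phi\nablat_l\phi,
\end{equation*}
and the relativistic Euler equations \eqref{relEulA} reduce to the linear wave equation $\gt^{ij}\nablat_i\nablat_j\phi = 0$. The change of variables degenerates precisely when $\nablat_i\phi$ fails to be timelike, which is the mechanism behind the instability described above.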
\subsection{Future directions} The most natural and physically relevant generalization of the stability result stated in Theorem \ref{mainthm} would be an analogous stability result for the coupled Einstein-Euler equations with a positive cosmological constant for $K$ satisfying $1/3<K<1$. We expect that establishing this type of stability result is feasible by adapting the arguments from \cite{Oliynyk:CMP_2016}. This expectation is due to the behaviour of the term $t^{-2}\rho v_i v_j$, which is the only potentially problematic term that could, if it grew too quickly as $t\searrow 0$, prevent the use of the arguments from \cite{Oliynyk:CMP_2016}. However, by Theorem \ref{mainthm}, we know that $\rho=\Ord\bigl(t^{\frac{2(1+K)}{1-K}}\bigr)$ and $v_i = \Ord\bigl(t^{\frac{1-3 K}{1-K}}\bigr)$ from which it follows that $t^{-2}\rho v_i v_j = \Ord\bigl(t^{-2+\frac{2(1+K)}{1-K}+\frac{2(1-3K)}{1-K}}\bigr)=\Ord(t^2)$, since $\frac{2(1+K)}{1-K}+\frac{2(1-3K)}{1-K}=\frac{4-4K}{1-K}=4$. This shows that $t^{-2}\rho v_i v_j$ decays quickly enough as $t\searrow 0$ to expect that it should not be problematic. We are currently working on generalizing Theorem \ref{mainthm} to include coupling to Einstein's equations with a positive cosmological constant, and we will report on any progress in this direction in a follow-up article. We are also planning to investigate numerically, under a Gowdy symmetry assumption, whether a similar behaviour, as described in Section \ref{numsol}, occurs for initial data that violates \eqref{Hom-B-idata} when coupling to Einstein's equations with a positive cosmological constant is taken into account. \section{Proof of Theorem \ref{mainthm}\label{sec:proof}} \subsection{Step 1: Fuchsian formulation} Applying the projection operator $\Pi$ to \eqref{relEulC}, while noting that $\Pi \Gc = 0$ by \eqref{Gcdef}-\eqref{Pidef}, yields \begin{equation*} \del{t}(\Pi W) + \Pi\Ac^I \del{I}W =-\frac{\mu}{t}\Pi W. \end{equation*} Multiplying this equation through by $t^{\mu}$ gives \begin{equation}\label{relEulF} \del{t}(t^{\mu}\Pi W) + t^{\mu}\Pi\Ac^I \del{I}W = 0. \end{equation} Applying $\Pi^\perp$ to \eqref{relEulC}, we further observe, with the help of \eqref{Pirel}, that \begin{equation} \label{relEulG} \del{t}(\Pi^\perp W) + \Pi^\perp\Ac^I \del{I}W = t^{2\mu-1}\Pi^\perp\Gc. \end{equation} Next, we decompose the term $\Pi^\perp\Ac^I \del{I}W$ in \eqref{relEulG} as follows \begin{equation*} \Pi^\perp\Ac^I \del{I}W=\Pi^\perp\Ac^I \Pi^\perp \del{I}( \Pi^\perp W) + t^{-\mu}\Pi^\perp\Ac^I \Pi t^{\mu}\del{I} W. \end{equation*} Inserting this into \eqref{relEulG} and multiplying the resulting equation on the left by $\Pi^\perp A^0 \Pi^\perp$ gives \begin{equation} \label{relEulH} \Pi^\perp A^0 \Pi^\perp\del{t}(\Pi^\perp W) +\Pi^\perp A^0 \Pi^\perp\Ac^I \Pi^\perp \del{I}( \Pi^\perp W) + t^{-\mu}\Pi^\perp A^0 \Pi^\perp\Ac^I \Pi t^{\mu}\del{I} W = t^{2\mu-1}\Pi^\perp A^0\Pi^\perp\Gc. \end{equation} It is worth noting at this point that it is the use of this equation to control $\Pi^\perp W$ instead of \eqref{relEulG} that is responsible for the improvement of the range of the parameter values for which stability holds from $1/3<K < 1/2$ in \cite{Oliynyk:2021} to $1/3<K<1$ in this article.
Now, multiplying \eqref{relEulH} by \begin{equation} \label{Sdef} S = \begin{pmatrix} \frac{e^{-2\wbr_1}\psi^2}{\chi} & 0 & 0 & 0 \\ 0 & \frac{\psi^2}{t^{2 \mu } e^{2 \wbr_1}-(K-1) e^{4 \wbr_1}} & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix} \end{equation} and adding the resulting equation to \eqref{relEulF} yields \begin{align*} \del{t}(t^{\mu}\Pi W)+S\Pi^\perp A^0 \Pi^\perp\del{t}(\Pi^\perp W) &+S\Pi^\perp A^0 \Pi^\perp\Ac^I \Pi^\perp \del{I}( \Pi^\perp W) =-\Pi\Ac^I t^{\mu}\del{I}W\\ &- t^{-\mu}S\Pi^\perp A^0 \Pi^\perp\Ac^I \Pi t^{\mu}\del{I} W + t^{2\mu-1}S\Pi^\perp A^0\Pi^\perp\Gc. \end{align*} Setting \begin{equation}\label{Wbdef} \Wb := \Pi^\perp W+t^{\mu}\Pi W = (\zetat,w_1,t^{\mu}w_2,t^{\mu}w_3)^{\tr}, \end{equation} it is then not difficult to verify that the above equation can be expressed as \begin{equation}\label{relEulHa} B^0\del{t}\Wb + B^I \del{I}\Wb = -\Pi\Ac^I t^{\mu}\del{I}W - t^{-\mu}S\Pi^\perp A^0 \Pi^\perp\Ac^I \Pi t^{\mu}\del{I} W + t^{2\mu-1}S\Pi^\perp A^0\Pi^\perp\Gc \end{equation} where \begin{equation}\label{B-def} B^0 =S\Pi^\perp A^0 \Pi^\perp + \Pi \AND B^I =S\Pi^\perp A^0 \Pi^\perp \Ac^I \Pi^\perp. \end{equation} Noting from \eqref{Ac1rep}-\eqref{Ac3rep} that \begin{equation*} \Pi^\perp \Ac^I \Pi^\perp = \frac{b^I}{\sqrt{\frac{t^{2\mu}}{e^{2\wbr_1}}+1}}\begin{pmatrix} -\frac{1}{\sqrt{\phi}} & -\frac{t^{2 \mu }}{\psi \sqrt{\phi}} &0 &0 \\ -\frac{K t^{2 \mu } e^{-2 \wbr_1} \psi}{\sqrt{\phi}\chi} & \frac{(2 K-1) t^{2 \mu }+(K-1) e^{2 \wbr_1}}{\sqrt{\phi} \chi} & 0 & 0\\ 0 & 0 & 0& 0\\ 0 & 0& 0& 0 \end{pmatrix} \end{equation*} where \begin{equation} \label{bI-def} b^1 = 1, \quad b^2 = t^{\mu}(w_3+w_2) \AND b^3 = t^\mu(w_2-w_3), \end{equation} a short calculation using \eqref{Pidef}-\eqref{Piperpdef}, \eqref{A0rep}, and \eqref{Sdef} shows that the matrices \eqref{B-def} are given by \begin{equation}\label{B0-form} B^0 = \begin{pmatrix} \frac{ K e^{-2\wbr_1}\psi^2}{\chi} & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1\end{pmatrix} \end{equation} and \begin{equation} \label{BI-form} B^I = \frac{b^I}{\sqrt{\frac{t^{2\mu}}{e^{2\wbr_1}}+1}}\begin{pmatrix} -\frac{K e^{-2\wbr_1}\psi^2}{\chi\sqrt{\phi}} & -\frac{K t^{2 \mu }e^{-2\wbr_1}\psi}{\sqrt{\phi}\chi} &0 &0 \\ -\frac{K t^{2 \mu } e^{-2 \wbr_1} \psi}{\sqrt{\phi}\chi} & \frac{(2 K-1) t^{2 \mu }+(K-1) e^{2 \wbr_1}}{\sqrt{\phi} \chi} & 0 & 0\\ 0 & 0 & 0& 0\\ 0 & 0& 0& 0 \end{pmatrix}. \end{equation} From these formulas, it is clear that the matrices $B^i$ are symmetric, that is, \begin{equation} \label{B-sym} (B^i)^{\tr}=B^i. \end{equation} We proceed by differentiating \eqref{relEulC} spatially to get \begin{equation*} \del{t}\del{J}W + \Ac^I \del{I}\del{J}W + \del{J}\Ac^I \del{I}W = -\frac{\mu}{t}\Pi \del{J}W + t^{2\mu-1}\del{J}\Gc. \end{equation*} Setting \begin{equation} \label{WbJdef} \Wb\!_J := t^\mu \del{J}W = (t^\mu\del{J}\zetat,t^\mu \del{J}w_1,t^{\mu}\del{J}w_2,t^{\mu} \del{J} w_3)^{\tr}, \end{equation} we can write this as \begin{equation*} \del{t}\Wb\!_J + \Ac^I \del{I}\Wb\!_J + \del{J}\Ac^I \Wb_I = \frac{\mu}{t}\Pi^\perp \Wb\!_J + t^{3\mu-1}\del{J}\Gc. \end{equation*} Multiplying the above equation on the left by $A^0$ and recalling the definitions \eqref{AIdef}, we find that $\Wb\!_J$ satisfies \begin{equation} \label{relEulI} A^0\del{t}\Wb\!_J + A^I \del{I}\Wb\!_J = \frac{\mu}{t}A^0\Pi^\perp \Wb\!_J + t^{3\mu-1}A^0\del{J}\Gc- A^0\del{J}\Ac^I \Wb_I.
\end{equation} Finally, combining \eqref{relEulHa} and \eqref{relEulI} yields the Fuchsian system \begin{equation} \label{relEulK} \Asc^0\del{t}\Wsc + \Asc^I \del{I}\Wsc = \frac{\mu}{t}\Asc^0\Pbb \Wsc + \Fsc \end{equation} where \begin{align} \Wsc &= \begin{pmatrix} \Wb \\ \Wb\!_J \end{pmatrix}, \label{Wscdef} \\ \Asc^0 &= \begin{pmatrix} B^0 & 0 \\ 0 & A^0 \end{pmatrix}, \label{Asc0def} \\ \Asc^I &= \begin{pmatrix} B^I & 0 \\ 0 & A^I \end{pmatrix}, \label{AscIdef} \\ \Pbb &= \begin{pmatrix} 0 & 0 \\ 0 & \Pi^\perp \end{pmatrix}, \label{Pbbdef} \intertext{and} \Fsc &=\begin{pmatrix} -\Pi\Ac^I \Wb_I - t^{-\mu}S\Pi^\perp A^0 \Pi^\perp\Ac^I \Pi \Wb_I + t^{2\mu-1}S\Pi^\perp A^0\Pi^\perp\Gc \\ t^{3\mu-1}A^0\del{J}\Gc- A^0\del{J}\Ac^I \Wb_I\end{pmatrix}. \label{Fsc0def} \end{align} As will be established in Step 2 below, the Fuchsian system \eqref{relEulK} satisfies the assumptions needed to apply the Fuchsian global existence theory from \cite{BOOS:2021}; see, in particular, \cite[Thm.~3.8.]{BOOS:2021} and \cite[\S 3.4.]{BOOS:2021}. This global existence theory will be used in Step 3 of the proof to establish uniform bounds on solutions to the initial value problem \eqref{relEulE.1}-\eqref{relEulE.2} under a suitable small initial data assumption. These bounds in conjunction with a continuation principle will then yield the existence of solutions to \eqref{relEulE.1}-\eqref{relEulE.2} on $(0,1]\times \Tbb^3$ as well as decay estimates as $t\searrow 0$. \subsection{Step 2: Verification of the coefficient assumptions} In order to apply Theorem 3.8~from \cite{BOOS:2021}, see also \cite[\S 3.4.]{BOOS:2021}, to the Fuchsian system \eqref{relEulK}, we need to verify that the coefficients of this equation satisfy the assumptions from Section 3.4~of \cite{BOOS:2021}, see also \cite[\S 3.1.]{BOOS:2021}. To begin the verification, we set \begin{equation} \label{bvarsdef} \tb = t^{2\mu} \AND \wb_\Lambda = t^\mu w_\Lambda, \quad \Lambda=2,3, \end{equation} and observe from \eqref{wbr1def}-\eqref{phidef}, \eqref{A0rep}, \eqref{B0-form} and \eqref{Asc0def} that the matrix $\Asc^0$ can be treated as a map depending on the variables \eqref{bvarsdef}, that is, \begin{equation} \label{A0smooth} \Asc^0 = \Asc^0(\tb,\wbr_1,\wb_2,\wb_3), \end{equation} where for each $R>0$ there exist constants $r,\omega >0$ such that $\Asc^0$ is smooth on the domain defined by \begin{equation} \label{smoothdom} (\tb,\wbr_1,\wb_2,\wb_3) \in (-r,2) \times (-R,R) \times (-R,R)\times (-R,R), \end{equation} and satisfies \begin{equation} \label{A0lb} \Asc^0(\tb,\wbr_1,0,0) \geq \omega \id \end{equation} for all $(\tb,\wbr_1)\in (-r,2)\times (-R,R)$. In the following, we will always be able to choose $R>0$ and $r>0$ as needed in order to guarantee that the statements we make are valid.
Differentiating $\Asc^0$ with respect to $t$ then shows, with the help of \eqref{wbr1def}, \eqref{Wbdef} and \eqref{bvarsdef}-\eqref{A0smooth}, that \begin{align} \del{t}\Asc^0 &= D\Asc^0(\tb,\wbr_1,\wb_2,\wb_3) \begin{pmatrix} 2\mu t^{2\mu-1} \\ u'(t)+\del{t}w_1\\ \del{t}\wb_2\\ \del{t}\wb_3 \end{pmatrix} \notag \\ &= D\Asc^0(\tb,\wbr_1,\wb_2,\wb_3) \left(\begin{pmatrix} 2\mu t^{2\mu-1} \\ u'(t)\\ 0 \\ 0 \end{pmatrix} + \Pc_1\del{t}\Wb \right) \label{dtA0} \end{align} where \begin{equation*} \Pc_1 = \diag(0,1,1,1), \end{equation*} and $\del{t}\Wb$ can be computed from \eqref{relEulHa}, that is, \begin{equation} \label{dt-Wb} \del{t}\Wb =(B^0)^{-1}\Bigl(- B^I \del{I}\Wb -\Pi\Ac^I \Wb_I - t^{-\mu}S\Pi^\perp A^0 \Pi^\perp\Ac^I \Pi \Wb_I + t^{2\mu-1}S\Pi^\perp A^0\Pi^\perp\Gc\Bigr). \end{equation} We note from \eqref{wbr1def}-\eqref{chidef}, \eqref{Sdef}, \eqref{B0-form} and \eqref{bvarsdef} that the matrices \begin{equation}\label{B0smooth} S = S(\tb,\wbr_1) \AND B^0 = B^0(\tb,\wbr_1) \end{equation} are smooth on the domain $(\tb,\wbr_1)\in (-r,2)\times (-R,R)$, and that $B^0$ is bounded below by \begin{equation}\label{B0-lbnd} B^0 \geq \omega \id \end{equation} for all $(\tb,\wbr_1)\in (-r,2)\times (-R,R)$ where $\omega$ can be taken as the same constant as in \eqref{A0lb}. We further note from \eqref{wbr1def}-\eqref{phidef}, \eqref{A0rep}, \eqref{AIdef}, \eqref{BI-form}, \eqref{bI-def} and \eqref{bvarsdef} that the matrices \begin{equation}\label{BIsmooth} A^i= A^i(\tb,\wbr_1,\wb_2,\wb_3) \AND B^I= B^I(\tb,\wbr_1,\wb_2,\wb_3) \end{equation} are smooth on the domain \eqref{smoothdom}, while it is clear from \eqref{Gcdef} that the vector-valued map \begin{equation} \label{Gcsmooth} \Gc = \Gc(\tb,\wbr_1,w_1) \end{equation} is smooth on the domain $(\tb,\wbr_1,w_1)\in (-r,2)\times (-R,R) \times (-R,R)$. Next, setting \begin{equation} \label{wh1def} \wh_1 = t^\mu e^{-2 \wbr_1}, \end{equation} it follows from \eqref{wbr1def}-\eqref{xidef}, \eqref{Ac1rep}-\eqref{Ac3rep} and \eqref{bvarsdef} that the matrices $\Ac^I$ can be expanded as \begin{equation} \label{AcIsmooth} \Ac^I = \Ac^I_1(\wh_1,\wb_2,\wb_3)+ t^\mu \Ac^I_2(\tb,\wbr_1,\wb_2,\wb_3)+ t^{2\mu} \Ac^I_3(\tb,\wbr_1,\wb_2,\wb_3) \end{equation} where the $\Ac^I_2$, $\Ac^I_3$ are smooth on the domain \eqref{smoothdom} and the $\Ac^I_1$ are smooth on the domain defined by \begin{equation*} (\wh_1,\wb_2,\wb_3) \in (-R,R)\times (-R,R)\times (-R,R). \end{equation*} It is also not difficult to verify from \eqref{Ac1rep}-\eqref{Ac3rep} that the $\Ac^I_1$ satisfy \begin{equation} \label{PiperpAcIPi} \Pi^\perp \Ac^I_1 \Pi = 0.
\end{equation} Differentiating the matrices $\Ac^I$ spatially, we have by \eqref{wbr1def}, \eqref{WbJdef}, \eqref{bvarsdef}, \eqref{wh1def} and \eqref{AcIsmooth} that \begin{align} \del{J}\Ac^I &= D\Ac^I_1(\wh_1,\wb_2,\wb_3)\begin{pmatrix}-2 e^{-2 \wbr_1} t^\mu \del{J}w_1 \\ t^\mu \del{J}w_2 \\ t^\mu \del{J}w_3 \end{pmatrix}\notag \\ &+ t^\mu D\Ac^I_2(\tb,\wbr_1,\wb_2,\wb_3)\begin{pmatrix} 0 \\ \del{J}w_1\\ t^\mu \del{J}w_2 \\ t^\mu\del{J} w_3 \end{pmatrix} +t^{2\mu} D\Ac^I_3(\tb,\wbr_1,\wb_2,\wb_3)\begin{pmatrix} 0 \\ \del{J}w_1\\ t^\mu \del{J}w_2 \\ t^\mu\del{J} w_3 \end{pmatrix} \notag \\ & = \Bigl(D\Ac^I_1(\wh_1,\wb_2,\wb_3)\Pc_2 + D\Ac^I_2(\tb,\wbr_1,\wb_2,\wb_3)\Pc_3+t^\mu D\Ac^I_3(\tb,\wbr_1,\wb_2,\wb_3)\Pc_3\Bigr) \Wb\!_J, \label{dJAcI} \end{align} where \begin{equation*} \Pc_2 = \begin{pmatrix} 0 & -2 e^{-2 \wbr_1} & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix} \AND \Pc_3 = \begin{pmatrix} 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & t^\mu & 0 \\ 0 & 0 & 0 & t^\mu \end{pmatrix}. \end{equation*} By \eqref{Pidef}-\eqref{Piperpdef} and \eqref{A0rep}, we note that the matrix $A^0$ satisfies $[\Pi^\perp,A^0] = 0$ and $\Pi^\perp A^0 \Pi = \Pi A^0 \Pi^\perp = 0$. Using these identities, it then follows from the definitions \eqref{Asc0def} and \eqref{Pbbdef} that $\Asc^0$ satisfies \begin{equation} \label{Asc0Pbbcom} [\Pbb,\Asc^0] = 0 \end{equation} and \begin{equation} \label{PbbAsc0Pbbperp} \Pbb^\perp \Asc^0 \Pbb = \Pbb \Asc^0 \Pbb^\perp = 0, \end{equation} where \begin{equation} \label{Pbbperpdef} \Pbb^\perp = \id -\Pbb. \end{equation} Additionally, by \eqref{Pidef}-\eqref{Pirel}, we observe that $\Pbb$ satisfies \begin{equation} \label{Pbbrel} \Pbb^2 = \Pbb, \quad \Pbb^{\tr} = \Pbb, \quad \del{t}\Pbb = 0 \AND \del{I} \Pbb = 0, \end{equation} while the symmetry of the matrices $\Asc^i$, that is, \begin{equation} \label{Ascisym} (\Asc^i)^{\tr} = \Asc^i, \end{equation} is obvious from the definitions \eqref{A0rep} and \eqref{Asc0def}-\eqref{AscIdef}, and the relations \eqref{AI-sym} and \eqref{B-sym}. Now, from the definitions \eqref{wbr1def}, \eqref{Wbdef}, \eqref{WbJdef}, \eqref{Wscdef}, \eqref{bvarsdef} and \eqref{wh1def}, the formulas \eqref{dtA0} and \eqref{dt-Wb}, the estimates \eqref{Hombounds} for $u(t)$ and $u'(t)$, the smoothness properties \eqref{A0smooth}, \eqref{B0smooth}, \eqref{BIsmooth}, \eqref{Gcsmooth} and \eqref{AcIsmooth} of the matrices $\Asc^0$, $S$, $A^0$, $B^0$, $A^I$, $B^I$, $\Ac^I$ and the source term $\Gc$, the lower bound \eqref{B0-lbnd} on $B^0$, and the identity \eqref{PiperpAcIPi}, it is not difficult to verify that for each $\mu \in (0,\infty)$ there exists a constant $\theta>0$ such that \begin{equation}\label{dtAsc-bnd} |\del{t}\Asc^0| \leq \theta (t^{2\mu-1}+1) \end{equation} for all $(t,\Wsc,D\Wsc)\in [0,1]\times B_R(\Rbb^{16})\times B_{R}(\Rbb^{16\times 3})$, where $D\Wsc = (\del{I}\Wsc)$. From \eqref{Fsc0def} and similar considerations, it is also not difficult to verify that \begin{equation} \label{Fsc-bnd} |\Fsc| \lesssim (t^{2\mu-1}+1)|\Wsc| \end{equation} for all $(t,\Wsc)\in [0,1]\times B_R(\Rbb^{16})$. It is also clear that we can view \eqref{relEulK} as an equation for the variables $\Wsc=(\Wb,\Wb_J)$, with $\Wb=(\zetat,w_1,\wb_2,\wb_3)$ and $\Wb_J=(\zetat_J, w_{1J},\wb_{2J},\wb_{3J})$, where the maps $\Asc^i$ and $\Fsc$ depend on the variables $(t,\Wb)$ and $(t,\Wsc)$, respectively.
Taken together, \textbf{(i)} the variable definitions \eqref{wbr1def}, \eqref{Wbdef}, \eqref{WbJdef}, \eqref{bvarsdef} and \eqref{wh1def}, \textbf{(ii)} the smoothness properties \eqref{A0smooth}, \eqref{B0smooth}, \eqref{BIsmooth}, \eqref{Gcsmooth} and \eqref{AcIsmooth} of the matrices $\Asc^0$, $S$, $A^0$, $B^0$, $A^I$, $B^I$, $\Ac^I$ and the source term $\Gc$, \textbf{(iii)} the identities \eqref{Asc0Pbbcom}-\eqref{PbbAsc0Pbbperp} and the lower bound \eqref{A0lb} satisfied by the matrix $\Asc^0$, \textbf{(iv)} the definitions \eqref{AscIdef} and \eqref{Fsc0def} of the matrices $\Asc^I$ and the source term $\Fsc$, \textbf{(v)} the properties \eqref{Pbbrel} of the projection map $\Pbb$, and \textbf{(vi)} the bounds \eqref{dtAsc-bnd} and \eqref{Fsc-bnd} on $\del{t}\Asc^0$ and $\Fsc$, respectively, imply that for any\footnote{By \eqref{mu-def}, $\mu\in (0,\infty)$ corresponds to $1/3<K<1$.} $\mu \in (0,\infty)$ and $R>0$ chosen sufficiently small, there exist constants $\theta,\gamma_1=\gammat_1,\gamma_2=\gammat_2>0$ such that the Fuchsian system \eqref{relEulK} satisfies all the assumptions from Section 3.4 of \cite{BOOS:2021} for the following choice of constants: $\kappa=\kappat=\mu$, $\beta_\ell=0$, $1\leq \ell \leq 7$, \begin{gather*} p=\begin{cases}2\mu & \text{if $0<\mu\leq 1/2$}\\ 1 & \text{if $\mu > 1/2$} \end{cases} \end{gather*} and $\lambda_1=\lambda_2=\lambda_3= \alpha=0$. As discussed in \cite[\S 3.4]{BOOS:2021}, under the time transformation\footnote{Note that our time variable $t$ is assumed to be positive as opposed to \cite{BOOS:2021}, where it is taken to be negative. This causes no difficulties as we can change between these two conventions by using the simple time transformation $t\rightarrow -t$.} $t \mapsto t^p$, the transformed version of \eqref{relEulK} will satisfy all of the assumptions from Section 3.1 of \cite{BOOS:2021}. Moreover, since the matrices $\Asc^I$ have a regular limit as $t\searrow 0$, the constants $\btt$ and $\tilde{\btt}$ from Theorem 3.8 of \cite{BOOS:2021} vanish. This fact together with $\beta_1=0$ and $\kappa=\kappat=\mu$ implies that the constant\footnote{This constant is denoted by $\zeta$ in the article \cite{BOOS:2021}. We denote it here by $\mathfrak{z}$ because $\zeta$ is already being used to denote the modified density.} $\mathfrak{z}$ from Theorem 3.8 of \cite{BOOS:2021} that determines the decay rate is given by $\mathfrak{z}= \mu$. \subsection{Step 3: Existence and uniqueness} By \eqref{A0rep} and \eqref{AI-sym}, we know that the matrices $A^i$ are symmetric. Furthermore, from the analysis carried out in Step 2 above, we know that the matrices $A^i$ and the source term $A^0 \Gc$ depend smoothly on the variables $(t,w_J)$ for $t\in (0,1]$ and $w_J$ in an open neighbourhood of zero, and that the matrix $A^0$ is positive definite on this neighbourhood. As a consequence, the system \eqref{relEulE.1} is symmetrizable and can be put in the symmetric hyperbolic form \eqref{relEulD} by multiplying it on the left by the matrix $A^0$.
Since $k\in\Zbb_{>3/2+1}$ and $W_0 :=(\zetat_0, w^0_J)^{\tr}\in H^{k+1}(\Tbb^3,\Rbb^4)$, we obtain from an application of standard local-in-time existence and uniqueness theorems and the continuation principle for symmetric hyperbolic systems, see Propositions 1.4, 1.5 and 2.1 from \cite[Ch.~16]{TaylorIII:1996}, the existence of a unique solution \begin{equation*} W=(\zetat,w_J) \in C^0\bigl((T_*,1], H^{k+1}(\Tbb^3,\Rbb^4)\bigr)\cap C^1\bigl((T_*,1],H^{k}(\Tbb^3,\Rbb^4)\bigr) \end{equation*} to the IVP \eqref{relEulE.1}-\eqref{relEulE.2} where $T_*\in [0,1)$ is the maximal time of existence. From the computations carried out in Step 1 of the proof, this solution determines via \eqref{Wbdef} and \eqref{WbJdef} a solution \begin{equation} \label{Wscvar} \Wsc = (\Wb,\Wb\!_J) \in C^0\bigl((T_*,1], H^{k}(\Tbb^3,\Rbb^{16})\bigr)\cap C^1\bigl((T_*,1],H^{k-1}(\Tbb^3,\Rbb^{16})\bigr) \end{equation} of the IVP \begin{align} \Asc^0\del{t}\Wsc + \Asc^I \del{I}\Wsc &= \frac{\mu}{t}\Asc^0\Pbb \Wsc + \Fsc\hspace{1.20cm} \text{in $(T_*,1]\times \Tbb^3$,} \label{relEulM1} \\ \Wsc &= \Wsc_0 := (W_0,\del{J}W_0) \hspace{0.5cm}\text{in $\{1\}\times \Tbb^3$,} \label{relEulM2} \end{align} where we observe that \begin{equation} \label{Wsc-idata} \norm{\Wsc_0}_{H^k} \lesssim \norm{W_0}_{H^{k+1}} \leq \delta. \end{equation} On the other hand, by Step 2 we can apply\footnote{It is important to note that the regularity $k\in \Zbb_{>3/2+1}$ of the initial data \eqref{Wsc-idata} is less than what is required to apply Theorem 3.8~of \cite{BOOS:2021} to the Fuchsian system \eqref{relEulK}. The reason that we can still apply this theorem is that the matrices $\Asc^I$ in \eqref{relEulK} do not have any $1/t$ singular terms; see Remark A.3.(ii) from \cite{BeyerOliynyk:2020}.} Theorem 3.8~from \cite{BOOS:2021} to the time-transformed version of \eqref{relEulK} as described in \cite[Section 3.4]{BOOS:2021} to deduce, for $\delta>0$ chosen small enough and the initial data satisfying \eqref{Wsc-idata}, the existence of a unique solution \begin{equation*} \Wsc^* \in C^0\bigl((0,1],H^k(\Tbb^3,\Rbb^{16})\bigr)\cap L^\infty\bigl((0,1],H^k(\Tbb^3,\Rbb^{16})\bigr)\cap C^1\bigl((0,1],H^{k-1}(\Tbb^3,\Rbb^{16})\bigr) \end{equation*} to the IVP \eqref{relEulM1}-\eqref{relEulM2} with $T_*=0$ that satisfies the following properties: \begin{enumerate}[(1)] \item The limit of $\Pbb^\perp \Wsc^*$ as $t\searrow 0$, denoted $\Pbb^\perp \Wsc^*(0)$, exists in $H^{k-1}(\Tbb^3,\Rbb^{16})$. \item The solution $\Wsc^*$ is bounded by the energy estimate \begin{equation}\label{eestA} \norm{\Wsc^*(t)}_{H^k}^2 + \int_{t}^1 \frac{1}{\tau} \norm{\Pbb \Wsc^*(\tau)}_{H^k}^2\, d\tau \lesssim \norm{\Wsc_0}_{H^k}^2 \end{equation} for all $t\in (0,1]$, where the implied constant depends on $\delta$. \item For any given $\sigma>0$, the solution $\Wsc^*$ satisfies the decay estimate \begin{gather} \norm{\Pbb \Wsc^*(t)}_{H^{k-1}} \lesssim t^{\mu-\sigma} \AND \norm{\Pbb^\perp \Wsc^*(t) - \Pbb^\perp \Wsc^*(0)}_{H^{k-1}} \lesssim t^{\mu-\sigma} \label{decayA2} \end{gather} for all $t\in (0,1]$, where the implied constants depend on $\delta$ and $\sigma$. \end{enumerate} By uniqueness, the two solutions $\Wsc$ and $\Wsc^*$ to the IVP \eqref{relEulM1}-\eqref{relEulM2} must coincide on their common domain of definition, and consequently, $\Wsc(t)=\Wsc^*(t)$ for all $t\in (T_*,1]$.
But this implies by \eqref{Wscvar}, the energy estimate \eqref{eestA}, and Sobolev's inequality \cite[Thm.~6.2.1]{Rauch:2012} that \begin{equation*} \norm{\Wb(t)}_{W^{1,\infty}} \lesssim \norm{\Wb(t)}_{H^k} \leq \norm{\Wsc(t)}_{H^{k-1}} \lesssim \norm{\Wsc_0}_{H^k}, \quad T_*<t\leq 1. \end{equation*} By shrinking $\delta$ if necessary, we can, by \eqref{Wsc-idata}, make $\norm{\Wsc_0}_{H^k}$ as small as we like, which, in turn, implies via the above estimate that we can bound $\Wb$ by $\norm{\Wb(t)}_{W^{1,\infty}} \leq \frac{R}{2}$ for all $t\in (T_*,1]$, where $R>0$ is as determined in Step 2 of the proof. This bound is sufficient to guarantee that the matrices $A^i$ and the source term $A^0\Gsc$ from the symmetric hyperbolic system \eqref{relEulD} remain well defined and that the matrix $A^0$ continues to be positive definite. By the continuation principle and the maximality of $T_*$, we deduce that $T_*=0$, and hence that $\Wsc(t)=\Wsc^*(t)$ for all $t\in (0,1]$. From this and the energy estimate \eqref{eestA}, it then follows with the help of the definitions \eqref{Pidef}-\eqref{Piperpdef}, \eqref{Wbdef}, \eqref{WbJdef}, \eqref{Pbbdef} and \eqref{Wscvar} that \begin{equation*} \Ec(t) + \int_t^1 \tau^{2\mu-1}\bigl(\norm{D\zetat(\tau)}_{H^k}^2+\norm{Dw_1(\tau)}_{H^k}^2\bigr)\,d\tau \lesssim \norm{W_0}_{H^{k+1}}^2, \quad 0<t\leq 1, \end{equation*} where \begin{equation*} \Ec(t)=\norm{\zetat(t)}_{H^k}^2+\norm{w_1(t)}_{H^k}^2+t^{2\mu}\Bigl(\norm{D\zetat(t)}_{H^k}^2+\norm{Dw_1(t)}_{H^k}^2+\norm{w_2(t)}_{H^{k+1}}^2+\norm{w_3(t)}_{H^{k+1}}^2\Bigr). \end{equation*} We further obtain from the decay estimate \eqref{decayA2} and \eqref{Pbbperpdef} the existence of functions $\zetat_*, w_1^* \in H^{k-1}(\Tbb^3)$ and $\wb_2^*,\wb_3^* \in H^{k}(\Tbb^3)$ such that \begin{equation} \label{Ecb-bnd} \bar{\Ec}(t) \lesssim t^{\mu-\sigma} \end{equation} for all $t\in (0,1]$, where \begin{equation*} \bar{\Ec}(t)=\norm{\zetat(t) - \zetat_*}_{H^{k-1}}+\norm{w_1(t) - w_1^*}_{H^{k-1}} +\norm{t^\mu w_2(t) - \wb_2^*}_{H^{k}}+\norm{t^\mu w_3(t) - \wb_3^*}_{H^{k}}. \end{equation*} We also note by \eqref{c-velocity}, \eqref{mod-den}, \eqref{v0def} and \eqref{zetatdef}-\eqref{cov2c} that $u$ and $W=(\zetat,w_J)^{\tr}$ determine a solution of the relativistic Euler equations \eqref{relEulA} on the spacetime region $M=(0,1]\times \Tbb^3$ via the formulas \eqref{relEulsol.1}-\eqref{relEulsol.5}. To complete the proof, we find from differentiating \eqref{relEulsol.1} that the density contrast can be expressed as \begin{equation} \label{den-constrast-B} \frac{\del{I}\rho}{\rho} = \frac{(1+K)(t^{2\mu}+ e^{2(u+w_1)})^{\frac{1+K}{2}}\del{I}\zetat -(1+K)(t^{2\mu}+ e^{2(u+w_1)})^{\frac{K-1}{2}}e^{2(u+w_1)}\del{I}w_1 }{(t^{2\mu}+ e^{2(u+w_1)})^{\frac{1+K}{2}}}. \end{equation} Since $\mu>0$, we can choose $\sigma>0$ small enough so that $\mu-\sigma >0$. Doing so then implies by \eqref{Ecb-bnd} that $\zetat$ and $w_1$ converge in $H^{k-1}(\Tbb^3)$ to $\zetat_*$ and $w_1^*$ as $t\searrow 0$. Since $u(t)$ converges as well by Proposition \ref{Homprop}, it is then not difficult to verify from \eqref{den-constrast-B} and the Sobolev and Moser inequalities \cite[Thms.~6.2.1 \& 6.4.1]{Rauch:2012} that \begin{equation*} \lim_{t\searrow 0} \Bigl\| \frac{\del{I}\rho}{\rho} - (1+K)\del{I}(\zetat_*-w_1^*) \Bigr\|_{H^{k-2}} = 0, \end{equation*} which completes the proof.
\section{Numerical solutions\label{numsol}} \subsection{Numerical setup} In the numerical setup that we use to solve the system \eqref{eqn:dotzeta}-\eqref{eqn:dotw}, the computational domain is $[0,2\pi]$ with periodic boundary conditions, the variables $\texttt{z}$ and $\texttt{w}$ are discretised in space using $2^{\text{nd}}$ order central finite differences, and time integration is performed using a standard $2^{\text{nd}}$ order Runge-Kutta method (\textit{Heun's method}). As a consequence, our code is second order accurate\footnote{Strictly speaking, one also needs to enforce a CFL condition to ensure convergence. In this case we have used the tightened 4/3 CFL condition for Heun's method, which is discussed in \cite{schneider:hal-01307287}.}. \subsubsection{Convergence tests} We have verified the second order accuracy of our code with convergence tests involving perturbations of both types of homogeneous solutions \eqref{Hom-A} and \eqref{Hom-C}. In our convergence tests, we have evolved the system \eqref{eqn:dotzeta}-\eqref{eqn:dotw} starting from the two initial data sets \begin{align} \label{eqn:numericalID_A} (\ztt_{0},\wtt_{0}) &= (0,0.1\sin(x)) \intertext{and} \label{eqn:numericalID_B} (\ztt_{0},\wtt_{0}) &= (0,0.1\sin(x)+0.15) \end{align} using resolutions of $N = 200$, $400$, $800$, $1600$, $3200$, and $6400$ grid points. The initial data \eqref{eqn:numericalID_A} and \eqref{eqn:numericalID_B} satisfy the conditions \eqref{Hom-A-idata} and \eqref{Hom-B-idata}, respectively, and the solutions generated from this initial data represent perturbations of the homogeneous solutions \eqref{Hom-A} and \eqref{Hom-C}, respectively. To estimate the error, we took the base $2$ logarithm of the absolute value of the difference between each simulation and the highest resolution run. The results are shown in Figures \ref{fig:W_conv_pos}, \ref{fig:Z_conv_pos}, \ref{fig:W_conv_cross} and \ref{fig:Z_conv_cross}, from which the second order convergence is clear. \begin{figure}[h] \centering \subfigure[$t=0.799$]{ \includegraphics[width=0.3\textwidth]{Images/W_PosConv_t100}} \subfigure[$t=0.599$]{ \includegraphics[width=0.3\textwidth]{Images/W_PosConv_t200}} \subfigure[$t=0.028$]{ \includegraphics[width=0.3\textwidth]{Images/W_PosConv_t485}} \caption{Convergence plots of \texttt{w} at various times. $K=0.5$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x)+0.15)$.} \label{fig:W_conv_pos} \end{figure} \begin{figure}[h] \centering \subfigure[$t=0.799$]{ \includegraphics[width=0.3\textwidth]{Images/Z_PosConv_t100}} \subfigure[$t=0.599$]{ \includegraphics[width=0.3\textwidth]{Images/Z_PosConv_t200}} \subfigure[$t=0.028$]{ \includegraphics[width=0.3\textwidth]{Images/Z_PosConv_t485}} \caption{Convergence plots of $\ztt$ at various times.
$K=0.5$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x)+0.15)$.} \label{fig:Z_conv_pos} \end{figure} \begin{figure}[h] \centering \subfigure[$t=0.799$]{ \includegraphics[width=0.3\textwidth]{Images/W_CrossConv_t100}} \subfigure[$t=0.198$]{ \includegraphics[width=0.3\textwidth]{Images/W_CrossConv_t400}} \subfigure[$t=0.028$]{ \includegraphics[width=0.3\textwidth]{Images/W_CrossConv_t485}} \caption{Convergence plots of \texttt{w} at various times. $K=0.5$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x))$.} \label{fig:W_conv_cross} \end{figure} \begin{figure}[h] \centering \subfigure[$t=0.799$]{ \includegraphics[width=0.3\textwidth]{Images/Z_CrossConv_t100}} \subfigure[$t=0.198$]{ \includegraphics[width=0.3\textwidth]{Images/Z_CrossConv_t400}} \subfigure[$t=0.028$]{ \includegraphics[width=0.3\textwidth]{Images/Z_CrossConv_t485}} \caption{Convergence plots of $\ztt$ at various times. $K=0.5$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x))$.} \label{fig:Z_conv_cross} \end{figure} \FloatBarrier \subsubsection{Code validation} A simple way to test the validity of our code is to verify that numerical solutions to \eqref{eqn:dotzeta}-\eqref{eqn:dotw} that are generated from initial data $(\ztt_0,\wtt_0)$ with $\wtt_0>0$ satisfy the decay rates of Proposition \ref{Homprop}, \begin{align} \label{homdecay} |u(t)-u(0)| \lesssim t^{2\mu} \AND |u'\!(t)| \lesssim t^{2\mu-1}, \end{align} and of Theorem \ref{mainthm}, \begin{align} \label{nonhomdecay} \norm{\zetat(t) - \zetat_*}_{H^{k-1}}+\norm{w_1(t) - w_1^*}_{H^{k-1}}+\norm{t^\mu w_2(t) - \wb_2^*}_{H^{k}}+\norm{t^\mu w_3(t) - \wb_3^*}_{H^{k}} \lesssim t^{\mu-\sigma}, \;\; \sigma>0. \end{align} We first note that, by equating \eqref{cov2a} and \eqref{wttt-def} and recalling that $W = 0$ for homogeneous solutions, $u(t)$ can be expressed in terms of a homogeneous solution $\wtt_{H}(t)$ of \eqref{eqn:dotzeta}-\eqref{eqn:dotw} as $u(t) = \ln(\wtt_{H}(t))$. The decay rates for the homogeneous solution \eqref{homdecay} can then be re-written in terms of $\wtt_{H}$ as \begin{align} \label{num_homdecay1} |\ln(\wtt_{H}(t))-\ln(\wtt_{H}(0))| &\lesssim t^{2\mu}, \\ \label{num_homdecay2} \Bigl|\frac{\wtt^{\prime}_{H}(t)}{\wtt_{H}(t)}\Bigr| &\lesssim t^{2\mu-1}. \end{align} Similarly, for non-homogeneous solutions, we can express $w_{1}$ in terms of $\wtt$ by setting $w_{2}=w_{3}=0$ and equating \eqref{cov2a} and \eqref{wttt-def} to get $w_{1} = \ln(\wtt(t,x)) - \ln(\wtt_{H}(t))$. The decay rate \eqref{nonhomdecay}, in the $H^{1}$ norm, is then \begin{align} \label{num_nonhomdecay} \|\ztt(t,x) -\ztt(0,x)\|_{H^{1}}+\|[\ln(\wtt(t,x))-\ln(\wtt_{H}(t))]-[\ln(\wtt(0,x))-\ln(\wtt_{H}(0))]\|_{H^{1}} \lesssim t^{\mu-\sigma}. \end{align} We have estimated $\ztt|_{t=0}$, $\wtt|_{t=0}$ and $\wtt_{H}|_{t=0}$ by taking the values of the functions at a time-step close to $t=0$ and calculated $\wtt^{\prime}_{H}(t)$ using second order central finite differences. As shown in Figure \ref{fig:DecayRates}, the numerical solutions clearly replicate the above decay rates, suggesting that the code is correctly implemented.
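Although we do not reproduce the full evolution code here, the basic ingredients of the scheme and of the validation tests are easy to isolate. The following minimal Python sketch (our illustration; the right-hand side \texttt{F} is a placeholder for the actual system \eqref{eqn:dotzeta}-\eqref{eqn:dotw}, and all names are ours) shows one Heun time step built on $2^{\text{nd}}$ order periodic central differences, together with the least-squares slope used to read a decay rate off a log-log plot: \begin{verbatim}
import numpy as np

# spatial grid on [0, 2*pi) with periodic wrap-around
N = 200
x = np.linspace(0.0, 2.0 * np.pi, N, endpoint=False)
dx = x[1] - x[0]

def Dx(u):
    # 2nd order central finite difference, periodic boundary conditions
    return (np.roll(u, -1) - np.roll(u, 1)) / (2.0 * dx)

def heun_step(F, t, u, dt):
    # one step of Heun's method (2nd order Runge-Kutta)
    k1 = F(t, u)
    k2 = F(t + dt, u + dt * k1)
    return u + 0.5 * dt * (k1 + k2)

def fitted_rate(ts, errs):
    # slope of log(err) against log(t) over a window of times,
    # to be compared with exponents such as 2*mu - 1 or mu - sigma
    return np.polyfit(np.log(ts), np.log(errs), 1)[0]
\end{verbatim} Fitting such a slope over a window of times close to $t=0$ gives checks of the type displayed in Figure \ref{fig:DecayRates}.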
\begin{figure}[h] \centering \subfigure[Numerical test of \eqref{num_homdecay1}]{ \includegraphics[width=0.3\textwidth]{Images/HomogeneousDecay.png}} \subfigure[Numerical test of \eqref{num_homdecay2}]{ \includegraphics[width=0.3\textwidth]{Images/DerivativeDecay.png}} \subfigure[Numerical test of \eqref{num_nonhomdecay}]{ \includegraphics[width=0.3\textwidth]{Images/H1normdecay_final.png}} \caption{Log-log decay plots of numerical solutions (Blue) against the corresponding bound (Orange) and the bound multiplied by a constant $c$ (Yellow). $K = 0.5$, $N = 1000$. Initial data for the homogeneous solution is $(\ztt(0,x),\wtt_{H}(0,x)) = (0,1)$. Initial data for the non-homogeneous solution is $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x)+1)$.} \label{fig:DecayRates} \end{figure} \FloatBarrier \subsection{Numerical behaviour} Beyond the convergence tests, we have generated numerical solutions to the system \eqref{eqn:dotzeta}-\eqref{eqn:dotw} from a variety of initial data sets $(\ztt_0,\wtt_0)$ for which $\wtt_0$ satisfies either condition \eqref{Hom-A-idata} or condition \eqref{Hom-B-idata}. We employed resolutions ranging from 1000 to 160,000 grid points in our simulations. For initial data satisfying \eqref{Hom-A-idata}, we chose functions $\wtt_0$ that cross the $x$-axis at least twice,\footnote{A continuous periodic function that changes sign must cross the $x$-axis an even number of times, and hence at least twice.} while for initial data satisfying \eqref{Hom-B-idata}, $\wtt_0$ does not cross the $x$-axis at all. All of the solutions displayed in the figures of this article are generated from initial data of the form \begin{align*} (\ztt_0,\wtt_{0}) = (0, a\sin(x+\theta)+c) \end{align*} for some particular choice of the constants $a,c, \theta \in \mathbb{R}$. From our numerical solutions, we observe, for the full parameter range $1/3<K<1$ and all choices of the initial data with $a$ sufficiently small, that $\ztt$ and $\wtt$ remain bounded and converge pointwise as $t\searrow 0$; see Figures \ref{fig:w_evo} and \ref{fig:z_evo}. \subsubsection{Derivative blow-up at $t=0$} While $\ztt$ and $\wtt$ remain bounded, our numerical simulations reveal that derivatives of the solutions of sufficiently high order blow up at $t=0$ for the parameter values $1/3<K<1$ and initial data satisfying \eqref{Hom-A-idata}. In Table \ref{ell-table}, we list, for a selection of $K$ values, the corresponding minimum value of $\ell$ for which $\sup_{x\in \Tbb^1}\bigl(|\del{x}^{\ell} \ztt(t,x)|+|\del{x}^{\ell}\wtt(t,x)|\bigr) \nearrow \infty$ as $t\searrow 0$. From these values, it appears that $\ell$ is a non-increasing function of $K$.
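The blow-up reported in Table \ref{ell-table} can be probed numerically by monitoring sup-norms of repeated finite differences as $t\searrow 0$. A minimal sketch of such a diagnostic is given below (our illustration; at a fixed resolution a probe of this kind can only indicate, not prove, blow-up): \begin{verbatim}
import numpy as np

def sup_deriv(u, dx, ell):
    # approximate sup-norm of the ell-th spatial derivative by
    # applying the periodic 2nd order central difference ell times
    for _ in range(ell):
        u = (np.roll(u, -1) - np.roll(u, 1)) / (2.0 * dx)
    return np.max(np.abs(u))
\end{verbatim} Tracking this quantity for $\ztt$ and $\wtt$ and increasing $\ell$ along the evolution then singles out the smallest $\ell$ for which it grows without bound.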
\begin{table}[h] \begin{tabular}{|l|l|l|l|l|l|l|l|l|l|} \hline $K$ & $0.40$ & $0.45$ & $0.50$ & $0.55$ & $0.60$ & $0.65$ & $0.75$ & $0.85$ & $0.95$ \\ \hline $\ell$ & $4$ & $3$ & $2$ & $1$ & $1$ & $1$ & $1$ & $1$ & $1$ \\ \hline \end{tabular} \caption{Observed value of $\ell$ for various $K$.}\label{ell-table} \end{table} \begin{figure}[h] \centering \subfigure[$t=1.0$]{ \includegraphics[width=0.3\textwidth]{Images/W_t0}} \subfigure[$t=0.017$]{ \includegraphics[width=0.3\textwidth]{Images/W_t125}} \subfigure[$t=0.0001$]{ \includegraphics[width=0.3\textwidth]{Images/W_t275}} \caption{Plots of $\texttt{w}$ at various times. $K=0.6$, $N = 1000$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x))$.} \label{fig:w_evo} \end{figure} \begin{figure}[h] \centering \subfigure[$t=1.0$]{ \includegraphics[width=0.3\textwidth]{Images/Zeta_t0}} \subfigure[$t=0.088$]{ \includegraphics[width=0.3\textwidth]{Images/Zeta_t75}} \subfigure[$t=0.0001$]{ \includegraphics[width=0.3\textwidth]{Images/Zeta_t275}} \caption{Plots of $\ztt$ at various times. $K=0.6$, $N = 1000$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x))$.} \label{fig:z_evo} \end{figure} \FloatBarrier \subsubsection{Asymptotic behaviour and approximations\label{sec:asymp}} For the full parameter range $1/3<K<1$ and all choices of initial data, we observe that our numerical solutions display ODE-like behaviour near $t=0$. In particular, these solutions can be approximated by solutions of the asymptotic system \eqref{zttt-asympt}-\eqref{wttt-asympt} at late times using the following procedure: \begin{enumerate}[(i)] \item Generate a numerical solution $(\ztt,\wtt)$ of \eqref{eqn:dotzeta}-\eqref{eqn:dotw} from initial data $(\ztt_{0},\wtt_{0})$ specified at time $t_{0}>0$. \item Fix a time $\tilde{t}_{0} \in (0,t_{0})$ when the numerical solution $(\ztt,\wtt)$ first appears to be dominated by ODE behaviour. \item Fix initial data for the asymptotic system \eqref{zttt-asympt}-\eqref{wttt-asympt} at $t=\tilde{t}_{0}$ by setting \begin{equation*} (\tilde{\ztt}{}_{0},\tilde{\wtt}{}_{0}) = (\ztt,\wtt)|_{t=\tilde{t}{}_0}. \end{equation*} \item Solve the asymptotic system \eqref{zttt-asympt}-\eqref{wttt-asympt} with initial data as chosen above in (iii) to obtain the asymptotic solution $(\tilde{\ztt}{},\tilde{\wtt}{})$, where \begin{equation}\label{zttt-sol} \tilde{\ztt}{} = \tilde{\ztt}{}_0, \end{equation} and $\tilde{\wtt}{}$ is defined implicitly by \begin{align}\label{wttt-sol} \frac{(3 K-\mu -1) \ln \left((3 K-1) t^{2 \mu }-(K-1) \mu \tilde{\wtt}{}^2\right)}{2 (3 K-1) \mu }-\frac{\ln \left(|\tilde{\wtt}{}|(1-3K)\right)}{1-3 K}=c \end{align} and \begin{align*} c = \frac{(3 K-\mu -1) \ln \left((3 K-1) \tilde{t}_{0}^{2 \mu }-(K-1) \mu \tilde{\wtt}{}_{0}^2\right)}{2 (3 K-1) \mu }-\frac{\ln \left(|\tilde{\wtt}{}_{0}|(1-3K)\right)}{1-3 K}. \end{align*} A numerical root-finding sketch for this step is given below. \item Compare the numerical solution $(\ztt,\wtt)$ to the asymptotic solution $(\tilde{\ztt}{},\tilde{\wtt}{})$ on the region $(0,\tilde{t}{}_0)\times \Tbb^1$. \end{enumerate} \bigskip Using this procedure, we find that numerical solutions $(\ztt,\wtt)$ of the system \eqref{eqn:dotzeta}-\eqref{eqn:dotw} can be \textit{remarkably well-approximated} by solutions $(\tilde{\ztt}{},\tilde{\wtt}{})$ of the asymptotic system.
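Step (iv) requires solving the implicit relation \eqref{wttt-sol} for $\tilde{\wtt}{}$ at each $(t,x)$ with $\tilde{\wtt}{}_0\neq 0$. A minimal Python sketch using bracketed one-dimensional root finding is given below. It assumes SciPy, treats $\mu$ as an independent input (in the paper $\mu$ is determined by $K$ through \eqref{mu-def}), and evaluates the second logarithm on the absolute value of its argument; this only shifts both sides of \eqref{wttt-sol} by the same constant, which cancels in the difference with $c$: \begin{verbatim}
import numpy as np
from scipy.optimize import brentq

K, mu = 0.6, 1.0   # sample values; in the paper mu = mu(K)

def G(w, t):
    # left-hand side of the implicit relation; note -(K - 1) = 1 - K > 0,
    # so the first logarithm has a positive argument for 1/3 < K < 1
    arg = (3.0*K - 1.0)*t**(2.0*mu) + (1.0 - K)*mu*w**2
    return ((3.0*K - mu - 1.0)/(2.0*(3.0*K - 1.0)*mu))*np.log(arg) \
        - np.log(np.abs(w)*np.abs(1.0 - 3.0*K))/(1.0 - 3.0*K)

def w_asymptotic(t, t0, w0):
    # solve G(w, t) = G(w0, t0) for w with the sign of w0; for t > 0 and
    # w > 0, G -> -inf as w -> 0+ and G -> +inf as w -> infinity, so a
    # wide bracket always contains a root
    c = G(abs(w0), t0)
    return np.sign(w0)*brentq(lambda w: G(w, t) - c, 1e-12, 1e6)
\end{verbatim} Applying \texttt{w\_asymptotic} pointwise in $x$ produces the asymptotic solution $\tilde{\wtt}{}$ used in step (v).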
In particular, setting $t=0$ in \eqref{wttt-sol} and solving for $\tilde{\wtt}{}|_{t=0}$ gives \begin{equation}\label{wttf-def} \tilde{\wtt}{}_{f} := \tilde{\wtt}{}|_{t=0}=\frac{\text{sgn}(\tilde{\wtt}{}_{0})|\tilde{\wtt}{}_0|^{\frac{1}{1-K}}}{(\tilde{t}{}_{0}^{2\mu}+\tilde{\wtt}{}_{0}^{2})^{\frac{K}{2(1-K)}}}, \end{equation} where $\text{sgn}(x)$ is the sign function, and consequently, with the help of \eqref{zttt-sol}, \begin{equation} \label{asymp-sol-t=0} (\ztt,\wtt)|_{t=0} \approx (\tilde{\ztt}{}_0, \tilde{\wtt}{}_f). \end{equation} It is worth noting that this ODE-like asymptotic behaviour of solutions generated from initial data satisfying \eqref{Hom-B-idata} is expected by Theorem \ref{mainthm}. What is interesting is that this behaviour of solutions persists for initial data that violates \eqref{Hom-B-idata}. To illustrate how well solutions $(\ztt,\wtt)$ of \eqref{eqn:dotzeta}-\eqref{eqn:dotw} can be approximated by solutions $(\tilde{\ztt}{},\tilde{\wtt}{})$ of the asymptotic system \eqref{zttt-asympt}-\eqref{wttt-asympt} near $t=0$, we compare in Figure \ref{fig:HomogFull_Compare} the plot of $\tilde{\wtt}{}_f=\tilde{\wtt}{}|_{t=0}$, for a fixed choice of $\tilde{t}{}_0$ and $\tilde{\wtt}{}_0$ (see \eqref{wttf-def}), with that of $\wtt(t)$ at times close to zero. From the figure, it is clear that the agreement is almost perfect for times close enough to zero. \begin{figure}[h] \centering \subfigure[$t=0.039$]{ \includegraphics[width=0.3\textwidth]{Images/HomogFull_Compare_t100}} \subfigure[$t=0.024$]{ \includegraphics[width=0.3\textwidth]{Images/HomogFull_Compare_t115}} \subfigure[$t=0.007$]{ \includegraphics[width=0.3\textwidth]{Images/HomogFull_Compare_t150}} \caption{Comparison of numerical solution $\texttt{w}$ (Blue) and $\tilde{\wtt}{}_{f}$ (Orange). $K=0.6$, $N = 1000$, $(\ztt_{0},\wtt_{0}) = (0,0.1\cos(x))$, $(\tilde{t}_{0},\tilde{\wtt}{}_{0}) = (9.93\times 10^{-4},\wtt|_{t=9.93\times 10^{-4}})$.} \label{fig:HomogFull_Compare} \end{figure} \FloatBarrier \subsubsection{Behaviour of the density contrast} By \eqref{mod-den}, \eqref{v0def}, \eqref{zetatdef} and \eqref{zttt-def}-\eqref{wttt-def}, the density can be written in terms of $\texttt{z}$ and $\texttt{w}$ as $\rho = (\texttt{w}^{2}+t^{2\mu})^{-\frac{K+1}{2}}\rho_{c}t^\frac{2(K+1)}{1-K}e^{(1+K)\texttt{z}}$, where $\rho_c \in (0,\infty)$. Differentiating this expression, we find after a short calculation that the density contrast is given by \begin{equation} \label{density-contrast-sym} \frac{\del{x}\rho}{\rho}= (1+K)\biggl(\del{x}\ztt - \frac{\wtt}{(t^{2\mu}+\wtt^2)}\del{x}\wtt\biggr).
\end{equation} Using this formula to compute the density contrast for numerical solutions of \eqref{eqn:dotzeta}-\eqref{eqn:dotw}, we observe that the density contrast displays markedly different behaviour depending on whether or not the solution is generated from initial data satisfying \eqref{Hom-B-idata}. For solutions generated from initial data satisfying \eqref{Hom-B-idata}, we find that the density contrast remains bounded and converges as $t\searrow 0$ to a fixed function, which is expected by Theorem \ref{mainthm}. An example of this behaviour is provided in Figure \ref{fig:Rho_x_pos}. On the other hand, the density contrast of solutions generated from initial data violating \eqref{Hom-B-idata} develops steep gradients and blows up at $t=0$ at isolated spatial points; see Figure \ref{fig:Rho_x_cross} for an example of this behaviour. As in Section \ref{sec:asymp}, we can compare the density contrast of the full numerical solutions with the density contrast computed from solutions of the asymptotic system. We do this by evaluating \eqref{density-contrast-sym} at $t=0$ and using \eqref{asymp-sol-t=0} to approximate the density contrast at $t=0$ by \begin{equation*} \frac{\del{x}\rho}{\rho}\biggl|_{t=0}\approx (1+K)\biggl(\del{x}\tilde{\ztt}{}_0 - \frac{(\tilde{t}{}_0^{2\mu}+(1-K)\tilde{\wtt}{}_0^2)}{(1-K)(\tilde{t}{}_0^{2\mu}+\tilde{\wtt}{}_0^2)\tilde{\wtt}{}_0}\del{x}\tilde{\wtt}{}_0\biggr). \end{equation*} This formula suggests, at least heuristically, that the blow-up of the density contrast at $t=0$ is due to the vanishing of $\wtt$. Once again, the agreement between the numerical and asymptotic plots is close enough that the two are practically indistinguishable, as can be seen from Figure \ref{fig:Rho_x_asymptotic}. \begin{figure}[h] \centering \subfigure[$t=1.0$]{ \includegraphics[width=0.3\textwidth]{Images/Rho_x_posID_0}} \subfigure[$t=0.199$]{ \includegraphics[width=0.3\textwidth]{Images/Rho_x_posID_50}} \subfigure[$t=6.14 \times 10^{-12}$]{ \includegraphics[width=0.3\textwidth]{Images/Rho_x_posID_800}} \caption{Plots of the density contrast, $\frac{\partial_{x}\rho}{\rho}$, at various times. $K=0.6$, $N = 1000$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x)+0.15)$.} \label{fig:Rho_x_pos} \end{figure} \begin{figure}[h] \centering \subfigure[$t=1.0$]{ \includegraphics[width=0.2\textwidth]{Images/Rho_x_crossID_0}} \subfigure[$t=0.012$]{ \includegraphics[width=0.2\textwidth]{Images/Rho_x_crossID_135}} \subfigure[$t=0.0015$]{ \includegraphics[width=0.2\textwidth]{Images/Rho_x_crossID_200}} \subfigure[$t=3.13\times 10^{-4}$]{ \includegraphics[width=0.2\textwidth]{Images/Rho_x_crossID_250}} \caption{Plots of the density contrast, $\frac{\partial_{x}\rho}{\rho}$, at various times.
$K=0.6$, $N = 1000$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x))$.} \label{fig:Rho_x_cross} \end{figure} \begin{figure}[h] \centering \subfigure[$t=0.001$]{ \includegraphics[width=0.2\textwidth]{Images/Density_Compare_t200}} \subfigure[$t=3.23\times 10^{-5}$]{ \includegraphics[width=0.2\textwidth]{Images/Density_Compare_t300}} \subfigure[$t=1.02\times 10^{-6}$]{ \includegraphics[width=0.2\textwidth]{Images/Density_Compare_t400}} \subfigure[$t=3.21\times 10^{-8}$]{ \includegraphics[width=0.2\textwidth]{Images/Density_Compare_t500}} \caption{Plots of the density contrast, $\frac{\partial_{x}\rho}{\rho}$, calculated from numerical results (Blue) and the asymptotic map (Green). $K=0.45$, $N = 160000$, $(\ztt_{0},\wtt_{0}) = (0,0.1\sin(x))$. Points near $\wtt_{0} = 0$ in the asymptotic map have been removed to emphasise agreement of the plots away from the singularities.} \label{fig:Rho_x_asymptotic} \end{figure} \FloatBarrier \subsection*{Acknowledgements} We thank Florian Beyer for helpful discussions and suggestions regarding the numerical aspects of this article. \bibliographystyle{amsplain}
\section{Introduction} In his famous fourth problem, Hilbert asked to characterize metric geometries whose geodesics are straight lines. He constructed a special class of examples, nowadays called \emph{Hilbert geometries} \cite{hilbert1895,hilbert1899}. These geometries have attracted a lot of interest, see for example the works of Y.~Nasu~\cite{nasu61}, P.~de~la~Harpe~\cite{delaHarpe93}, A.~Karlsson \& G.~Noskov~\cite{karlsson_guennadi02}, E.~Socie-Methou~\cite{sociemethou04}, T.~Foertsch \& A.~Karlsson~\cite{foertsch_karlsson05}, Y.~Benoist~\cite{benoist06}, B.~Colbois \& C.~Vernicos~\cite{colbois_vernicos07} and the two complementary surveys by Y.~Benoist~\cite{yvessurvey} and the last-named author \cite{vernicos05}. A Hilbert geometry is a particularly simple metric space on the interior of a compact convex set $K$ (see the definition below). This metric happens to be a complete Finsler metric whose set of geodesics contains the straight lines. Since the definition of the Hilbert geometry only uses cross-ratios, the Hilbert metric is a projective invariant. In the particular case where $K$ is an ellipsoid, the Hilbert geometry is isometric to the usual hyperbolic space. An important part of the above-mentioned works, and of older ones, is to study how close to, or different from, the hyperbolic geometry these geometries can be. For instance, if $K$ is not an ellipsoid, then the metric is never Riemannian; see D.C.~Kay~\cite[Corollary 1]{kay67}. This last result is actually related to the fact that, among all finite-dimensional normed vector spaces, many notions of curvature are only satisfied by the Euclidean spaces (see also P.~Kelly \& L.~Paige~\cite{kp}, P.~Kelly \& E.~Strauss~\cite{ks,ks2}). However, if $\partial K$ is sufficiently smooth, then the flag curvature, an analog of the sectional curvature, of the Hilbert metric is constant and equals $-1$; see for example Z.~Shen~\cite[Example 9.2.2]{shen01}. Hence a question one can ask is whether or not these geometries behave like negatively curved Riemannian manifolds. The example of the triangle geometry, which is isometric to a two-dimensional normed vector space (see P.~de~la~Harpe~\cite{delaHarpe93}), shows that things are a little more involved (see also the theorems cited below). The present work is partially inspired by the feeling that Hilbert geometries might be thought of as geometries with Ricci curvature bounded from below, and focuses on the volume growth of balls. Unlike the Riemannian case, where there is only one natural choice of volume, there are several good choices of volume on a Finsler manifold. We postpone this issue to section \ref{sec_preliminaries} and fix just one {\it volume} (like the $n$-dimensional Hausdorff measure) for the moment. Let $B(o,r)$ be the metric ball of radius $r$ centered at $o$. The volume entropy of $K$ is defined by the following limit (provided it exists) \begin{equation}\label{defentropy} \ent K:=\lim_{r \to \infty} \frac{\log \vol B(o,r)}{r}. \end{equation} The entropy depends neither on the particular choice of the base point $o \in \inte K$ nor on the particular choice of the volume. If $h=\ent K$, then $\vol B(o,r)$ behaves roughly as $e^{hr}$.
It is well-known and easy to prove (see, e.g., S.~Gallot, D.~Hulin \& J.~Lafontaine~\cite[Section III.H]{gallot_sylvestre_hulin}) that the volume of a ball of radius $r$ in the $n$-dimensional hyperbolic space is given, with $\omega_n$ the volume of the Euclidean unit ball of dimension $n$, by \begin{displaymath} n \omega_n \int_0^r (\sinh s)^{n-1} ds =O(e^{(n-1)r}). \end{displaymath} It follows that the entropy of an ellipsoid equals $n-1$. In general, it is not known whether the above limit exists. If the convex set $K$ is divisible, which means that a discrete subgroup of the group of isometries of the Hilbert geometry acts cocompactly on $\inte K$, then the entropy is known to exist, see Y.~Benoist~\cite{benoist04}. If the convex set is sufficiently smooth, e.g., $C^2$ with positive curvature suffices, then the entropy exists and equals $n-1$ (see the theorem of B.~Colbois \& P.~Verovic below). In general, one may define lower and upper entropies $\underline{\ent}$, $\overline{\ent}$ by replacing the limit in the definition (\ref{defentropy}) by $\liminf$ or $\limsup$. There is a well-known conjecture (whose origin seems difficult to locate) saying that the hyperbolic space has maximal entropy among all Hilbert geometries of the same dimension. \begin{conjecture} For any $n$-dimensional Hilbert geometry, \begin{displaymath} \overline{\ent} K \leq n-1. \end{displaymath} \end{conjecture} Notice that such a result is a consequence of Bishop's volume comparison theorem for a complete Riemannian manifold of Ricci curvature bounded from below by $-(n-1)$ (see \cite[theorem 3.101, i)]{gallot_sylvestre_hulin}). Several particular cases of the conjecture were treated in the literature. The following one shows that the volume entropy does not characterize the hyperbolic geometry among all Hilbert geometries. \begin{theorem*}{(\bf B.~Colbois \& P.~Verovic \cite{colbois_verovic04})}\\ If $K$ is $C^2$-smooth with strictly positive curvature, then the Hilbert metric of $K$ is bi-Lipschitz to the hyperbolic metric and therefore \begin{displaymath} \ent K=n-1. \end{displaymath} \end{theorem*} \begin{theorem*}{(\bf B.~Colbois, C.~Vernicos \& P.~Verovic \cite{colbois_vernicos_verovic08})}\\ The Hilbert metric associated to a plane convex polygon is bi-Lipschitz to the Euclidean plane. In particular, its entropy is $0$. \end{theorem*} Instead of taking the volume of balls, another natural choice is to study the volume growth of the metric spheres $S(o,r)$. One may define a (spherical) entropy by \begin{equation}\label{defsentropy} \ent^s K:=\lim_{r \to \infty} \frac{\log \vol S(o,r)}{r}, \end{equation} provided the limit exists. In general, one may define upper and lower spherical entropies $\overline{\ent}^s K$ and $\underline{\ent}^s K$ by replacing the limit in the definition (\ref{defsentropy}) by a $\limsup$ or $\liminf$. The following theorem is a spherical version of the theorem of B.~Colbois \& P.~Verovic. \begin{theorem*} {\bf (A.A.~Borisenko \& E.A.~Olin \cite{borisenko_olin07})}\\ If $K$ is an $n$-dimensional convex body of class $C^3$ with positive Gauss curvature, then $\ent^s K=n-1$. \end{theorem*} Our first main theorem weakens in a substantial way the assumptions in the theorem of B.~Colbois \& P.~Verovic and strengthens its conclusion: not only does it give the precise value of the entropy, it also gives the \emph{entropy coefficient}. In order to state it, we introduce a projective invariant of convex bodies which is interesting in its own right. Let $V$ be an $n$-dimensional vector space with origin $o$.
Given a convex body $K$ containing $o$ in the interior, we define a positive function $a$ on the boundary by the condition that for $p \in \partial K$ we have $-a(p)p \in \partial K$. The letter $a$ stands for \emph{antipodal}. If $V$ is endowed with a Euclidean scalar product, we let $k(p)$ be the Gauss curvature and $n(p)$ be the outer normal vector at a boundary point $p$ (whenever they are well-defined, which by A.D.~Alexandroff~\cite{alexandrov} is the case almost everywhere). \begin{definition*} \label{def_cp_area} The \emph{centro-projective} area of $K$ is \begin{equation} \mathcal A_p(K):=\int_{\partial K}\frac{\sqrt{k}}{\langle n,p\rangle^{\frac{n-1}{2}}}\left(\frac{2a}{1+a}\right)^{\frac{n-1}{2}}\ dA. \end{equation} \end{definition*} It is not quite obvious (but true, as we shall see) that this definition does not depend on the choice of the scalar product. In fact, the centro-projective area is invariant under \emph{projective transformations} fixing the origin. The reader familiar with the theory of valuations may notice the similarity with the centro-affine surface area, whose definition is the same except that the second factor (containing the function $a$) does not appear. We refer to the books by Laugwitz \cite{laugwitz} and Leichtweiss \cite{leichtweiss_book} for more information on affine and centro-affine differential geometry. \begin{theorem*}[\bf First Main Theorem] If $K$ is $C^{1,1}$, then \begin{equation} \label{eq_entropy_coefficient_intro} \lim_{r \to \infty} \frac{\vol B(o,r)}{\sinh^{n-1}r}=\frac{1}{n-1} \mathcal A_p(K)\neq 0 \end{equation} and $\ent K=n-1$. Moreover, without any assumption on $K$, if $\mathcal A_p(K) \neq 0$ then $\underline{\ent} K \geq n-1$. \end{theorem*} In the two-dimensional case the $C^{1,1}$ assumption is not required; indeed, we are able to give an upper bound on the entropy depending on the upper Minkowski dimension of the set $\ex K$ of extremal points of $K$. Recall that an extremal point of a convex body $K$ is a point which cannot be written as $\frac{a+b}{2}$ with $a,b \in K, a \neq b$. \begin{theorem*}[\bf Second Main Theorem] Let $K$ be a two-dimensional convex body. Let $d$ be the upper Minkowski dimension of the set of extremal points of $K$. Then the entropy of $K$ is bounded by \begin{equation} \overline{\ent} K \leq \frac{2}{3-d} \leq 1. \end{equation} Moreover, the equality in \eqref{eq_entropy_coefficient_intro} holds true (with $n=2$). \end{theorem*} The inequality is sharp if $K$ is smooth or contains some positively curved smooth part in the boundary. In this case the upper Minkowski dimension of $\ex K$ and the entropy both are $1$. On the other hand, for polygons the upper Minkowski dimension of the set of extremal points and the entropy both vanish (see the theorem of B.~Colbois, C.~Vernicos \& P.~Verovic above), and the inequality is not sharp in this case. It should be noted that the entropy behaves in a rather subtle way (see also C.~Vernicos~\cite{vernicos08} for a technical study of the entropy complementary to the present paper). As we have seen above, the entropy of a polygon vanishes. In contrast to this, we will construct a convex body with piecewise affine boundary whose entropy is strictly between $0$ and~$1$. Our next theorem, together with the previous ones, shows in particular that it suffices to assume $K$ to be merely of class $C^{1,1}$ in the theorem of A.A.~Borisenko \& E.A.~Olin.
\begin{theorem*} For each convex body $K$, \begin{align*} \underline{\ent}^s K & =\underline{\ent} K,\\ \overline{\ent}^s K &=\overline{\ent} K. \end{align*} \end{theorem*} \subsection*{Plan of the paper} In the next section, we collect some well-known facts about convex bodies, Hilbert geometries and volumes on Finsler manifolds. A number of easy lemmas are proved which will be needed in the proofs of our main theorems. Using some inequalities for volumes in normed spaces, we show that entropy and spherical entropy coincide for general convex bodies. In section \ref{sec_entropy_bounds}, we give the proofs of our main theorems. In the final section \ref{sec_centro_proj}, we give an intrinsic definition of the centro-projective surface area and study some of its properties. In particular, we show that it is upper semi-continuous with respect to the Hausdorff topology. \subsection*{Acknowledgements} We wish to thank Bruno Colbois and Daniel Hug for interesting discussions and Franz Schuster for useful remarks on an earlier version of this paper. \section{Preliminaries on Convex bodies \\ and Hilbert Geometries} \label{sec_preliminaries} \subsection{Convex bodies} Let $V$ be a finite-dimensional real vector space. By \emph{convex body}, we mean a compact convex set $K\subset V$ with non-empty interior (note that this last condition is sometimes not required in the literature). Most of the time, the convex bodies will be assumed to contain the origin in their interiors. In such a case, we will, as usual, call the \emph{Minkowski functional} the positive, degree-one homogeneous function whose level set at height $1$ is the boundary $\partial K$. It is a convex function and, by Alexandroff's theorem, admits a quadratic approximation almost everywhere (see e.g. A.D.~Alexandroff~\cite{alexandrov} or L.C.~Evans \& R.F. Gariepy~\cite[p. 242]{MTFPF92}). In the following, boundary points where Alexandroff's theorem applies will be called \emph{smooth}. Assuming the vector space to be equipped with an inner product, the principal curvatures of the boundary and its Gauss curvature $k$ are well defined at every smooth point. We will be concerned with generalizations and variations of \emph{Blaschke's rolling theorem}, a proof of which may be found in K.~Leichtwei\ss~\cite{leichtweiss93}. \begin{theorem}[\bf W. Blaschke, \cite{blaschke56}] Let $K$ be a convex body in $\mathbb R^n$ whose boundary is $C^2$ with everywhere positive Gaussian curvature. Then there exist two positive radii $R_1$ and $R_2$ such that for every boundary point $p$, there exists a ball of radius $R_1$ (resp. $R_2$) containing $p$ on its boundary and contained in $K$ (resp. containing $K$). \end{theorem} We first remark that for the ``inner part'' of Blaschke's result, the regularity of the boundary may be lowered. Recall that the boundary of a convex body is $C^{1,1}$ provided it is $C^1$ and the Gauss map is Lipschitz-continuous. Roughly speaking, the second condition says that the curvature of the boundary remains bounded, even if it is only defined almost everywhere. The following proposition then gives a geometrical characterization of such bodies, see L.~H\"ormander~\cite[proposition~2.4.3]{hor94} or V.~Bangert~\cite{bang99} and D.~Hug~\cite{hug99}. \begin{proposition}\label{c11} The boundary of a convex body $K$ is $C^{1,1}$ if and only if there exists some $R>0$ such that $K$ is a union of balls of radius $R$. \end{proposition} Without any assumption on the boundary, there is still an integral version of Blaschke's rolling theorem.
\begin{theorem}[\bf C. Sch\"utt \& E. Werner, \cite{schwer90}] \label{thm_schuett_werner} For a convex body $K$ containing the unit ball of a Euclidean space and $p \in \partial K$, let $R(p) \in [0,\infty)$ be the radius of the biggest ball contained in $K$ and containing $p$. Then for all $0<\alpha<1$ \begin{equation} \int_{\partial K} R^{-\alpha} d\mathcal{H}^{n-1} < \infty. \end{equation} \end{theorem} We will need the following refinement of this theorem. \begin{proposition} \label{proposition_blaschke_strong} In the same situation as in Theorem \ref{thm_schuett_werner}, for each Borel subset $B \subset \partial K$ we have \begin{multline} \int_B R^{-\alpha}d\mathcal{H}^{n-1} \leq\\ 2(n-1)^\alpha\left(\frac{2^\alpha}{1-2^{\alpha-1}}\right)^\alpha\left(\mathcal{H}^{n-1}(B)\right)^{1-\alpha} \left(\mathcal H^{n-1}(\partial K)\right)^\alpha. \end{multline} In particular for some constant $C$ depending on $K$ we have \begin{equation} \int_B R^{-\frac12}d\mathcal{H}^{n-1} \leq C \left(\mathcal{H}^{n-1}(B)\right)^{\frac12}\text{.} \end{equation} \end{proposition} \proof By (\cite{schwer90}, Lemma 4), we have for $0 \leq t \leq 1$ \begin{equation}\label{keyingredient} \mathcal{H}^{n-1}\bigl(\{p \in \partial K| R(p) \leq t\}\bigr)\leq (n-1)t\ \mathcal{H}^{n-1}(\partial K)\text{,} \end{equation} from which we deduce that, for each $0<\epsilon<1$ \begin{multline} \int_{\partial K \cap \{R<\epsilon\}} R^{-\alpha} d\mathcal H^{n-1} = \sum_{i=0}^\infty \int_{\partial K \cap \{\epsilon 2^{-i-1} \leq R<2^{-i} \epsilon\}} R^{-\alpha} d\mathcal H^{n-1}\\ \leq \sum_{i=0}^\infty (\epsilon 2^{-i-1})^{-\alpha}\ \mathcal H^{n-1} \bigl(\partial K \cap \{\epsilon 2^{-i-1} \leq R<2^{-i} \epsilon\}\bigr)\\ \leq \sum_{i=0}^\infty (\epsilon 2^{-i-1})^{-\alpha} (n-1)2^{-i}\epsilon\ \mathcal H^{n-1} (\partial K)\\ = \epsilon^{1-\alpha} (n-1) \frac{2^\alpha}{1-2^{\alpha-1}}\ \mathcal H^{n-1} (\partial K). \end{multline} It follows that \begin{align*} \int_B R^{-\alpha}d\mathcal{H}^{n-1} & = \int_{B \cap \{R<\epsilon\}} R^{-\alpha}d\mathcal{H}^{n-1}+\int_{B \cap \{R \geq \epsilon\}} R^{-\alpha}d\mathcal{H}^{n-1}\\ & \leq \epsilon^{1-\alpha} (n-1) \frac{2^\alpha}{1-2^{\alpha-1}}\ \mathcal H^{n-1} (\partial K)+ \epsilon^{-\alpha}\ \mathcal{H}^{n-1}(B). \end{align*} Choosing \begin{displaymath} \epsilon:=\frac{1-2^{\alpha-1}}{2^\alpha(n-1)}\ \frac{\mathcal H^{n-1} (B)}{\mathcal H^{n-1}(\partial K)} \end{displaymath} yields the inequality of the lemma. \endproof \subsection{Hilbert geometries} Given two distinct points $x, y \in \inte K$, the \emph{Hilbert distance} between $x$ and $y$ is defined by \begin{displaymath} d(x,y):=\frac12 \bigl|\log [a,b,x,y]\bigr|, \end{displaymath} where $a$ and $b$ are the intersections of the line passing through $x$ and $y$ with the boundary $\partial K$, and $[a,b,x,y]$ denotes the cross-ratio (with the convention of \cite{bridson_haefliger}). This distance is invariant under projective transformations. If $K$ is an ellipsoid, the Hilbert geometry on $\inte K$ is isometric to hyperbolic $n$-space. Unbounded closed convex sets with non-empty interiors and not containing a straight line are projectively equivalent to convex bodies. Therefore, the definition of the distance naturally extends to the interiors of such convex sets. In particular the convex sets bounded by parabolas are also isometric to the hyperbolic space. Let us assume the origin $o$ lies inside the interior of $K$. We will write $B(r)$ for the \emph{metric ball} of radius $r$ and centered at $o$. 
Its boundary, the \emph{metric sphere}, will be denoted by $S(r)$. Let $a\colon\partial K\longrightarrow\mathbb R_+$ be defined by the equation \begin{displaymath} -a(p)p\in\partial K, \end{displaymath} so the letter $a$ refers to the antipodal point. It is an easy exercise to check that metric spheres are parameterized by the boundary $\partial K$ as \begin{displaymath} S(r)=\bigl\{\phi(p,r)\colon p\in\partial K\bigr\}, \end{displaymath} where \begin{align}\label{param_spheres} \phi\colon \partial K\times\mathbb R_+ & \to \inte{K}\\ (p,r) & \mapsto a\frac{e^{2r}-1}{a e^{2r}+1}\ p.\nonumber \end{align} The Hilbert distance comes from a Finsler metric on the interior of $K$. Given $x \in \inte K$ and $v \in T_xV$, the Finsler norm of $v$ is given by \begin{equation} \label{eq_finsler_norm} \|v\|_x=\frac12 \left(\frac{1}{t_1}+\frac{1}{t_2}\right), \end{equation} where $t_1,t_2>0$ are such that $x\pm t_iv\in \partial K$. Again, we do not exclude that one of the $t_i$'s is infinite. Equivalently, if $F_x$ is the Minkowski functional of $K-x$, then \begin{displaymath} \Vert v\Vert_x=\frac{1}{2}\bigl(F_x(v)+F_x(-v)\bigr). \end{displaymath} The Finsler metric makes it possible to measure the length of a differentiable curve $c:I \to \inte K$ by \begin{displaymath} l(c):=\int_I \bigl\|c'(t)\bigr\|_{c(t)} dt. \end{displaymath} It is less trivial to measure the area (or volume) of higher-dimensional subsets of $\inte K$. In fact, different notions of volume are being used. The most important ones are the Busemann definition (which equals the Hausdorff $n$-dimensional measure) and the Holmes-Thompson definition. In the following, only the axioms of a \emph{volume} as defined in \cite{alth04} will be used. We will make use of the following properties: \begin{itemize} \item $\vol$ is a Borel measure on $\inte K$ which is absolutely continuous with respect to Lebesgue measure. \item If $A \subset K \subset L$, where $K,L$ are compact convex sets, then the measure of $A$ with respect to $K$ is larger than or equal to the measure of $A$ with respect to $L$. \item If $K$ is an ellipsoid, then $\vol(A)$ is the hyperbolic volume of $A$. \end{itemize} The following projective invariants of convex bodies will be our main subjects of investigation. \begin{definition} The \emph{upper (resp. lower) volume entropy} of $K$ is \begin{align*} \overline{\ent}(K) & :=\limsup_{r\to\infty}\frac{\log\bigl(\vol B(r)\bigr)}{r}, \\ \underline{\ent}(K) & :=\liminf_{r\to\infty}\frac{\log\bigl(\vol B(r)\bigr)}{r}. \end{align*} If the upper and lower volume entropies of $K$ coincide, their common value is called the volume entropy of $K$ and denoted by $\ent K$. \end{definition} Note that these invariants are independent of the choice of the center and of the choice of the volume definition. \subsection{Busemann's density} For simplicity, we restrict ourselves to Busemann's volume, although all results remain true for every other choice of volume. The reason is that the proofs of the crucial propositions \ref{proposition_parabola} and \ref{prop_pointwise_limit} below do not use any particular property of Busemann's volume, but only the axioms satisfied by every definition of volume. The density of Busemann's volume (with respect to some Lebesgue measure $\mathcal L$) is given by \begin{displaymath} \sigma(x)=\frac{\omega_n}{\mathcal L(B_x)}, \end{displaymath} where $B_x$ is the tangent unit ball of the Finsler metric at $x$ and $\omega_n$ is the (Euclidean) volume of the unit ball in $\mathbb R^n$.
The volume of a Borel subset $A \subset \inte K$ is thus given by \begin{displaymath} \vol(A)=\int_A \sigma \ d\mathcal L. \end{displaymath} We now state and prove some propositions concerning upper bounds and asymptotic behaviors of Busemann's densities for points which are close to the boundary of particular convex sets. We will make use of an auxiliary inner product, calling $\mathcal L$ and $\mu$ the corresponding Lebesgue measure and volume $n$-form. Busemann densities are defined with this particular choice of measure. \begin{proposition} \label{proposition_truncation} Let $K,K'$ be closed convex sets not containing any straight line and $\sigma:\inte K \to \mathbb R$, $\sigma':\inte K' \to \mathbb R$ their corresponding Busemann densities. Let $p \in \partial K$, $E_0$ a support hyperplane of $K$ at $p$ and $E_1$ a hyperplane parallel to $E_0$ intersecting $K$. Suppose that $K$ and $K'$ have the same intersection with the strip between $E_0$ and $E_1$ (in particular $p \in \partial K'$). Then \begin{displaymath} \lim_{y \to p} \frac{\sigma(y)}{\sigma'(y)}=1. \end{displaymath} \end{proposition} \proof Let $d$ be the distance between $E_0$ and $E_1$ and $(y_i)$ a sequence of points of $\inte K$ converging to $p$. We may suppose that the distance $d_i$ between $y_i$ and $E_0$ is strictly less than $d$. For every fixed point $y_i$ and non-zero tangent vector $v \in T_{y_i}K$, let $t_1,t_2 \in \mathbb R_+ \cup \{\infty\}$ be such that $y_i \pm t_{1,2}v \in \partial K$; let $t_1',t_2'$ be the corresponding numbers for $K'$. Since at least one of $y_i+t_1v$ and $y_i-t_2v$ is inside the strip, say $y_i+t_1v$, we must have $t_1=t_1'$. Either $t_2=t_2'$ and $\|v\|_i=\|v\|_i'$, or $t_2 \neq t_2'$, in which case \begin{displaymath} \frac{t_1}{t_2}, \frac{t_1'}{t_2'}\leq \frac{d_i}{d-d_i}. \end{displaymath} Therefore, \begin{displaymath} \frac{d-d_i}{d}\leq\frac{\|v\|_i}{\|v\|_i'}=\frac{1+\frac{t_1}{t_2}}{1+\frac{t_1'}{t_2'}}\leq\frac{d}{d-d_i}, \end{displaymath} which shows that, as a function on $\mathbb R P^{n-1}$, $\Vert\cdot\Vert_i/\Vert\cdot\Vert_i'$ converges uniformly to $1$. Hence, for every $\epsilon>0$ and every $i$ large enough, \begin{displaymath} (1-\epsilon)B_{y_i}\subset B_{y_i}'\subset(1+\epsilon)B_{y_i}, \end{displaymath} which implies the convergence of $\sigma/\sigma'$ to $1$. \endproof \begin{proposition} \label{proposition_parabola} Let $V=\mathbb R^n$ with its usual scalar product. Let $P$ be the convex set bounded by the parabola $y=\sum_{i=1}^{n-1} \frac{c_i}{2} x_i^2$, where $c_1,\ldots,c_{n-1}>0$. Then \begin{equation} \sigma(0,\ldots,0,1-\lambda)=\frac{\sqrt{c}}{\bigl(2(1-\lambda)\bigr)^{\frac{n+1}{2}}}, \end{equation} where $c=\prod_{i=1}^{n-1} c_i$. \end{proposition} \proof By the invariance of the Hilbert metric under projective transformations, the tangent unit sphere at any point of $\inte P$ is an ellipsoid. At the point $(0,\ldots,0,1-\lambda)$, the symmetry implies that the principal axes of this ellipsoid are parallel to the coordinate axes. Hence \begin{displaymath} \sigma=\frac{1}{\prod_{i=1}^n l_i}, \end{displaymath} where the $l_i$'s, $i=1,\ldots,n$, are the Euclidean lengths of the principal half-axes. Now $l_i=\sqrt{\frac{2(1-\lambda)}{c_i}}$ for $i=1,\ldots,n-1$, and $l_n=2(1-\lambda)$. \endproof \begin{proposition} \label{prop_pointwise_limit} Assume the origin $o$ is inside $\inte{K}$. For a smooth point $p$ of $\partial K$, let $n(p)$ be the outward normal vector and let $k(p)$ be the Gauss curvature of $\partial K$ at $p$.
Then \begin{equation} \lim_{\lambda \to 1} \sigma(\lambda p) (1-\lambda)^\frac{n+1}{2}=\frac{\sqrt{k(p)}}{\Bigl(2\bigl\langle p,n(p)\bigr\rangle\Bigr)^\frac{n+1}{2}}. \end{equation} \end{proposition} \proof Let us choose a frame $(p;v_1,\ldots,v_{n-1},v_n)$ where $v_1,\ldots,v_{n-1} \in T_p \partial K$ are unit vectors tangent to the principal curvature directions of $\partial K$ at $p$ and $v_n=-p$. In these coordinates, the boundary of $K$ is locally the graph of a function: $y=\sum_{i=1}^{n-1} \frac{c_i}{2} x_i^2+R(|x|)$ with $R(|x|)=o(|x|^2)$ and $c_1,\ldots,c_{n-1} \geq 0$. We set \begin{displaymath} c:=\prod_{i=1}^{n-1} c_i. \end{displaymath} A small computation shows that \begin{displaymath} dx_1 \wedge \ldots \wedge dx_{n-1} \wedge dy=\frac{1}{m} \mu, \end{displaymath} where $\mu$ is the Euclidean $n$-form and $m:=\mu(v_1,\ldots,v_n)=\bigl\langle p,n(p)\bigr\rangle$. Also, the Gauss curvature at $p$ is given by \begin{displaymath} k(p)=cm^{n-1}. \end{displaymath} Let us fix $\epsilon>0$. Locally, the parabola defined by \begin{displaymath} y=\sum_{i=1}^{n-1} \frac{c_i+\epsilon}{2}x_i^2 \end{displaymath} lies inside $K$. Cutting it with some horizontal hyperplane, we obtain a convex body $K'$ inside $K$. In particular, the metric of $K'$ is greater than or equal to the metric of $K$, hence $\sigma'(\lambda p) \geq \sigma(\lambda p)$ for $\lambda$ near $1$. Then, by propositions \ref{proposition_truncation} and \ref{proposition_parabola}, \begin{align} \label{eq_limsup} \nonumber \limsup_{\lambda \to 1} \sigma(\lambda p)(1-\lambda)^\frac{n+1}{2} & \leq \lim_{\lambda \to 1} \sigma'(\lambda p)(1-\lambda)^\frac{n+1}{2}\\ & = \frac{\sqrt{\prod_{i=1}^{n-1} (c_i+\epsilon)}}{2^\frac{n+1}{2}m}. \end{align} Note that $\sigma>0$, hence this already settles the case $k=c=0$ since $\epsilon$ was arbitrarily small. If $c>0$ and $0<\epsilon<\min\{c_1,\ldots,c_{n-1}\}$, the parabola $P$ defined by \begin{displaymath} y=\sum_{i=1}^{n-1} \frac{c_i-\epsilon}{2}x_i^2 \end{displaymath} locally contains $K$. Cutting it with some horizontal hyperplane, we obtain a convex body $K'$ inside $P$. By propositions \ref{proposition_truncation} and \ref{proposition_parabola} again, \begin{align} \nonumber \liminf_{\lambda \to 1} \sigma(\lambda p)(1-\lambda)^\frac{n+1}{2} & \geq \liminf_{\lambda \to 1} \sigma'(\lambda p)(1-\lambda)^\frac{n+1}{2} \\ & = \frac{\sqrt{\prod_{i=1}^{n-1} (c_i-\epsilon)}}{2^\frac{n+1}{2}m}. \label{eq_liminf} \end{align} From \eqref{eq_limsup} and \eqref{eq_liminf} (with $\epsilon \to 0$) we get \begin{displaymath} \lim_{\lambda \to 1} \sigma(\lambda p)(1-\lambda)^\frac{n+1}{2} = \frac{\sqrt{c}}{2^\frac{n+1}{2}m}. \end{displaymath} \endproof To state our main theorem in section \ref{sec_entropy_bounds} precisely, we need to introduce the pseudo-Gauss curvature of the boundary of a convex set $K$ in $\mathbb R^n$. For a smooth point $p\in \partial K$, let $n(p)$ be the outward normal of $\partial K$ at $p$. For each unit vector $e \in T_p \partial K$, let $H_e(p)$ be the affine plane containing $p$ and directed by the vectors $e$ and $n(p)$. We define $R_e$ as the radius of the biggest disc containing $p$ inside $K_e:=K \cap H_e(p)$. \begin{definition}\label{pseudogauss} The \textit{pseudo-Gauss curvature} $\bar k(p)$ of $\partial K$ at $p$ is the minimum of the numbers \begin{displaymath} \prod_{i=1}^{n-1} R_{e_i}(p)^{-1}, \end{displaymath} where $(e_1,\ldots,e_{n-1})$ ranges over all orthonormal bases of $T_p \partial K$.
\end{definition} \begin{proposition}\label{proposition_blaschke_trick} Let $V$ be a Euclidean vector space of dimension $n$. Let $K$ be a convex body containing the unit ball $B$. Then for $\frac12 \leq \lambda < 1$ and $p \in \partial K$ \begin{equation} \sigma(\lambda p) \leq \frac{\omega_n n!}{2^n(1-\lambda)^\frac{n+1}{2}} \bar k(p)^{1/2}\text{.} \end{equation} \end{proposition} \proof We use the same notation as in the definition of $\bar k$ and choose an orthonormal basis $(e_1,\ldots,e_{n-1})$ realizing the minimum in definition \ref{pseudogauss}. We may suppose that for all $i$, $R_i:=R_{e_i}(p)>0$, otherwise the statement is trivial. By definition of $R_i$, there is a $2$-disc $B_i(p)$ of radius $R_i$ inside $K_{e_i}$ containing $p$. Let us denote by $B(e_i)$ the intersection of $B$ with the affine plane $p+H_{e_i}$. Since $B(e_i),B_i(p)\subset K$, one has \begin{displaymath} \hat C_i:=\mathrm{conv}\left(B(e_i)\times\{0\}\cup B_i(p)\times\{1\}\right)\subset K_{e_i}\times[0,1]. \end{displaymath} Note that $\hat C_i$ is a truncated cone. Let $E_i$ be the plane containing the line that is parallel to $T_p\partial K_{e_i}$ and that passes through the points $o\times\{0\}$ and $p\times\{1\}$. With $\pi:V \times [0,1] \to V$ the projection on the first component, $C_i:=\pi(E_i\cap\hat C_i)\subset K$ is bounded by a truncated conic. In the non-orthogonal frame $(o;p,e_i)$, $C_i$ is given by \begin{displaymath} (2R_i-1)x^2+2(1-R_i)x+ y^2 \leq 1,\quad 0 \leq x\leq 1. \end{displaymath} Now let $C$ be the convex hull of the union of the $C_i$. Then the polytope $P$ with vertices \begin{displaymath} \left(\lambda, 0,\ldots,\pm \sqrt{(1-\lambda)(2\lambda R_i-\lambda+1)},0,\ldots,0\right), (1,\vec0), (2\lambda-1,\vec0) \end{displaymath} lies inside $C$, with all but the last vertex being on the boundaries of the respective $C_i$'s. Its volume is given by \begin{align} \nonumber \mathcal L(P) & =\frac{2^n \bigl\langle p,n(p)\bigr\rangle }{n!}(1-\lambda)^\frac{n+1}{2}\prod_{i=1}^{n-1}(2\lambda R_i-\lambda+1)^\frac{1}{2}\\ \nonumber & \geq \frac{2^n}{n!}(1-\lambda)^\frac{n+1}{2} (R_1\cdot R_2\cdots R_{n-1})^{\frac{1}{2}}\\ & = \frac{2^n}{n!}(1-\lambda)^\frac{n+1}{2} \bar k^{-\frac{1}{2}}(p). \end{align} The factor $\bigl\langle p,n(p)\bigr\rangle$ in the first line is due to the fact that our coordinate system is not orthonormal. Since the unit ball is contained in $K$, this factor is at least $1$. From $P \subset C \subset K$ and the fact that $P$ is centered at $\lambda p$, we deduce that \begin{displaymath} \sigma(\lambda p) \leq \frac{\omega_n}{\mathcal L(P)} \leq \frac{\omega_n n!}{2^n} (1-\lambda)^{-\frac{n+1}{2}} \bar k^{\frac{1}{2}}(p). \end{displaymath} \endproof The next proposition will be needed in the construction of a convex body with entropy between $0$ and $1$. \begin{proposition} \label{proposition_density_triangle} Let $K=oab$ be a triangle with $1 \leq oa,ob \leq 2$ and such that the distance from $o$ to the line passing through $a$ and $b$ is at least $1$. Let $p$ be a point in the interior of the side $ab$ and suppose that $\min\{ap,bp\}\geq \epsilon >0$. Then for $\lambda \geq \frac12$ Busemann's density of $K$ at $\lambda p$ is bounded above by \begin{displaymath} \sigma(\lambda p) \leq 32 \pi\max\left\{\frac{1}{\epsilon (1-\lambda)},\frac{1}{\epsilon^2}\right\}. \end{displaymath} \end{proposition} \proof The hypothesis on the triangle implies that $\sin(abo),\sin(bao) \geq \frac12$. Let $a'$ be the intersection of the line passing through $a$ and $z:=\lambda p$ with $ob$ and define $b'$ similarly. The unit tangent ball at $z$ is a hexagon centered at $z$.
The length of one of its half-diagonals is the harmonic mean of $za$ and $za'$; the length of the second half-diagonal is the harmonic mean of $zb$ and $zb'$, and the third half-diagonal has length $\frac{2 op}{\frac{1}{\lambda}+\frac{1}{1-\lambda}} \geq 1-\lambda$. An easy geometric argument shows that $za',zb \geq \frac12 pb \sin(abo) \geq \frac14 \epsilon$ and $za,zb' \geq \frac12 pa \sin(bao) \geq \frac14 \epsilon$. The area $A$ of the hexagon is at least half of the minimal product of two of its half-diagonals, hence \begin{displaymath} A \geq \min\left\{\frac18 \epsilon (1-\lambda),\frac{1}{32}\epsilon^2\right\}. \end{displaymath} Since Busemann's density at $z$ is $\sigma(z)=\omega_2/A=\pi/A$, the claimed bound follows. \endproof \subsection{Volume entropy of spheres} By definition, the entropy controls the volume growth of metric balls in Hilbert geometries. We show in this section that it coincides with the growth rate of the areas of metric spheres. Again, there are several definitions of the area of hypersurfaces in Finsler geometry. For simplicity, we consider Busemann's definition, which gives the Hausdorff $(n-1)$-measure of these hypersurfaces. We will need the following two lemmas: \begin{lemma}[Rough monotonicity of area] There exist a monotone function $f$ and a constant $C_1>1$ such that for all $r>0$ \begin{equation} \label{eq_comparison_holmes_thompson} C_1^{-1} f(r) \leq \ar(S(r)) \leq C_1 f(r). \end{equation} \end{lemma} \proof Let $f(r)$ be the Holmes-Thompson area of $S(r)$. Since all area definitions agree up to some universal constant, inequality \eqref{eq_comparison_holmes_thompson} is trivial. It remains to show that $f$ is monotone. If $\partial K$ is $C^2$ with everywhere positive Gaussian curvature, then the tangent unit spheres of the Finsler metric are quadratically convex. According to \cite[theorem~1.1 and remark~2]{alfe98}, there exists a Crofton formula for the Holmes-Thompson area, from which the monotonicity of $f$ easily follows. Such smooth convex bodies are dense in the set of all convex bodies for the Hausdorff topology (see e.g. \cite[lemma~2.3.2]{hor94}). By approximation, it follows that $f$ is monotone for arbitrary $K$. \endproof \begin{lemma}[Co-area inequalities] There exists a constant $C_2>1$ such that for all $r>0$ \begin{displaymath} C_2^{-1} \ar(S(r)) \leq \frac{\partial}{\partial r}\vol(B(r)) \leq C_2 \ar(S(r)). \end{displaymath} \end{lemma} \proof Let $\mu:=\sigma dx_1\land\dots\land dx_n$ be the volume form, and let $\alpha$ be the $(n-1)$-form on $S(r)$ whose integral equals the area. Since \begin{displaymath} \vol(B(r))=\int_0^r\int_{S(s)}i_{\partial_r}\mu\ ds, \end{displaymath} where $\partial_r$ at $\lambda p\in S(s)$ is the positive multiple of $\vec{op}$ with unit Finsler norm, we have to compare $i_{\partial_r}\mu$ and $\alpha$. We will assume that $S(r)$ is differentiable at $\lambda p$. The section of the unit tangent ball by the tangent space $T_{\lambda p}S(r)$ will be called $\gamma$. By definition of the Busemann area, the area of $\gamma$ measured with the form $\alpha$ is the constant \begin{displaymath} \alpha(\gamma)=\omega_{n-1}. \end{displaymath} In the same way, calling $\Gamma$ the half unit ball containing $\partial_r$ and bounded by $\gamma$, one has \begin{displaymath} \mu(\Gamma)=\frac{1}{2}\omega_n. \end{displaymath} Since $\Gamma$ is convex, it contains the cone with base $\gamma$ and vertex $\partial_r$. Therefore, \begin{equation}\label{upper} \frac{1}{n}i_{\partial_r}\mu(\gamma)\leq\frac{1}{2}\omega_n. \end{equation} By Brunn's theorem (see e.g.
\cite[theorem 2.3]{kold2005}), the sections of the tangent unit ball with hyperplanes parallel to $\gamma$ have an area less than or equal to the area of $\gamma$. Also, the tangent unit ball has a supporting hyperplane at $\partial_r$ which is parallel to $\gamma$. Therefore, by Fubini's theorem, the cylinder $\gamma\times([0,1]\cdot\partial_r)$ has a volume greater than or equal to the volume of $\Gamma$ (even if it generally does not contain $\Gamma$). Hence, \begin{equation}\label{lower} \frac{1}{2}\omega_n\leq i_{\partial_r}\mu(\gamma). \end{equation} Inequalities~\eqref{upper} and \eqref{lower} give \begin{displaymath} \frac{1}{2}\frac{\omega_n}{\omega_{n-1}}\alpha(\gamma)\leq i_{\partial_r}\mu(\gamma)\leq\frac{n}{2}\frac{\omega_n}{\omega_{n-1}}\alpha(\gamma), \end{displaymath} from which the result easily follows. \endproof \begin{theorem} The spherical entropy coincides with the entropy. More precisely, \begin{align*} \limsup_{r \to \infty} \frac{\log \ar(S(r))}{r} & =\overline{\ent}K,\\ \liminf_{r \to \infty} \frac{\log \ar(S(r))}{r} & =\underline{\ent}K. \end{align*} \end{theorem} \proof For convenience, let \begin{align*} V(r) & :=\vol B(r),\\ A(r) & :=\ar S(r). \end{align*} Using the previous two lemmas, one has for all $r>0$ \begin{multline*} V(r)=\int_0^r V'(s)ds \leq C_2 \int_0^r A(s)ds \leq C_1C_2\int_0^r f(s)ds\\ \leq C_1C_2 f(r)r \leq C_1^2C_2 A(r)r. \end{multline*} It follows that \begin{multline*} \overline{\ent}K=\limsup_{r \to \infty} \frac{\log V(r)}{r} \leq \limsup_{r \to \infty} \frac{\log C_1^2C_2A(r)r}{r}\\=\limsup_{r \to \infty} \frac{\log \ar(S(r))}{r}. \end{multline*} Similarly, for each $\epsilon>0$ \begin{multline*} V(r(1+\epsilon)) = \int_0^{r(1+\epsilon)} V'(s)ds \geq C_1^{-1}C_2^{-1}\int_0^{r(1+\epsilon)} f(s)ds\\ \geq C_1^{-1}C_2^{-1} \int_r^{r(1+\epsilon)} f(s)ds \geq C_1^{-1}C_2^{-1} f(r) r \epsilon \geq C_1^{-2}C_2^{-1} A(r) r \epsilon \end{multline*} and hence \begin{multline*} (1+\epsilon)\overline{\ent} K = (1+\epsilon) \limsup_{r \to \infty} \frac{\log V(r(1+\epsilon))}{r(1+\epsilon)} \geq \limsup_{r \to \infty} \frac{\log C_2^{-1}C_1^{-2} A(r)r\epsilon}{r} \\ = \limsup_{r \to \infty} \frac{\log \ar(S(r))}{r}. \end{multline*} Letting $\epsilon \to 0$ gives the first equality. The second one follows in a similar way. \endproof \section{Entropy bounds} \label{sec_entropy_bounds} \subsection{Upper entropy bound in arbitrary dimension} We may now state and prove our first main theorem. \begin{theorem} \label{thm_main_thm} Let $K$ be an $n$-dimensional convex body and $o \in \inte K$. For any point $p \in \partial K$ we denote by $\bar k(p)$ its pseudo-Gauss curvature as in definition \ref{pseudogauss}. If \begin{equation} \label{eq_blaschke_hypothesis} \int_{\partial K} {\bar k}^{\frac12}(p)dp<\infty, \end{equation} then \begin{equation} \label{eq_entropy_coefficient} \lim_{r \to \infty} \frac{\vol B(o,r)}{\sinh^{n-1}r}=\frac{1}{n-1} \mathcal A_p(K). \end{equation} In particular, \begin{displaymath} \overline{\ent} K \leq n-1, \end{displaymath} and if $\mathcal A_p(K) \neq 0$, then $\overline{\ent} K=n-1$. \end{theorem} \proof Using the parameterization~\eqref{param_spheres}, the volume of metric balls is given by \begin{displaymath} \vol(B(r))=\int_0^r\int_{\partial K}F(p,s)\ d\mathcal H^{n-1}\,ds, \end{displaymath} where \begin{displaymath} F(p,r):=\sigma \bigl(\phi(p,r)\bigr)\Jac \phi(p,r).
\end{displaymath} The Jacobian may be explicitly computed: \begin{displaymath} \Jac \phi(p,r)=\frac{(e^{2r}-1)^{n-1} e^{2r}}{(ae^{2r}+1)^{n+1}} 2a^n (1+a) \bigl\langle p,n(p)\bigr\rangle. \end{displaymath} In particular, \begin{equation} \label{eq_limit_jac} \lim_{r \to \infty} e^{2r} \Jac \phi(p,r)=\frac{2(1+a)\bigl\langle p,n(p)\bigr\rangle}{a}. \end{equation} On the other hand, for each smooth boundary point $p$ we have, by proposition \ref{prop_pointwise_limit}, \begin{equation} \label{eq_limit_kappa} \lim_{r \to \infty} \frac{\sigma\bigl(\phi(p,r)\bigr)}{e^{(n+1)r}}=\frac{\sqrt{k(p)}}{\Bigl(2\bigl\langle p,n(p)\bigr\rangle\Bigr)^\frac{n+1}{2}} \frac{a^\frac{n+1}{2}}{(1+a)^\frac{n+1}{2}}. \end{equation} Then, by proposition \ref{proposition_blaschke_trick}, the hypothesis \eqref{eq_blaschke_hypothesis}, and dominated convergence, \begin{align}\label{eq_change_order} \lim_{r\to\infty}\frac{1}{e^{(n-1)r}}\int_{\partial K}F(p,r)d\mathcal H^{n-1} & =\int_{\partial K}\lim_{r\to\infty}\frac{F(p,r)}{e^{(n-1)r}}d\mathcal H^{n-1}\\ & =\int_{\partial K}\lim_{r\to\infty}\frac{\sigma \bigl(\phi(p,r)\bigr)}{e^{(n+1)r}} \lim_{r \to \infty} e^{2r} \Jac \phi(p,r) d\mathcal H^{n-1}\nonumber\\ &= \int_{\partial K} \frac{\sqrt{k(p)}}{\Bigl(2\bigl\langle p,n(p)\bigr\rangle\Bigr)^\frac{n-1}{2}} \left(\frac{a}{1+a}\right)^\frac{n-1}{2} d\mathcal H^{n-1}\nonumber\\ &= \frac{1}{2^{n-1}}\mathcal A_p(K).\nonumber \end{align} By L'Hospital's rule we get \begin{displaymath} \lim_{r \to \infty} \frac{\vol\bigl(B(r)\bigr)}{e^{(n-1)r}}=\lim_{r\to\infty}\dfrac{\int_0^r\int_{\partial K}F(p,s)d\mathcal H^{n-1}ds}{(n-1)\int_0^r e^{(n-1)s}ds}=\dfrac{1}{2^{n-1}(n-1)}\mathcal A_p(K). \end{displaymath} Since $\sinh^{n-1}r \sim e^{(n-1)r}/2^{n-1}$ as $r \to \infty$, this is precisely \eqref{eq_entropy_coefficient}. \endproof {\bf Remark:} The metric balls $B(r)$ are projective invariants of $K$. There is an affine version of the previous theorem using the affine balls $B_a(r):=\tanh(r)K$ (where multiplication is with respect to the center $o$). Under the same assumptions as in theorem \ref{thm_main_thm}, we obtain that \begin{displaymath} \lim_{r \to \infty} \frac{\vol B_a(r)}{e^{(n-1)r}}=\frac{1}{2^{n-1}(n-1)} \mathcal A_a(K) \end{displaymath} where $\mathcal A_a(K)$ is the centro-affine area (see section~\ref{sec_centro_proj}). The proof proceeds as the previous one, replacing the function $a$ by $1$. \begin{corollary}\label{corollary1} Suppose $K$ is an $n$-dimensional convex body of class $C^{1,1}$. Then \begin{displaymath} \ent K=n-1. \end{displaymath} \end{corollary} \proof For any $p\in\partial K$, $R(p)$ is the largest radius of a ball in $K$ containing $p$. By proposition~\ref{c11}, there exists a constant $R>0$ such that $R(p) \geq R$ for all $p \in \partial K$. It follows that the hypothesis \eqref{eq_blaschke_hypothesis} is satisfied and therefore $\ent K \leq n-1$. The Gauss map $\mathcal G\colon\partial K\rightarrow S^{n-1}$ is well-defined and continuous. As a consequence of theorem~2.3 in Hug~\cite{hugII} and equation~2.7 in Hug~\cite{hugI}, the standard measure on the unit sphere is the push-forward of $k\cdot d\mathcal H^{n-1}$, i.e. \begin{displaymath} \mathcal G_*(k\cdot d\mathcal H^{n-1}\mbox{}_{|\partial K})=d\mathcal H^{n-1}|_{S^{n-1}}\ , \end{displaymath} hence the curvature has a positive integral. Therefore, $\mathcal A_p(K)>0$, and equation~\eqref{eq_entropy_coefficient} implies that $\ent K=n-1$. \endproof \begin{corollary} If $K$ is an arbitrary $n$-dimensional convex body with $\mathcal A_p(K) \neq 0$, then $\underline{\ent}K \geq n-1$.
\end{corollary} \proof Arguing as in the proof of theorem \ref{thm_main_thm} and using Fatou's lemma instead of the dominated convergence theorem gives the result. \endproof \subsection{The plane case} Let us now assume that $n=2$. By theorem \ref{thm_schuett_werner}, the hypothesis \eqref{eq_blaschke_hypothesis} is satisfied for each convex body $K$. Therefore \begin{equation} \label{eq_easy_bound_plane} \overline{\ent} K \leq 1 \end{equation} and \begin{displaymath} \lim_{r \to \infty} \frac{\vol B(o,r)}{\sinh r}=\mathcal A_p(K). \end{displaymath} Next, we are going to prove a better bound for $\overline{\ent} K$. In order to state our main result, we need to recall some basic notions of measure theory in a Euclidean space and refer to P.~Mattila~\cite{mat99} for details. For a non-empty bounded set $A$, let $N(A,\epsilon)$ be the minimal number of $\epsilon$-balls needed to cover $A$. Then the upper Minkowski dimension of $A$ is defined as \begin{displaymath} \overline{\dim} A:=\inf\left\{s:\limsup_{\epsilon \to 0} N(A,\epsilon)\epsilon^s=0\right\}. \end{displaymath} One should note that this dimension is invariant under bi-Lipschitz maps. In particular, it does not depend on a particular choice of inner product; moreover, it is invariant under projective maps, provided the considered subsets are bounded. Recall that a point $p\in K$ is called \emph{extremal} if it is not a convex combination of other points of $K$. The set of extremal points is a subset of $\partial K$, which we denote by $\ex K$. \begin{theorem} \label{thm_minkowski_bound} Let $K$ be a plane convex body and $d$ be the upper Minkowski dimension of $\ex K$. Then the entropy of $K$ is bounded by \begin{displaymath} \overline{\ent} K \leq \frac{2}{3-d} \leq 1. \end{displaymath} \end{theorem} \proof Since the entropy is independent of the choice of the center, we may suppose that the Euclidean unit ball around $o$ is the maximum volume ellipsoid inside $K$. Then $K$ is contained in the ball of radius $2$ (see \cite{barv02}). Set $\epsilon:=e^{-\alpha r}$, where $\alpha \leq 1$ will be fixed later. Divide the boundary of $K$ into two parts: \begin{displaymath} \partial K= \mathcal B \cup \mathcal G, \end{displaymath} where $\mathcal B$ (the bad part) is the closed $\epsilon$-neighborhood around the set of extremal points of $K$ and $\mathcal G$ (the good part) is its complement. Using proposition~\ref{proposition_blaschke_strong} and equalities \eqref{eq_limit_jac}, \eqref{eq_limit_kappa}, we get the following upper bound for large values of $r$, \begin{equation} \int_{\frac{r}{2}}^r \int_{\mathcal B} \sigma \bigl(\phi(p,s)\bigr) \Jac \phi(p,s) d\mathcal H^1 ds \leq O\left(e^r \sqrt{\mathcal H^1(\mathcal B)}\right). \label{eq_bad_part} \end{equation} Next, let $p \in \mathcal G$. The endpoints of the maximal segment in $\partial K$ containing $p$ are extremal points of $K$ and hence at distance at least $\epsilon$ from $p$. Therefore $K$ contains a triangle as in proposition~\ref{proposition_density_triangle}, and if $s \geq r/2$ and $r$ is sufficiently large, \begin{displaymath} \sigma(\phi(p,s))=\sigma(\lambda \cdot p) \leq 32\pi \max\left\{\frac{1}{\epsilon (1-\lambda)},\frac{1}{\epsilon^2}\right\} =\frac{32\pi}{\epsilon (1-\lambda)}. \end{displaymath} Integrating this from $r/2$ to $r$ yields \begin{equation} \int_\frac{r}{2}^r \int_{\mathcal G} \sigma \bigl(\phi(p,s)\bigr)\Jac \phi(p,s) d\mathcal H^1 ds = O\left(e^{\alpha r}\right).
\label{eq_good_part} \end{equation} Let $d$ be the upper Minkowski dimension of the set of extremal points of $K$. Then, for each $\eta>0$, $N(\ex K,\epsilon)=o(\epsilon^{-d-\eta})$ as $\epsilon \to 0$. By definition of $N$, there is a covering of $\ex K$ by $N(\ex K,\epsilon)$ balls of radius $\epsilon$. Hence there is a covering of $\mathcal B$ by $N(\ex K,\epsilon)$ balls of radius $2\epsilon$. The intersection of a $2\epsilon$-ball with $\partial K$ has length less than $4\pi \epsilon$. It follows that \begin{displaymath} \mathcal{H}^1(\mathcal B) = o(\epsilon^{-d-\eta+1}). \end{displaymath} Since the volume of $B(r/2)$ is bounded by $O(e^{r/2})$ (see \eqref{eq_easy_bound_plane}), the volume of $B(r)$ is bounded by \begin{align*} \vol B(r) & =\vol B(r/2)+\int_{\frac{r}{2}}^r \int_{\mathcal B} \sigma \bigl(\phi(p,s)\bigr) \Jac \phi(p,s) d\mathcal H^1 ds \\ & \quad +\int_\frac{r}{2}^r \int_{\mathcal G} \sigma \bigl(\phi(p,s)\bigr)\Jac \phi(p,s) d\mathcal H^1 ds \\ & = O(e^\frac{r}{2}) + O\bigl(e^{r (1-\frac{\alpha (1-d-\eta)}{2})}\bigr) + O\left(e^{\alpha r}\right). \end{align*} We fix $\alpha$ such that $1-\alpha \frac{1-d-\eta}{2}=\alpha$, i.e. $\alpha:=\frac{2}{3-d-\eta} >\frac23$. Then \begin{displaymath} \vol B(r)=O(e^{\alpha r}), \end{displaymath} which implies that the (upper) entropy of $K$ is bounded by $\alpha$. Since $\eta>0$ was arbitrary, the result follows. \endproof \subsection{An example of non-integer entropy} We will construct an example of a plane convex body with piecewise affine boundary whose entropy is strictly between $0$ and $1$. Let us choose a real number $s>2$ and set $\alpha_i:=\frac{C_s}{i^s}$ where $C_s>0$ is sufficiently small such that \begin{equation*} \label{eq_sum_angles} 3\sum_{i=1}^\infty \alpha_i < \pi. \end{equation*} Consider a centrally symmetric sequence $E$ of points on $S^1$ such that the angles between consecutive points are $\alpha_1,\alpha_1,\alpha_1,\alpha_2,\alpha_2,\alpha_2,\ldots$ (each angle appearing three times). \begin{theorem} The entropy of $K=conv(E)$ is bounded by \begin{displaymath} 0<\frac{1}{s} \leq \underline{\ent} K \leq \overline{\ent} K \leq \frac{2s-2}{3s-4}<1. \end{displaymath} \end{theorem} \proof {\bf Lower bound}\\ The unit sphere of radius $r$ in the Hilbert geometry $K$ is $\tanh r K$ and consists of an infinite number of segments. An easy geometric computation shows that the middle segment $S_i(r)$ corresponding to $\alpha:=\alpha_i$ has for each $r \geq 0$ length bounded from below by \begin{displaymath} l\bigl(S_i(r)\bigr) \geq \log\left(\frac{\tanh r}{1-\tanh r} \frac{2\sin \alpha/2 \sin(2\alpha)}{\cos \alpha/2} +1\right). \end{displaymath} Set \begin{displaymath} i_0(r):=\left\lfloor (2C_s)^{\frac{1}{s}} e^\frac{r}{s} \right\rfloor. \end{displaymath} Then, for sufficiently large $r$, \begin{displaymath} \frac{\tanh r}{1-\tanh r} \frac{2\sin \alpha_i/2 \sin(2\alpha_i)}{\cos \alpha_i/2} \leq 1 \quad \forall i \geq i_0(r). \end{displaymath} By concavity of the $\log$-function, we have $\log(1+x) \geq x \log 2 \geq \frac{x}{2}$ for $0 \leq x \leq 1$. Therefore \begin{displaymath} l\bigl(S(r)\bigr) \geq \frac12 \sum_{i=i_0}^\infty \frac{\tanh r}{1-\tanh r} \frac{2\sin \alpha_i/2 \sin(2\alpha_i)}{\cos \alpha_i/2}. \end{displaymath} For sufficiently large $r$, the first factor is bounded from below by $\frac{e^{2r}}{4}$, while the second is bounded from below by $\alpha_i^2$. 
We thus get \begin{displaymath} l\bigl(S(r)\bigr) \geq \frac{e^{2r}}{8} \sum_{i=i_0}^\infty \alpha_i^2=C_s^2 \frac{e^{2r}}{8} \sum_{i=i_0}^\infty \frac{1}{i^{2s}} \geq C_s^2 \frac{e^{2r}}{8} \int_{i_0}^\infty \frac{1}{x^{2s}}dx= C_s^2 \frac{e^{2r}}{8(2s-1)i_0^{2s-1}}. \end{displaymath} Replacing our explicit value for $i_0$ gives \begin{displaymath} l(S(r)) \geq C e^\frac{r}{s} \end{displaymath} for sufficiently large $r$ and some constant $C$ (again depending on $s$). Hence $\underline{\ent} K \geq \frac{1}{s}$. {\bf Upper bound}\\ For the upper bound in the statement, we apply theorem~\ref{thm_minkowski_bound}. For this, we have to find an upper bound on the Minkowski dimension of $\ex K=E$. Since the Minkowski dimension is invariant under bi-Lipschitz maps, we may replace distances on the unit circle by angular distances. $E$ has two accumulation points $\pm x_0$. For $\epsilon>0$, let $N(\epsilon)$ be the number of $\epsilon$-balls needed to cover $E$. We take one such ball around each of $\pm x_0$ and one further ball for each point in $E$ not covered by these two balls. The three points corresponding to the angle $\alpha_i$ are certainly in the $\epsilon$-neighborhood of $\pm x_0$ provided \begin{displaymath} 3 \sum_{j=i}^\infty \alpha_j \leq \epsilon. \end{displaymath} Now we compute that \begin{displaymath} \sum_{j=i}^\infty \alpha_j =C_s \sum_{j=i}^\infty \frac{1}{j^s} \leq C_s \int_{i-1}^\infty \frac{1}{x^s}dx=\frac{C_s}{s-1} \frac{1}{(i-1)^{s-1}}. \end{displaymath} It follows that all $i \geq i_0:= \left(\frac{3C_s}{s-1}\right)^\frac{1}{s-1} \epsilon^{\frac{1}{1-s}}+1$ satisfy the inequality above and hence \begin{displaymath} N(\ex K,\epsilon) \leq 6i_0+2 \leq C \epsilon^{-\frac{1}{s-1}}. \end{displaymath} Hence the upper Minkowski dimension of $\ex K$ is not larger than $\frac{1}{s-1}$. The upper bound of theorem \ref{thm_minkowski_bound} gives \begin{displaymath} \overline{\ent} K \leq \frac{2s-2}{3s-4}. \end{displaymath} \endproof \section{Centro-projective and centro-affine areas} \label{sec_centro_proj} In this section, we will take a closer look at the centro-projective area, which was introduced (in a non-intrinsic way) in definition \ref{def_cp_area}. \subsection{Basic definitions and properties} Geometrically speaking, both centro-affine and centro-projective areas are Riemannian volumes of the boundary $\partial K$. We first give intrinsic definitions of the centro-affine metric and area. Let $K$ be a convex body with a distinguished interior point, which we may suppose to be the origin $o$ of $V$. The Minkowski functional of $K$ is the unique positive function $F$ that is homogeneous of degree one and whose level set at height $1$ is the boundary $\partial K$. This function is convex and, according to Alexandroff's theorem, has almost everywhere a quadratic approximation. \begin{definition} Let $v$ be a tangent vector to $\partial K$ at a smooth point $p$. Then the \emph{centro-affine semi-norm} of $v$ is \begin{displaymath} \Vert v\Vert_a:=\sqrt{Hess_pF(v,v)}. \end{displaymath} \end{definition} The square of the centro-affine semi-norm is a quadratic function on the tangent space, hence we may define as usual a volume form, say $\omega_a$ (which vanishes if $\|\cdot\|_a$ is not definite). \begin{definition} The \emph{centro-affine area} of $K$ is \begin{displaymath} \mathcal A_a(K):=\int_{\partial K}|\omega_a|. \end{displaymath} \end{definition} It easily follows from the definitions that the centro-affine area is indeed an affine invariant of pointed convex bodies.
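As a simple illustration of these definitions, let $K$ be the Euclidean unit ball $B$ centered at $o$. Then $F(x)=|x|$, and for $p \in \partial B$ and $v \in T_p\partial B$ a direct computation gives $Hess_pF(v,v)=|v|^2$, so the centro-affine semi-norm coincides with the Euclidean norm and \begin{displaymath} \mathcal A_a(B)=\mathcal H^{n-1}(S^{n-1})=n\omega_n. \end{displaymath} This agrees with the formula of the next proposition, since $k\equiv 1$ and $\langle n,p\rangle\equiv 1$ on the unit sphere.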
Moreover, the centro-affine area is finite and vanishes on polytopes. The next proposition relates our definitions to the classical ones; its proof is a straightforward computation. \begin{proposition} If the space is equipped with a Euclidean inner product, then the centro-affine area is given by \begin{displaymath} \mathcal A_a(K)=\int_{\partial K}\frac{\sqrt{k}}{\langle n,p\rangle^{\frac{n-1}{2}}}\ dA, \end{displaymath} where $k$ is the Gaussian curvature of $\partial K$ at $p$, $n$ the unit vector normal to $T_p\partial K$ and $dA$ the Euclidean area. \end{proposition} In order to introduce the centro-projective area, we will consider a compact convex subset of the (real) $n$-dimensional projective space. Here the word ``convex'' means that each intersection with a projective line is connected. The definitions of the centro-projective semi-norm and area are essentially the same as the centro-affine ones, but one has to replace the Minkowski functional by a projectively invariant function. \begin{definition} Let $K\subset \mathbb P^n$ be a convex body and $o \in \inte K$. The \emph{projective gauge function} is \begin{align*} G_K\colon \mathbb P^n \setminus \{o\} & \to \mathbb R \cup \{\infty\},\\ x & \mapsto 2[q_1,o,x,q_2] \end{align*} where $q_1$ and $q_2$ are the two intersections of $\partial K$ with the line going through $o$ and $x$. \end{definition} Since the order of $q_1$ and $q_2$ is not fixed, this function is multi-valued (in fact $2$-valued). Identifying $\mathbb{R} \cup \{\infty\}$ with $\mathbb{P}^1$, this function is continuous. If $p$ belongs to the boundary of $K$, then the two values of $G_K(p)$ are different, one of them being $2$, the other being $\infty$. Hence there is some neighborhood $U$ of $p$ such that the restriction of $G_K$ to $U$ is the union of two continuous (in fact smooth) functions $G_K^+, G_K^-$ on $U$, where $G_K^+(p)=2$ and $G_K^-(p)=\infty$. Let $v$ be a tangent vector to $\partial K$ at a smooth point $p$. Since the restriction of $G_K^+$ to $\partial K \cap U$ is constant, the derivative of $G_K^+$ in the direction of $v$ vanishes. Therefore, the Hessian of the restriction of $G_K^+$ to the tangent line is well-defined. \begin{definition} The \emph{centro-projective semi-norm} of $v$ is \begin{displaymath} \Vert v\Vert_p:=\sqrt{Hess_pG_K^+(v,v)}. \end{displaymath} Calling $\omega_p$ the induced volume form on $\partial K$, the \emph{centro-projective area} of $K$ is \begin{displaymath} \mathcal A_p(K):=\int_{\partial K}|\omega_p|. \end{displaymath} \end{definition} As a consequence of the definition, one has \begin{proposition}\label{euklid} In a Euclidean space, \begin{equation*} \label{eq_centro_proj_eucl} \mathcal A_p(K)=\int_{\partial K}\frac{\sqrt{k}}{\langle n,p\rangle^{\frac{n-1}{2}}}\left(\frac{2a}{1+a}\right)^{\frac{n-1}{2}}\ dA. \end{equation*} In particular, the intrinsic definition of $\mathcal A_p$ agrees with the definition given in the introduction. \end{proposition} \proof An easy computation shows that \begin{displaymath} [q_1,o,x,q_2]=\frac{1+a(q_2)}{F(x)+a(q_2)}F(x). \end{displaymath} Then, if $p$ is a smooth point of $\partial K$ and $v\in T_p\partial K$, \begin{displaymath} Hess_pG_K^+(v,v)=\frac{2a(p)}{1+a(p)}Hess_pF(v,v). \end{displaymath} \endproof \subsection{Properties of the centro-projective area} Both centro-affine and centro-projective areas vanish on polytopes, hence they are not continuous with respect to the Hausdorff topology on (pointed) bounded convex bodies.
Nevertheless, the centro-affine area is upper semicontinuous (see \cite{lut96}). The same holds true for the centro-projective area, as shown in the next theorem. \begin{theorem} \label{thm_properties_ap} The centro-projective area is finite, invariant under projective transformations, and upper semicontinuous. \end{theorem} \proof From the above intrinsic definition, it follows that $\mathcal A_p$ is invariant under projective transformations. Also, since the function $a$ on the boundary is bounded and positive and since the centro-affine area is finite, it follows from proposition~\ref{euklid} that the centro-projective area is also finite. It remains to show that it is upper semicontinuous. Our proof is based on the fact that the centro-affine surface area $\mathcal{A}_a$ is upper semicontinuous, see E.~Lutwak~\cite{lut96}. Let $K$ be a bounded convex body containing the origin in its interior and $(K_i)$ a sequence of convex bodies with the same properties converging to $K$. Set \begin{displaymath} \tau(p):=\left(\frac{2a(p)}{1+a(p)}\right)^{\frac{n-1}{2}}, \quad p \in \partial K, \end{displaymath} which is a continuous function on $\partial K$. For each $i$, if $a_i$ is the function corresponding to $K_i$ and $p_i$ is the radial projection of $p$ on $\partial K_i$, define $\tau_i \in C(\partial K)$ by \begin{displaymath} \tau_i(p):=\left(\frac{2a_i(p_i)}{1+a_i(p_i)}\right)^{\frac{n-1}{2}}\text{.} \end{displaymath} Since $K_i \to K$, $\tau_i$ converges uniformly to $\tau$. Therefore, for fixed $\epsilon>0$ and all sufficiently large $i$, \begin{displaymath} \|\tau_i-\tau\|_\infty <\epsilon. \end{displaymath} Take a triangulation of the sphere and let $\partial K=\cup_{j=1}^m \Delta_j$ (resp. $\partial K_i=\cup_{j=1}^m \Delta_{ij}$) be its radial projection. Choosing this triangulation sufficiently thin, there exist $t_1,\ldots,t_m \in \mathbb R_+$ with \begin{displaymath} |\tau(p)-t_j|<\epsilon \end{displaymath} on $\Delta_j$. By the triangle inequality, $|\tau_i(p)-t_j|<2\epsilon$ on $\Delta_{ij}$. We define \begin{displaymath} \mathcal{A}_p(K_i,\Delta_{ij}):=\int_{\Delta_{ij}} \frac{\sqrt{k(x)}}{\bigl\langle n(x),x\bigr\rangle^{\frac{n-1}{2}}} \tau_i d\mathcal{H}^{n-1}(x). \end{displaymath} Clearly, $\mathcal{A}_p(K_i)=\sum_{j=1}^m \mathcal{A}_p(K_i,\Delta_{ij})$. In a similar way, we define $\mathcal{A}_p(K,\Delta_{j})$, $\mathcal{A}_a(K_i,\Delta_{ij})$ and $\mathcal{A}_a(K,\Delta_{j})$. Fix $p_j$ in the interior of $\Delta_j$ and consider the convex hull $\widehat{\Delta_{j}}$ (resp. $\widehat{\Delta_{ij}}$) of $\Delta_j$ (resp. $\Delta_{ij}$) and $-p_j$. The boundary of $\widehat{\Delta_{ij}}$ is a union of $\Delta_{ij}$ and line segments, which contribute no centro-affine area, hence $\mathcal{A}_a(K_i,\Delta_{ij})= \mathcal{A}_a(\widehat{\Delta_{ij}})$. By the semicontinuity of $\mathcal{A}_a$, we obtain \begin{displaymath} \limsup_{i \to \infty} \mathcal{A}_a(K_i,\Delta_{ij})= \limsup_{i \to \infty} \mathcal{A}_a(\widehat{\Delta_{ij}})\leq \mathcal{A}_a(\widehat{\Delta_j}) = \mathcal{A}_a(K,\Delta_j).
\end{displaymath} It follows that \begin{align*} \limsup_{i \to \infty} \mathcal{A}_p(K_i) & =\limsup_{i \to \infty} \sum_{j=1}^m \mathcal{A}_p(K_i,\Delta_{ij}) \\ & \leq \limsup_{i \to \infty} \sum_{j=1}^m \mathcal{A}_a(K_i,\Delta_{ij})(t_j+2\epsilon) \\ & \leq \sum_{j=1}^m \mathcal{A}_a(K,\Delta_{j})(t_j+2\epsilon). \end{align*} On the other hand, \begin{displaymath} \mathcal{A}_p(K) = \sum_{j=1}^m \mathcal{A}_p(K,\Delta_j) \geq \sum_{j=1}^m \mathcal{A}_a(K,\Delta_j)(t_j-\epsilon), \end{displaymath} from which we deduce that \begin{displaymath} \limsup_{i \to \infty} \mathcal{A}_p(K_i) \leq \mathcal{A}_p(K)+3\epsilon \mathcal{A}_a(K). \end{displaymath} Since $\epsilon>0$ was arbitrary, the upper semicontinuity follows. \endproof The centro-affine surface area has the following important properties: \begin{enumerate} \item $\mathcal A_a$ is a valuation on the space of compact convex subsets of $V$ containing $o$ in the interior. This means that whenever $K,L, K \cup L$ are such bodies, then \begin{displaymath} \mathcal A_a(K \cup L)=\mathcal A_a(K)+\mathcal A_a(L)-\mathcal A_a(K \cap L). \end{displaymath} \item $\mathcal A_a$ is upper semicontinuous with respect to the Hausdorff topology. \item $\mathcal A_a$ is invariant under $GL(V)$. \end{enumerate} A recent theorem by M.~Ludwig \& M.~Reitzner \cite{lurei08} states that the vector space of functionals with these three properties is generated by the constant valuation and $\mathcal A_a$. The centro-projective surface area satisfies the last two conditions, but is not a valuation.
\section{Introduction} Dark matter is an essential ingredient in a good fraction of the literature on extragalactic astronomy and cosmology. Since dark matter cannot be made of any of the usual standard-model particles (as we will discuss below), dark matter is also a central focus of elementary-particle physics. The purpose of this review is to provide a pedagogical introduction to the principal astrophysical evidence for dark matter and to some of the particle candidates. Rather than present a comprehensive survey of the vast and increasingly precise measurements of the amount and distribution of dark matter, we will present very simple (``squiggly-line'') arguments for the existence of dark matter in clusters and galaxies, as well as the arguments for why it is nonbaryonic. The motivation will be to provide insight into the evidence and arguments, rather than to summarize results from the latest state-of-the-art applications of the techniques. Likewise, construction of particle-physics models for dark matter has become a huge industry, accelerated quite recently by anomalous cosmic-ray and diffuse-background results \cite{haze,pamela}. Again, we will not attempt to survey these recent developments and focus instead primarily on the basic arguments for particle dark matter. In particular, there has developed in the theoretical literature over the past twenty years a ``standard'' weakly-interacting massive particle (WIMP) scenario, in which the dark-matter particle is a new particle that arises in extensions (e.g., supersymmetry \cite{Jungman:1995df} or universal extra dimensions \cite{UEDs}) of the standard model that are thought by many particle theorists to provide the best prospects for new-physics discoveries at the Large Hadron Collider (LHC). We therefore describe this basic scenario. More detailed reviews of weakly-interacting massive particles, the main subject of this article, can be found in Refs.~\cite{Jungman:1995df,Bergstrom:2000pn,Bertone:2004pz}. After describing the standard WIMP scenario, we provide a brief sampling of some ideas for ``non-minimal'' WIMPs, scenarios in which the WIMP is imbued with some additional properties, beyond simply those required to account for dark matter. We also briefly discuss some other attractive ideas (axions and sterile neutrinos) for dark matter. Exercises are provided throughout. \section{Astrophysical evidence} It has been well established since the 1930s that there is much matter in the Universe that is not seen. It has also been long realized, particularly since the early 1970s, that much of this matter must be nonbaryonic. The evidence for a significant quantity of dark matter accrued from galactic dynamics, the dynamics of galaxy clusters, and applications of the cosmic virial theorem. The evidence that much of this matter is nonbaryonic came from the discrepancy between the total matter density $\Omega_m\simeq 0.2-0.3$ (in units of the critical density $\rho_c=3 H_0^2/8\pi G$, where $H_0$ is the Hubble parameter), obtained from such measurements, and the baryon density $\Omega_b\simeq 0.05$ required for the concordance of the observed light-element (H, D, $^3$He, $^4$He, $^7$Li) abundances with those predicted by big-bang nucleosynthesis \cite{Iocco:2008va}, the theory for the assembly of light elements in the first minutes after the big bang.
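To put numbers to these densities, the short script below (a sketch; the value $H_0 = 70~\textrm{km}\,\textrm{s}^{-1}\,\textrm{Mpc}^{-1}$ and the rounded constants are assumptions of this illustration, not fits to data) evaluates $\rho_c = 3H_0^2/8\pi G$ and the corresponding matter and baryon mass densities:
\begin{verbatim}
import math

# Rounded physical constants (cgs units); assumed for illustration
G   = 6.674e-8    # gravitational constant [cm^3 g^-1 s^-2]
Mpc = 3.086e24    # one megaparsec [cm]
GeV = 1.783e-24   # one GeV/c^2 [g]

H0 = 70.0e5 / Mpc   # Hubble parameter [s^-1], assuming 70 km/s/Mpc

# Critical density rho_c = 3 H0^2 / (8 pi G)
rho_c = 3.0 * H0**2 / (8.0 * math.pi * G)   # [g cm^-3]
print("rho_c ~ %.1e g/cm^3 ~ %.1e GeV/cm^3" % (rho_c, rho_c / GeV))

# Mass densities implied by Omega_m ~ 0.25 and Omega_b ~ 0.05
for name, omega in [("matter", 0.25), ("baryons", 0.05)]:
    print("rho_%s ~ %.1e GeV/cm^3" % (name, omega * rho_c / GeV))
\end{verbatim}
The result, $\rho_c \approx 9\times 10^{-30}~\textrm{g}\,\textrm{cm}^{-3} \approx 5\times 10^{-6}~\textrm{GeV}\,\textrm{cm}^{-3}$, is also the normalization that enters the relic-abundance estimate later in these notes.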
Rather than review the historical record, we discuss the most compelling arguments for nonbaryonic dark matter as well as some observations most relevant to the astrophysical phenomenology of dark matter today. \subsection{Galactic rotation curves} The flatness of galactic rotation curves has provided evidence for dark matter since the 1970s. These measurements are particularly important now not only for establishing the existence of dark matter, but also for fixing the local dark-matter density, which is relevant for the direct detection of dark matter. We live in a typical spiral galaxy, the Milky Way, at a distance $\sim 8.5 \textrm{ kpc}$ from its center. The visible stars and gas in the Milky Way extend out to a distance of about 10 kpc. From the rotation curve, the rotational velocity $v_c(r)$ of stars and gas as a function of Galactocentric radius $r$, we can infer the mass $M_{<}(r)$ of the Galaxy enclosed within a radius $r$. If the visible stars and gas provided all the mass in the Galaxy, one would expect the rotation curve to decline at radii larger than the 10~kpc extent of the stellar disk according to the Keplerian relation $v_c^2 = G M_{obs}/r$. Instead, one observes that $v_c(r)$ remains constant (a flat rotation curve) out to much larger radii, indicating that $M_{<}(r) \propto r$ for $r \gg 10~{\rm kpc}$ and thus that the Galaxy must contain far more matter than contributed by the stars and the gas. \begin{figure}[b] \centering \includegraphics[scale=0.5]{rotationcurve} \caption{Measured rotation curve of NGC6503 with the best fit and the contributions from the halo, disk, and gas. From Ref.~\protect\cite{Begeman91}.} \label{fig:rotationcurve} \end{figure} Assuming a spherically symmetric distribution of matter, the mass inside a radius $r$ is given by \begin{equation} M_{<}(r) = 4 \pi \int_0^r \rho(r') r'^2 dr'. \end{equation} An estimate for the distribution of dark matter in the Galaxy can be obtained from the behavior of the rotation curve in the inner and outer Galaxy. For example, the density distribution for the cored isothermal sphere, given by \begin{equation} \label{isothermal} \rho(r) = \rho_0 \frac{R^2 + a^2}{r^2 + a^2} \, , \end{equation} where $R \sim 8.5~{\rm kpc}$ is our distance from the Galactic center and $\rho_0$ is the local dark-matter density, provides a qualitatively consistent description of the data. For large $r$, $\rho \sim r^{-2} \, \Rightarrow \, M(r) \propto r \, \Rightarrow \, v \sim {\rm const}$, while for small $r$, $\rho \sim \textrm{const} \, \Rightarrow \, M(r) \propto r^3 \, \Rightarrow \, v \propto r$. Eq.~(\ref{isothermal}) describes a 2-parameter family of density profiles, and by fitting the observed data one finds a scale radius $a \sim 3-5 \textrm{ kpc}$ and a local matter density $\rho_0 \sim 0.4 \, \textrm{GeV} \, \textrm{cm}^{-3}$; the uncertainties arise from the standard errors in the rotation-curve measurements and from uncertainties in the contribution of the stellar disk to the local rotation curve. Because the dark matter is moving in the same potential well, the velocity dispersion of the dark matter can be estimated to be $\left\langle v_{{\rm dm}}^2 \right\rangle^{1/2} \sim 300~{\rm km}/{\rm sec}$. The simplest assumption is that the dark matter has a Maxwell-Boltzmann distribution with $f(\vec{v}) \sim e^{- v^2/(2 \bar v^2)}$, where $\bar v \sim 220\, {\rm km}/{\rm sec}$. \medskip \noindent {\sl Exercise 1.
Explain/estimate how $\rho_0$ would be affected if \begin{itemize} \item (a) the halo were flattened, keeping the rotation curve unaltered; \item (b) the profile were of the Navarro-Frenk-White (NFW) type: $\rho(r) \propto \rho_c/[r (r + r_c)^2]$, keeping the local rotation speed the same; \item (c) the stellar contribution to the rotation curve were either increased or decreased. \end{itemize} } \medskip \subsection{Galaxy Clusters} Galaxy clusters are the largest gravitationally bound objects in the Universe. They were first observed as concentrations of thousands of individual galaxies, and early application of the virial theorem $v^2\sim GM/R$ (relating the observed velocity dispersion $v^2$ to the observed radius $R$ of the cluster) suggested that there is more matter in clusters than the stellar component can provide \cite{Zwicky:1933}. It was later observed that these galaxies are embedded in hot x-ray--emitting gas, and we now know that clusters are the brightest objects in the x-ray sky. The x rays are produced by hot gas heated to the virial temperature $T \sim \textrm{keV}$ of the gravitational potential well formed by the dark matter, galaxies, and gas. A virial temperature $T \sim \textrm{keV}$ corresponds to a typical velocity for the galaxies of $v \sim 10^3\, \textrm{km/s}$. Observations of clusters come from optical and x-ray telescopes and, more recently, via the Sunyaev-Zeldovich effect \cite{Sunyaev:1980vz}. Several independent lines of evidence from clusters indicate that the total mass required to explain observations is much larger than can be accounted for by the observed baryonic content of galaxies and gas. \subsubsection{Lensing} Galaxy clusters exhibit the phenomenon of gravitational lensing \cite{Einstein:1936,Zwicky:1937}. Because the gravitational field of the cluster curves the space around it, light rays emitted from objects behind the cluster travel along curved rather than straight paths on their way to our telescopes \cite{Blandford:1991xc}. If the lensing is strong enough, there are multiple paths from the same object, past the cluster, that arrive at our location in the Universe; this results in multiple images of the same object (e.g., a background galaxy or active galactic nucleus). Furthermore, because the light from different sides of the same galaxy travels along slightly different paths, the images of strongly lensed sources are distorted into arcs. For instance, HST observations of Abell 2218 show arcs and multiple images, as shown in Fig.~\ref{fig:abell2218}. If the lensing is weak, the images may become slightly elongated, even if they are not multiply imaged. \begin{figure}[b] \centering \includegraphics[scale=0.5]{a2218image} \caption{Image of the galaxy cluster Abell 2218. Credits: NASA, Andrew Fruchter and the ERO team.} \label{fig:abell2218} \end{figure} For a lensing cluster with total mass $M$ and impact parameter $d$, the deflection angle is of order \begin{equation} \label{defangle} \alpha \sim \frac{G M}{d c^2}. \end{equation} Thus, from measurements of the deflection angle and impact parameter (which can be inferred by knowing the redshift to the lensing cluster and source), one can infer that the total mass $M$ of a cluster is much larger than the observed baryonic mass $M_b$. \medskip \noindent {\sl Exercise 2. Suppose a massive particle with velocity $v$ is incident, with impact parameter $b$, on a fixed deflector of mass $M$.
Calculate the deflection angle (using classical physics) due to scattering of this particle via gravitational interaction with the deflector. Show that you recover $\alpha \sim G M/(b c^2)$ in the limit $v\rightarrow c$, the velocity at which light rays propagate. Actually, the correct general-relativistic calculation recovers this expression, but with an extra factor of 2.} \medskip \noindent {\sl Exercise 3. Estimate the deflection angle $\alpha$ for lensing by a cluster of $M \sim 10^{15} M_{\odot}$ and for an impact parameter of 1 Mpc.} \medskip \subsubsection{Hydrostatic equilibrium} In a relaxed cluster, the temperature profile $T(r)$ of the gas, as a function of radius $r$, can be inferred using the strength of the emission lines, and the electron number density $n_e(r)$ can be inferred using the x-ray luminosity $L(r)$. Combined, these observations give an estimate of the radial pressure profile $p(r) \propto n_e(r) k_B T(r)$. In steady state, a gravitating gas will satisfy the equation of hydrostatic equilibrium, \begin{align} \label{euler} \frac{d p}{d r} = - G \, \frac{M_{<}{(r)} \, \rho_{{\rm gas}}(r)}{r^2}\,. \end{align} Here, $M_<(r)$ is the total (dark matter and baryonic gas) mass enclosed by a radius $r$ and $\rho_{\rm gas}(r)$ is the gas density at radius $r$. Eq.~(\ref{euler}) can be used to determine the total mass $M$ of the cluster. Comparison with the observed baryonic mass $M_b$ again shows that $M \gg M_b$. In particular, observations using the x-ray satellites XMM-Newton and Chandra indicate that the ratio of baryonic to total matter in clusters is ${\Omega_b}/{\Omega_m} \sim {1}/{6}$. Additional constraints on the cluster-gas distribution can be obtained from the Sunyaev-Zeldovich (SZ) effect. This is the upscattering of cosmic microwave background (CMB) photons by the hot electron gas in the cluster; the magnitude of the observed CMB-temperature change is then proportional to the integral of the electron pressure through the cluster (see, e.g., \cite{Sunyaev:1980vz}). \medskip \noindent {\sl Exercise 4. Estimate, in order of magnitude, the x-ray luminosity $L_{{\rm X}}$ for a cluster with total mass $M \sim 10^{15} M_{\odot}$ and a baryon fraction 1/6 in hydrostatic equilibrium with maximum radius $R \sim {\rm Mpc}$.} \medskip \noindent {\sl Exercise 5. Assume the cluster in Exercise 4 is isothermal ($T(r) =T =$\, const.) with a dark-matter distribution consistent with an NFW profile with $r_c\simeq R/10$. Neglecting the self-gravity of the gas: \begin{itemize} \item (a) Show that the properly normalized dark-matter density profile is approximately $\rho(r) \simeq (233/45)\, M_c/[4\pi r (r + r_c)^2]$, where $M_c=M_{<}(r_c)$ is the mass enclosed within the scale radius $r_c$. Determine $M_{<}(r)$ and $M_c$ in terms of $M$ for this cluster. \item (b) Using your results from (a), solve Eq.~(\ref{euler}) and show that the gas density profile in such an NFW cluster takes the form $\rho_{gas}(r) \propto (1+r/r_c)^{\Gamma r_c/r}$, where $\Gamma \propto (G M_c \mu m_p/r_c)/(k_B T)$. \end{itemize} } \medskip \subsubsection{Dynamics} According to the virial theorem, the velocity dispersion of galaxies is approximately $v^2(r) \sim G M_{<}(r)/r$, where $M_{<}(r)$ is the mass enclosed within a radius $r$. Therefore, from measurements of the velocity dispersion and size of a cluster (which can be determined if the redshift and angular size of the cluster are known), one can infer the total mass $M$.
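As a quick numerical illustration of such a virial estimate, the sketch below evaluates $M \sim v^2 R/G$ for assumed Coma-like values ($v \sim 10^3~\textrm{km}\,\textrm{s}^{-1}$, $R \sim 1$~Mpc; illustrative inputs, not a fit to any particular data set):
\begin{verbatim}
G    = 6.674e-8   # gravitational constant [cm^3 g^-1 s^-2]
Mpc  = 3.086e24   # one megaparsec [cm]
Msun = 1.989e33   # solar mass [g]

v = 1.0e8         # assumed velocity dispersion ~10^3 km/s, in cm/s
R = 1.0 * Mpc     # assumed cluster radius ~1 Mpc

# Virial estimate M ~ v^2 R / G
M = v**2 * R / G
print("M ~ %.1e g ~ %.1e Msun" % (M, M / Msun))   # ~2e14 Msun
\end{verbatim}
The resulting $M \sim {\rm few}\times 10^{14}\,M_\odot$ is indeed of the cluster scale used in the exercises above.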
Once again, the total mass is much larger than the baryonic mass $M \gg M_b$. Cluster measurements are by now well established, with many well-studied and very well-modeled clusters, and there is good agreement among the estimates of $M$ from dynamics, lensing, x-ray measurements, and the SZ effect. The current state of the art actually goes much further: one can now not only establish the existence of dark matter, but also map its detailed distribution within the cluster. \medskip \noindent {\sl Exercise 6. Following Zwicky \cite{coma1937}, use the virial theorem to find an approximate formula relating the average mass of a galaxy in a galaxy cluster to the observed size and velocity dispersion of the cluster, assuming that the system is self-gravitating (and assuming only that the observed galaxies contribute to the mass of the system). What answer would Zwicky have found for the Coma cluster with modern data?} \medskip \subsection{Cosmic Microwave Background and Large-Scale Structure} \begin{figure}[t] \centering \includegraphics[scale=0.6]{Cls} \caption{Dependence of the CMB power spectrum on the cosmological parameters. From Ref.~\protect\cite{Kam07}.} \label{fig:Cls} \end{figure} Measurements of the cosmic microwave background (CMB) radiation and the large-scale structure (LSS) of the Universe provide perhaps the most compelling evidence that the dark matter is nonbaryonic and the most precise measurements of its abundance. One obtains from CMB maps the angular power spectrum $C_\ell$ of CMB temperature anisotropies as a function of multipole $\ell$. If the temperature $T(\mathbf{\hat{n}})$ is measured as a function of position $\mathbf{\hat{n}}$ on the sky, then one can obtain the spherical-harmonic coefficients $a_{\ell m} = \int d\mathbf{\hat{n}}\, T(\mathbf{\hat{n}}) Y_{\ell m}^*(\mathbf{\hat{n}})$. The $C_\ell$'s are then simply the variance of the spherical-harmonic coefficients: $C_\ell = \langle |a_{\ell m}|^2\rangle$. Theoretical predictions for the power spectrum depend on the values of cosmological parameters like the matter density $\Omega_m h^2$, the baryon density $\Omega_b h^2$, the cosmological constant $\Lambda$, the scalar spectral index $n_s$, the optical depth $\tau$ due to reionization, and the Hubble parameter $H_0$. One can thus determine these cosmological parameters by fitting precise measurements of the $C_\ell$'s to the theoretical predictions~\cite{Jungman:1995bz}. Current measurements provide detailed information on $C_\ell$ over the range $2 <\ell<{\cal O}(1000)$, thus providing precise constraints on the cosmological parameters. In the year 2000, data from the Boomerang and MAXIMA experiments (with supernova measurements) gave $\Omega_m h^2 = 0.13 \pm 0.05$, with error bars that shrink to $\pm 0.01$ when taking into account other measurements or assumptions (e.g., LSS, Hubble-constant, and supernova measurements, and/or the assumption of a flat Universe) \cite{Jaffe:2000tx}. Now, with WMAP, $\Omega_m h^2 = 0.133 \pm 0.006$ and $\Omega_b h^2 = 0.0227 \pm 0.0006$ \cite{wmap5}. \medskip \noindent {\sl Exercise 7. Suppose that the temperature is measured with Gaussian noise $\sigma_T\simeq 25~\mu{\mathrm K}$ in $N_{\rm pix} \sim 10^6$ pixels on the sky.
Estimate the rms temperature fluctuation $\left\langle \left(\delta T/T \right)^2 \right\rangle^{1/2}$ that results.} \medskip \section{Basic properties of dark matter} Having established the existence of dark matter and presented the case that it is nonbaryonic, we now consider the requirements for a dark-matter candidate and discuss some possibilities. Every dark-matter candidate should satisfy several requirements: \begin{itemize} \item Dark matter must be \emph{dark}, in the sense that it must generically have no (or extremely weak) interactions with photons; otherwise it might contribute to the dimming of quasars, create absorption lines in the spectra of distant quasars \cite{Profumo:2006im}, or emit photons. One way to quantify this is by assuming that dark-matter particles have a tiny charge $fe$ (where $e$ is the electron charge and $f\ll1$), which can be quantitatively constrained \cite{Davidson:2000hf}. \item Self-interactions of the dark matter should be small. We can estimate the relevant cross-section scale in the following way: a DM particle scatters less than once in the history of the Universe if its mean free path is greater than $\lambda = v_{DM} H_0^{-1} \sim \left( 3 \times 10^7 \textrm{cm}/\sec \right) \left( 10^{17} \, \sec \right) \sim 3 \times 10^{24} \, \textrm{cm}$. For a galactic-halo density $\rho_{DM} \sim 10^{-24} \textrm{g}/\textrm{cm}^3$, the corresponding critical opacity for self-scattering in the galactic halo is $\kappa = (\rho_{DM} \lambda)^{-1} = \sigma/m \sim \textrm{cm}^2/\textrm{g}$. Thus, if the elastic-scattering cross section is $\sigma \mathrel{\mathpalette\fun >} 10^{-24}\,(m/\textrm{GeV}) \, \textrm{cm}^2 $, then the typical halo--dark-matter particle scatters more than once during the history of the Universe. If dark matter self-scattered, it would suffer the \emph{gravothermal catastrophe}: that is, in binary interactions of two dark-matter particles, one particle can get ejected from the halo, while the other moves to a lower-energy state at smaller radius. As this occurs repeatedly, much of the halo evaporates and the remaining halo shrinks. Although a variety of arguments can constrain dark-matter self-interactions, stringent and very transparent constraints come from observations of the Bullet Cluster, the merger of two galaxy clusters, in which it is seen (from gravitational-lensing maps of the projected matter density) that the two dark-matter halos have passed through each other while the baryonic gas has shocked and is located between the two halos \cite{Randall:2007ph}. \item Interactions with baryons must also be weak. Suppose baryons and dark matter interact. As an overdense region collapses to form a galaxy, baryons and dark matter would fall together, with photons radiated from this baryon-DM fluid. This would result in a baryon-DM disk, in contradiction with the more diffuse and extended dark-matter halos that are observed. If DM interacted with baryons other than gravitationally in the early Universe, the baryon-photon fluid would be effectively heavier (have a higher mass loading relative to radiation pressure) even before recombination, so that the baryon acoustic oscillations in the matter power spectrum and the CMB angular power spectrum would be modified \cite{Sigurdson:2004zp}. \item Dark matter cannot be made up of Standard Model (SM) particles, since most leptons and baryons are charged.
The only potentially suitable SM candidate is the neutrino, but it cannot be dark matter because of the celebrated Gunn-Tremaine bound~\cite{GT}, which imposes a lower bound on the masses of dark-matter particles that decoupled when relativistic. The argument is the following: The momentum distribution in the Galactic halo is roughly Maxwell-Boltzmann with a momentum uncertainty $\Delta p \sim m_\nu \langle v \rangle$ ($\langle v \rangle \sim 300\, \textrm{km/sec}$), while the mean spacing between neutrinos is $\Delta x \sim n_\nu^{- 1/3} \sim \left(\rho_\nu/m_\nu \right)^{- 1/3}$. The Heisenberg uncertainty principle gives $\Delta x \, \Delta p \mathrel{\mathpalette\fun >} \hbar$, which translates into a lower bound $m_\nu \mathrel{\mathpalette\fun >} 50\, \textrm{eV}$. (This Heisenberg bound can actually be improved by a factor of 2 by using arguments involving conservation of phase space.) Stronger bounds ($m_\nu \mathrel{\mathpalette\fun >} 300\, \textrm{eV}$) can be obtained from dwarf galaxies, which have higher phase-space densities. As discussed below, there will be a cosmological population of neutrinos left over from the big bang, with density $\Omega_{\nu} h^2 \sim 0.1\, (m_\nu/10\, \textrm{eV})$. Neutrinos of mass $m_\nu \mathrel{\mathpalette\fun >} 300\, \textrm{eV}$, as required by the Gunn-Tremaine bound, would therefore overclose the Universe. Thus, neutrinos are unable to account for the dark matter. \end{itemize} \section{Weakly Interacting Massive Particles (WIMPs)} Perhaps the most attractive dark-matter candidates to have been considered are weakly-interacting massive particles (WIMPs). Many theories for new physics at the electroweak scale (e.g., supersymmetry, universal extra dimensions) introduce a new stable, weakly-interacting particle with a mass of order $M_\chi \sim 100 \, \textrm{GeV}$. For example, in supersymmetric (SUSY) theories, the WIMP is the neutralino \begin{equation} \tilde{\chi} = \xi_\gamma \tilde{\gamma} + \xi_Z \tilde{Z}^0 + \xi_{h1} \tilde{h}^0_1+\xi_{h2} \tilde{h}^0_2, \end{equation} a linear combination of the supersymmetric partners of the photon, $Z^0$ boson, and neutral Higgs bosons. Neutralinos are neutral spin-$1/2$ Majorana fermions. In theories with universal extra dimensions there are Kaluza-Klein (KK) states $\gamma_{KK}$, $Z^0_{KK}$, $H^0_{KK}$, which are neutral KK bosons. The candidates are stable (or quasi-stable, i.e., with lifetimes greater than the age of the Universe, $\tau \gg t_U$), and particle-theory models suggest masses $M_\chi \sim 10 - 10^3 \, \textrm{GeV}$. In typical theories two WIMPs can annihilate to SM particles. For example, for a neutralino we have the tree-level diagram in Fig.~\ref{fig:annihilation}, \begin{figure}[t] \centering \includegraphics[scale=1.0]{annihilation} \caption{An example of a Feynman diagram for annihilation of two WIMPs $\chi$ (neutralinos in this case) to fermion-antifermion pairs (where the fermions are either quarks $q$ or leptons $l$) via exchange of an intermediate-state squark $\tilde q$ or slepton $\tilde l$.} \label{fig:annihilation} \end{figure} where $m_{\tilde{q}, \tilde{l}} \sim 100 \, \textrm{GeV}$, so that $\sigma \sim \alpha^2 m_{\tilde{q}, \, \tilde{l}}^{-4} M_\chi^2 \sim 10^{-8} \, \textrm{GeV}^{-2}$. \subsection{WIMP Freezeout in Early Universe} We now estimate the relic abundance of WIMPs in the standard scenario of thermal production (see, e.g., Ref.~\cite{KolbTurner}).
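Before doing so, it is worth checking numerically the weak-scale annihilation cross section just quoted. The sketch below (with the illustrative values $\alpha \sim 10^{-2}$ and $M_\chi \sim m_{\tilde q,\tilde l} \sim 100~\textrm{GeV}$ assumed, as above) reproduces $\sigma \sim 10^{-8}\,\textrm{GeV}^{-2}$:
\begin{verbatim}
alpha = 1.0e-2   # assumed weak-scale coupling
M_chi = 100.0    # assumed WIMP mass [GeV]
m_med = 100.0    # assumed squark/slepton mass [GeV]

# sigma ~ alpha^2 M_chi^2 / m_med^4, in natural units [GeV^-2]
sigma = alpha**2 * M_chi**2 / m_med**4
print("sigma ~ %.0e GeV^-2" % sigma)   # prints ~1e-08 GeV^-2
\end{verbatim}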
In the early Universe, at temperatures $T\gg M_\chi$, WIMPs are in thermal equilibrium and are nearly as abundant as lighter particles, like photons, quarks, leptons, etc. Their equilibrium abundance is maintained via rapid interconversion of $\chi\chi$ pairs and particle-antiparticle pairs of Standard Model particles. When the temperature falls below the WIMP mass, however, the WIMP abundance becomes Boltzmann suppressed, and WIMPs can no longer find each other to annihilate. The remaining WIMPs constitute a primordial relic population that still exists today. We now step through a rough calculation. To do so, we assume that the WIMP is a Majorana particle, its own antiparticle (as is the case for the neutralino, for example), although the calculation is easily generalized for WIMPs with antiparticles (e.g., KK WIMPs). The annihilation rate for WIMPs is $\Gamma(\chi \chi \leftrightarrow q \bar{q} , \, \ell \bar{\ell} , \, \dots) = n_\chi \langle \sigma v \rangle$, where $\sigma$ is the cross section for annihilation of two WIMPs to all lighter standard-model particles, $v$ is the relative velocity, and the angle brackets denote a thermal average. The expansion rate of the Universe is $H = \left( 8 \pi G \rho/3 \right)^{1/2} \sim T^2/M_{\mathrm{Pl}}$ during the radiation era, where $\rho \propto T^4$. In the spirit of ``squiggly lines'' we have neglected factors like the effective number of relativistic degrees of freedom $g_*$ in the expansion rate, which the careful reader can restore for a more refined estimate. By comparing these two rates, one can identify two different regimes: \begin{itemize} \item At early times, when $T \gg M_\chi$, $n_\chi \propto T^3$ and $\Gamma \gg H$: particles scatter and annihilate many times during a Hubble time, and this maintains chemical equilibrium. \item At late times, when $T \ll M_\chi$, $n_\chi \propto T^{3/2}e^{- M_\chi/T}$ (note that the chemical potential $\mu_\chi = 0$ in the case of Majorana particles such as the neutralino) and $\Gamma \ll H$: annihilations cease, and the WIMP abundance freezes out (the comoving number density becomes constant). \end{itemize} This sequence of events is illustrated in Fig.~\ref{fig:freezeout}, which shows the comoving number density of WIMPs as a function of the inverse temperature in equilibrium (solid curve) and including freezeout (dashed curves). Freezeout occurs roughly when $\Gamma(T_f) \sim H(T_f)$. For nonrelativistic particles, $n_\chi = g_\chi \left( M_\chi T/2 \pi \right)^{3/2} e^{-M_\chi/T}$, so the freezeout condition becomes \begin{equation} \left( M_\chi T_f \right)^{3/2} e^{-M_\chi/T_f} \langle \sigma v \rangle \sim \frac{T_f^2}{M_{Pl}}\quad \Rightarrow \quad \frac{M_\chi}{T_f} \sim \ln \left[ \frac{M_{Pl} M_\chi^{3/2} \langle \sigma v \rangle}{T_f^{1/2}} \right]. \end{equation} Taking $\langle \sigma v \rangle \sim \alpha^2/M_\chi^2$, and taking as a first guess $T_f \sim M_\chi$, we finally find \begin{equation} \frac{T_f}{M_\chi} \sim \left\{ \ln \left[ \frac{M_{Pl} \alpha^2}{(M_\chi T_f)^{1/2}} \right] \right\}^{-1} \sim \left\{ \ln \left[ \frac{10^{19} 10^{-4}}{100} \right] \right\}^{-1} \sim \frac{1}{25} + \textrm{log corrections}, \end{equation} where the numerical values are characteristic electroweak-scale parameters (i.e., $\sigma \sim 10^{-8} \, \textrm{GeV}^{-2}$, $M_\chi \sim 100 \,\textrm{GeV}$). \begin{figure}[h] \centering \includegraphics[scale=0.5]{freezeout} \caption{Equilibrium (solid curve) and relic abundance (dashed curves) of WIMP particles.
From Ref.~\protect\cite{Jungman:1995df}.} \label{fig:freezeout} \end{figure} At freezeout, the abundance relative to photons is \begin{equation} \frac{n_\chi}{n_\gamma} = \frac{\Gamma(T_f)/{\langle \sigma v \rangle}}{T_f^3} = \frac{H(T_f)/{\langle \sigma v \rangle}}{T_f^3} \sim \frac{T_f^2}{M_{Pl} \langle \sigma v\rangle T_f^3} \sim \frac{1}{M_{Pl} \langle \sigma v \rangle T_f} \sim \frac{25}{M_{Pl} \langle \sigma v \rangle M_\chi}. \end{equation} Today we know that \begin{equation} \Omega_\chi = \frac{\rho_\chi}{\rho_c} \sim \frac{n_\chi^0}{n_\gamma^0} \frac{M_\chi n_\gamma^0}{\rho_c} \sim \frac{25}{M_{Pl}\langle \sigma v \rangle} \frac{400 \, \textrm{cm}^{-3}}{10^{-6} \, \textrm{GeV} \, \textrm{cm}^{-3}} \, , \end{equation} with no explicit dependence on the particle mass. We thus obtain the observed abundance $\Omega_\chi h^2 \sim 0.1$ for $\sigma \sim 10^4\, (0.1 \times 10^{19} \times 10^{-6})^{-1} \, \textrm{GeV}^{-2} \sim 10^{-8} \, \textrm{GeV}^{-2}$, which turns out to be nearly exact, even though we have been a bit sloppy. A more precise calculation (including all the factors we have dropped) gives \begin{equation} \Omega_\chi h^2 \sim 0.1 \left( \frac{3 \times 10^{-26} \, \textrm{cm}^3/\sec}{\langle \sigma v \rangle} \right) + \textrm{ log corrections}, \label{eqn:omega} \end{equation} a remarkable result, as it implies that if there is a new stable particle at the electroweak scale, it is the dark matter. As an aside, note that partial-wave unitarity of annihilation cross sections requires $\sigma \mathrel{\mathpalette\fun <} M_\chi^{-2}$, which means $\Omega_\chi h^2 \mathrel{\mathpalette\fun >} \left( M_\chi/300 \, \textrm{TeV} \right)^2$. Requiring $\Omega_\chi h^2 \mathrel{\mathpalette\fun <} 0.1$ thus implies $M_\chi \mathrel{\mathpalette\fun <} 100 \, \textrm{TeV}$, without knowing anything about particle physics \cite{Griest:1989wd}. More precisely, this bound applies for point particles and does not apply if dark-matter particles are bound states or solitons. If the interactions are strong, $\alpha \sim 1$, the bound is already saturated. Although our arguments have been rough, one finds in SUSY and KK models that there are many combinations of reasonable values for the SUSY or KK parameters that provide a WIMP with $\Omega_\chi h^2 \sim 0.1$ for $10 \, \textrm{GeV} \mathrel{\mathpalette\fun <} M_\chi \mathrel{\mathpalette\fun <} 1 \, \textrm{TeV}$. \medskip \noindent {\sl Exercise 8. Eq.~(\ref{eqn:omega}) was derived assuming that the annihilation cross section $\langle \sigma v \rangle$ is temperature-independent. Redo the estimate for $\Omega_\chi h^2$ assuming that $\langle \sigma v \rangle \propto T^n$, where $n=1,2,3,\cdots$.} \medskip \subsection{Direct detection} If WIMPs make up the halo of the Milky Way, then they have a local spatial density $n_\chi \sim 0.004\,(M_\chi/100\,\textrm{GeV})^{-1} \textrm{cm}^{-3}$ (roughly one per liter), and are moving with velocities $v\sim200$~km~sec$^{-1}$. Moreover, there is a crossing symmetry between the annihilation $\chi \chi \to q \bar{q}$ and the elastic scattering $\chi q \to \chi q$ processes---apart from some kinematic factors the diagrams are more or less the same (as shown in Fig.~\ref{fig:crossing})---so one expects roughly that the cross section $\sigma(\chi q \to \chi q) \sim \sigma(\chi \chi \to q \bar{q}) \sim 10^{-36} \, \textrm{cm}^2$.
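The conversion between the natural-units value $\sigma \sim 10^{-8}~\textrm{GeV}^{-2}$ quoted earlier and this $\sim 10^{-36}~\textrm{cm}^2$ is just a factor of $(\hbar c)^2$; a minimal check (sketch):
\begin{verbatim}
hbar_c = 1.973e-14   # hbar*c [GeV cm]

sigma_natural = 1.0e-8                  # cross section [GeV^-2]
sigma_cm2 = sigma_natural * hbar_c**2   # convert GeV^-2 -> cm^2
print("sigma ~ %.1e cm^2" % sigma_cm2)  # ~3.9e-36 cm^2
\end{verbatim}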
One can therefore hope to detect a WIMP directly by observing its interaction with some target nucleus in a low-background detector composed, e.g., of germanium, xenon, silicon, sodium, iodine, or some other element. \begin{figure}[h] \centering \includegraphics[scale=0.7]{neutralino} \caption{Crossing symmetry between annihilation and scattering diagrams.} \label{fig:crossing} \end{figure} At low energies, quarks are bound into nucleons, and nucleons in turn are bound into nuclei, so the cross section one actually needs is $\sigma(\chi N \to \chi N)$ (where $N$ here stands for a nucleus). The calculation relating the $\chi q$ interaction to the $\chi N$ interaction requires both QCD and nuclear physics. It is complicated but straightforward. Here we will simply assume, for illustration, that $\sigma(\chi N \to \chi N) \sim \sigma(\chi q \to \chi q)$. The rate at which a nucleus in the detector is hit by halo WIMPs is then \begin{equation} R \sim n_\chi \sigma v \sim (0.004 \, \textrm{cm}^{-3}) (10^{-36} \, \textrm{cm}^2) \left(3 \times 10^7 \frac{\textrm{cm}}{\sec} \right) \sim 10^{-24} \textrm{yr}^{-1}; \end{equation} if there are $6 \times 10^{23}\, M/(A \, \textrm{g})$ nuclei in a detector, for a mass number $A \sim 100$ we expect to see $R \sim 10$ events$/\textrm{kg}/\textrm{yr}$. Let us estimate the recoil energy of a nucleus struck by a WIMP. If a WIMP of $M_\chi \sim 100 \, \textrm{GeV}$ runs into a nucleus with $A \sim 100$, the momentum change is $\Delta p \sim M_\chi v$, and the nucleus recoils with an energy of order $E \sim (\Delta p)^2/2 m \sim (100 \, \textrm{GeV} \, 10^{-3})^2(100 \, \textrm{GeV})^{-1} \sim 100 \, \textrm{keV}$. To do things more carefully, one has to account for the fact that the cross sections one actually needs are the interaction cross sections with nuclei, and in the steps \begin{displaymath} \sigma(\chi q) \underset{\textrm{QCD}}{\longrightarrow} \sigma(\chi n), \sigma(\chi p) \underset{\textrm{nuclear physics}}{\longrightarrow} \sigma(\chi N), \end{displaymath} some theoretical uncertainties are introduced. One also finds that $\sigma(\chi N)$ is reduced relative to $\sigma(\chi q)$ by several orders of magnitude. Qualitatively, there are two different types of interactions, axial and scalar (or spin-dependent and spin-independent). The first is described by the Lagrangian \begin{equation} \mathcal{L}_{\textrm{axial}} \propto \bar{\chi} \gamma^\mu \gamma_5 \chi \, \bar{q} \gamma_\mu \gamma_5 q, \end{equation} which couples $\chi$ to the spin of unpaired nucleons; this works only for nuclei with spin, and the coupling is different for unpaired protons or neutrons. Through this interaction one expects $\sigma \propto {\bar{s}}^2$, where $\bar{s}$ is the average spin $\sim 1/2$ of the unpaired proton or neutron in nuclei with an odd mass number. The second interaction is described by the Lagrangian \begin{equation} \mathcal{L}_{\textrm{scalar}} \propto \bar{\chi} \chi \bar{q} q, \end{equation} which couples $\chi$ to the mass of the nucleus, thus giving a cross section $\sigma \propto M^2 \propto A^2$ (where $M$ and $A$ are the nuclear mass and mass number), which implies higher cross sections for larger $A$. However, this scaling is only valid up to a limit.
In fact, the momentum exchanged is $\Delta p \sim (100 \, \textrm{GeV}) (10^{-3}) \sim 0.1 \, \textrm{GeV}$, and the nuclear radius is roughly $r \sim A^{1/3} 10^{-13} \, \textrm{cm}$, so from the uncertainty principle one has $r\, \Delta p \mathrel{\mathpalette\fun >} 1$ when \begin{equation} \frac{(0.1 \, \textrm{GeV}) (10^{-13} \, \textrm{cm})}{2 \times 10^{-14} \, \textrm{GeV} \, \textrm{cm}} A^{1/3} \mathrel{\mathpalette\fun >} 1 \, \qquad \Longrightarrow \qquad\, A \mathrel{\mathpalette\fun >} 10. \end{equation} Detailed calculations show that the cross section for WIMP-nucleus elastic scattering does not increase much past $A \mathrel{\mathpalette\fun >} 100$. In experiments, people usually draw exclusion curves for the WIMP-nucleon cross section versus the WIMP mass $M_\chi$. The exclusion curves are less constraining both for low $M_\chi$ because of the low recoil energy, and for large $M_\chi$ because (for fixed local energy density $\rho_\chi$) the number density $n_\chi \propto M_\chi^{-1}$. \begin{figure}[h] \centering \includegraphics[scale=0.45]{exclusion} \caption{Exclusion plot for the spin-independent dark-matter parameter space. The region favored by the DAMA annual modulation is inconsistent with the current bound (solid curve) from CDMS. The broken curves are forecasts for future experiments. We also show, for illustrative purposes only, predictions for a WIMP model with a lightest-Higgs-boson mass of $m_h=150$ GeV.} \label{fig:exclusionplot} \end{figure} To date, only the DAMA experiment has reported a positive signal \cite{Bernabei:2000qi}. They used NaI, in which both nuclei have spin, one with an unpaired proton and the other with an unpaired neutron. The interpretation of their signal in terms of a WIMP with scalar interactions was ruled out by null results (at the time) from CDMS. An interpretation of their signal in terms of a spin-dependent WIMP-neutron interaction was ruled out by the null search in their Xe detector \cite{Ullio00}. While the interpretation in terms of spin-dependent WIMP-proton scattering was consistent with null results from other direct searches \cite{Ullio00}, it was ruled out by null searches for energetic neutrinos from the Sun (see Fig.~\ref{fig:ullio}), as we explain below. The interpretation in terms of spin-dependent scattering is now also ruled out directly by null results from the COUPP experiment \cite{Behnke:2008zza}. \subsection{Energetic $\nu$'s from the Sun} The escape velocity at the surface of the Sun is $v_{s} \sim 600 \, \textrm{km}/\textrm{s}$, while at the center it is $v_{c} \sim 1300 \, \textrm{km}/\textrm{s}$. If in passing through the Sun, a WIMP from the Galactic halo scatters from a nucleus (most likely a proton) therein to a velocity less than the escape velocity, then it is gravitationally trapped within the Sun. As the gravitationally-trapped WIMP passes through the Sun subsequently, it loses energy in additional nuclear scatters and thus settles to the center of the Sun. In this way, the number of WIMPs in the center of the Sun is enhanced. These WIMPs can then annihilate to standard model particles, through the same early-Universe processes that set their relic abundance \cite{Silk:1985ax}. Decays of the annihilation products (e.g., $W^+ W^-, Z^0 Z^0, \tau^+ \tau^-, t \bar{t}, b \bar{b}, c \bar{c}, \dots$) to neutrinos will produce energetic neutrinos that can escape from the center of the Sun. 
The neutrino energies are $E_\nu \sim \left[ (1/3)-(1/2) \right] M_\chi \sim 100 \, \textrm{GeV}$ and so cannot be confused with ordinary solar neutrinos, which have energies $\sim$MeV. At night, these neutrinos will move up through the Earth. If the neutrino produces a muon through a charged-current interaction in the rock below a neutrino telescope (e.g., Super-Kamiokande, AMANDA, or IceCube), the muon may be seen. In this way, one can search for these WIMP-annihilation--induced neutrinos from the Sun. \begin{figure}[ht] \centering \includegraphics[scale=0.7]{sigma} \caption{The shaded region shows the parameter space (in WIMP mass versus SD WIMP-proton cross section) implied by the DAMA annual modulation for a WIMP with exclusively SD interactions with protons and no interaction with neutrons. The solid curve indicates the upper bound to the SD WIMP-proton cross section from null searches for neutrino-induced upward muons from the Sun; thus the cross-hatched region is excluded~\protect\cite{Ullio00}.} \label{fig:ullio} \end{figure} \subsection{Cosmic rays from DM annihilation} In the Galactic halo, one expects the annihilation processes $\chi \chi \to \dots \to e^+ e^- , p \bar{p}, \gamma\gamma$; detection of these products can be a signal of the presence of dark matter. \medskip \noindent {\sl Exercise 9. Show that the annihilation process $\chi \chi \to e^+ e^-$ is suppressed for Majorana WIMPs as the relative velocity $v \to 0$.} \medskip \subsubsection{Positrons} Because of Galactic magnetic fields, cosmic-ray positrons and antiprotons do not propagate in straight lines and will thus appear to us as a diffuse background. Continuum $e^+$'s from WIMP annihilation are difficult to separate from ordinary cosmic-ray positrons. It has been argued that indirect processes, such as the annihilation $\chi \chi \to W^+ W^- \to e^+ \nu e^- \bar{\nu}$ \cite{positrons}, will produce a distinctive bump in the positron spectrum at energies $E_e \mathrel{\mathpalette\fun <} M_\chi$ (direct annihilation of Majorana WIMPs to electron-positron pairs is suppressed at Galactic relative velocities), as illustrated in Fig.~\ref{fig:positrons}, and there has been tremendous excitement recently with the reported detection by the PAMELA experiment of such a bump \cite{Adriani:2008zr}. However, it may be that nearby pulsars can also produce a bump in the positron spectrum \cite{Profumo:2008ms}, and more recent results from the Fermi Telescope \cite{Abdo:2009zk} call the PAMELA result into question. It will thus be important to understand the possible pulsar signal, as well as the data, more carefully before the PAMELA excess can be attributed to WIMP annihilation. \begin{figure}[b] \centering \includegraphics[scale=0.5]{positron} \caption{The positron fraction, as a function of electron-positron energy, from annihilation of a 120 GeV neutralino WIMP to gauge bosons. The different curves are for different cosmic-ray-propagation models, and in both cases, the annihilation rate has been boosted by a factor of ten relative to the canonical (smooth-halo) value. From Ref.~\protect\cite{positrons}.} \label{fig:positrons} \end{figure} \subsubsection{Antiprotons} Likewise, it has also been argued that low-energy antiprotons from WIMP annihilation can be distinguished, through their energy spectrum, from the more prosaic cosmic-ray antiprotons produced by cosmic-ray spallation.
Antiprotons can be produced by the decay of the standard WIMP-annihilation products, and the energy spectrum of such antiprotons is relatively flat at low energies. On the other hand, the energy spectrum of low-energy cosmic-ray antiprotons due to cosmic-ray spallation decreases at energies $E\mathrel{\mathpalette\fun <}$GeV. This is because the antiproton-production process $p + p_{\mathrm{ISM}} \to p + p + p + \bar{p}$ has an energy threshold, in the center of mass, of $E_{\mathrm{CM}} > 4 m_p$. This requires the primary cosmic-ray momentum to be very high. Production of an antiproton with $E_{\bar p} \mathrel{\mathpalette\fun <}$GeV therefore requires that the antiproton be ejected with momentum opposite to that of the initial cosmic-ray proton, and the phase space for this ejection is small. \subsubsection{Gamma rays} A final channel for observing WIMP annihilation is via gamma rays. Direct annihilation of WIMPs to two photons, $\chi \chi \to \gamma\gamma$, through loop diagrams such as those shown in Fig.~\ref{fig:loopgamma}, produces monoenergetic photons, with energies equal to the WIMP mass. For $v \sim 10^{-3} c$, the photon energies would be $E_\gamma = E_\chi \left( 1 \pm 10^{-3} \right)$, and one would see a narrow $\gamma$-ray line with $\Delta \nu/\nu \sim 10^{-3}$, superposed on a continuum spectrum produced by astrophysical processes; such a line would be difficult to mimic with traditional astrophysical sources. Decays of WIMP-annihilation products also produce a continuum spectrum of gamma rays at lower energies. \begin{figure}[h] \centering \includegraphics[scale=1.0]{gamma} \caption{Example of a Feynman diagram for annihilation of two neutralinos to two photons through a quark-squark loop.} \label{fig:loopgamma} \end{figure} The other advantage of gamma rays is that they propagate in straight lines. This opens up the possibility to distinguish gamma rays from WIMP annihilation from those from traditional sources through directionality---there should be a higher flux of WIMP-annihilation photons from places where WIMPs are abundant; e.g., the Galactic center. Another possibility is dwarf galaxies, which represent regions of high dark-matter density in the Milky Way halo. In general, the $\gamma$-ray flux (the number of photons per unit time-area--solid-angle) is given by \begin{equation} \frac{d F}{d \Omega} = \frac{\ens{\sigma_{\chi \chi \to \gamma \gamma} v}}{4 \pi M_\chi^2} \int_0^\infty \rho^2(l) dl, \end{equation} where the integral is taken along a given line of sight, $l$ is the distance along that line of sight, and $\rho(l)$ is the dark-matter density at that distance. (Note that if $\rho(r) \propto r^{-1}$ with Galactocentric radius $r$, as in an NFW profile, the intensity formally diverges, but the flux from any finite angular window around $r=0$ is finite.) \medskip \noindent {\sl Exercise 10. Estimate the $\gamma$-ray flux from WIMP annihilation, for a given annihilation cross section (times relative velocity) $\langle \sigma v \rangle_{\mathrm{ann}}$, in an angular window of radius $\sim5$ degrees around the Galactic center. Estimate a characteristic $\langle \sigma v \rangle$ for WIMPs and evaluate your result for the gamma-ray flux for that value. How does it compare, in order of magnitude, with the sensitivity of the Fermi Gamma Ray Telescope?} \medskip \subsubsection{Galactic Substructure and Boost Factors} The rate for annihilation, per unit volume, at any point in the Galactic halo is proportional to $\rho^2$, the square of the density at that point.
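As an aside, the line-of-sight integral in the flux formula above is straightforward to evaluate numerically once a density profile is assumed (a sketch; the NFW-like profile and all parameter values here are our illustrative choices, not fits):
\begin{verbatim}
# Sketch: line-of-sight integral of rho^2 toward the Galactic center,
# for an NFW-like profile rho(r) = rho_s / [(r/r_s)(1 + r/r_s)^2].
# All parameter values are illustrative assumptions.
import numpy as np

rho_s, r_s, R0 = 0.3, 20.0, 8.5        # [GeV/cm^3], [kpc], [kpc]
KPC_IN_CM = 3.086e21

def rho(r_kpc):
    x = r_kpc / r_s
    return rho_s / (x * (1.0 + x)**2)

def los_integral(psi, l_max=100.0, n=200000):
    """Integral of rho^2 dl at angle psi (radians) from the Galactic center."""
    l = np.linspace(1.0e-4, l_max, n)   # distance along the line of sight [kpc]
    r = np.sqrt(R0**2 + l**2 - 2.0 * R0 * l * np.cos(psi))
    return (rho(r)**2).sum() * (l[1] - l[0]) * KPC_IN_CM  # [GeV^2 cm^-5]

print(f"{los_integral(np.radians(1.0)):.2e} GeV^2 cm^-5")
\end{verbatim}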
The total annihilation rate in the halo, or in some finite volume of the halo, is then proportional to $\int dV\, \rho^2$, the integral, over that volume, of the density squared. In the canonical model, the halo density is presumed to vary smoothly with position in the Galaxy with some density profile; e.g., the isothermal profile in Eq.~(\ref{isothermal}). However, a Galactic halo forms as the most recent stage in a sequence of hierarchical structure formation. In this scenario, small objects undergo gravitational collapse first; they then merge to form more massive objects, which then merge to form even more massive objects, etc. If some of these substructures remain partially intact as they merge into more massive halos, then any given halo (in particular, the Milky Way halo) may have a clumpy distribution of dark matter. This is in fact seen in simulations. What this implies is that the annihilation rate in the halo may be enhanced by a ``boost factor'' $B\propto \langle \rho^2 \rangle/\langle \rho \rangle^2$, where the averages are over volume in the halo \cite{clumping}. It may be possible to see angular variations in the gamma-ray signal from WIMP annihilation, due to this substructure \cite{anisotropy,Lee:2008fm}. It has even been suggested that proper motions of nearby substructures may be visible \cite{Koushiappas:2006qq}, although Ref.~\cite{ando} disputed this claim. As we will see below, the first gravitationally-collapsed objects in WIMP models have masses in the range $10^{-6}-100$ Earth masses \cite{Profumo}. These objects may have densities several hundred times those of the mean halo density today. If so, and if these Earth-mass substructures survive intact through all subsequent generations of structure formation, then the boost factor $B$ may be as large as several hundred, implying much larger cosmic-ray fluxes than the canonical model predicts. Such large boost factors are, however, unlikely. Simulations of recent generations in the structure-formation hierarchy show that while the tightly bound inner parts of halos may survive during merging, the outer parts are stripped. Ref.~\cite{savvas} developed an analytic model, parametrized in terms of a halo-survival fraction, to describe the (nearly) scale-invariant process of hierarchical clustering. This model then provided the boost factor $B$ in terms of that survival fraction. By comparing the results (cf.\ Fig.~\ref{fig:pdf}) of the analytic model for the local halo-density probability distribution function with subsequent measurements of the same distribution in simulations (Fig.~1 in Ref.~\cite{Vogelsberger:2008qb}), one infers a small halo-survival fraction. The analytic model of Ref.~\cite{savvas} then suggests, for this survival fraction, no more than a small boost factor, $B\mathrel{\mathpalette\fun <}$few. \begin{figure}[h] \centering \includegraphics[scale=0.5]{pdf} \caption{The probability distribution function $P(\rho)$ for the local dark-matter density $\rho$, due to substructure, in units of the local halo density for a smooth halo. The different curves are for different substructure-survival fractions. The power-law tail is due to substructures. From Ref.~\protect\cite{savvas}.} \label{fig:pdf} \end{figure} \section{Variations and additions} What we have described so far may be referred to as the minimal-WIMP scenario. In this scenario, the dark matter is a thermal relic with electroweak-scale cross sections.
It is neutral and scatters from baryons with cross sections $\sim10^{-40}\,\textrm{cm}^2$ (to within a few orders of magnitude). It has no astrophysical consequences in the post-freezeout Universe beyond its gravitational effects. However, the recent literature is filled with a large number of astrophysical anomalies for which explanations have been proposed in terms of non-minimal WIMPs, WIMPs endowed with extra interactions or properties. This is a vast literature, far too large to review here. We therefore provide only a brief sampling, focusing primarily on those variations that we have worked on. \subsection{Enhanced relic abundance} The calculation above of the freezeout abundance is the standard one, in which it is assumed that the Universe is radiation-dominated at $T_f \sim 10 - 100 \, \textrm{GeV}$. However, we have no empirical constraints on the expansion rate before big-bang nucleosynthesis, which happens later, at $T_{BBN} \sim 1 \, \textrm{MeV}$. One can imagine other scenarios in which the WIMP abundance changes. For instance, suppose the pre-BBN Universe is filled with some exotic matter with a stiff equation of state, $p_s = \rho_s$. This results in a scaling of the energy density of this component, $\rho_s \propto a^{-6}$, with scale factor $a$ \cite{relicabundance}. Such an equation of state may arise if the energy density is dominated by the kinetic energy of some scalar field. The equation of motion of a scalar field with a flat potential is \begin{equation} \ddot{\varphi} + 3 H \dot{\varphi} = 0 \, \qquad \Longrightarrow \qquad\, \dot{\varphi} \propto a^{-3}, \end{equation} which means that \begin{equation} \rho = \frac{1}{2} \dot{\varphi}^2 \propto a^{-6} \, . \end{equation} A stiff equation of state, or something that behaves effectively like it, may also arise, for example, in scalar-tensor theories of gravity or if there is anisotropic expansion in the early Universe. Big-bang nucleosynthesis constrains the energy density of such a new component of matter at a temperature $T\sim$MeV to be $(\rho_s/\rho_\gamma) \mathrel{\mathpalette\fun <} 0.1\, \left(T/\textrm{MeV}\right)^2$. Since $\rho_s/\rho_{\mathrm{rad}} \propto T^2$, the expansion rate with this new stiff matter will at earlier times be $H(T) \mathrel{\mathpalette\fun <} H_{\mathrm{st}}(T) \left(T/\textrm{MeV}\right)$, where $H_{\mathrm{st}}(T)$ is the standard expansion rate. Neglecting the (logarithmic) dependence of the freezeout temperature $T_f$ on the expansion rate, the WIMP abundance with this new exotic matter will be \begin{equation} \frac{n_\chi}{n_\gamma} = \frac{1}{n_\gamma} \frac{\Gamma}{\langle \sigma v \rangle} = \frac{1}{n_\gamma} \frac{H}{\langle \sigma v \rangle} \mathrel{\mathpalette\fun <} \left( \frac{n_\chi}{n_\gamma} \right)_{st} \left( \frac{T}{\textrm{MeV}}\right) \sim \left( \frac{n_\chi}{n_\gamma} \right)_{st} \left( \frac{M_\chi/25}{\textrm{MeV}}\right). \end{equation} Thus, for example, the relic abundance of an $M_\chi \sim 150 \, \textrm{GeV}$ WIMP can be increased by as much as $\sim10^4$ in this way \cite{relicabundance,Profumo:2003hq}. \medskip \noindent {\sl Exercise 11. Show that anisotropic expansion gives rise to a Friedmann equation that looks like that for a Universe with a new component of matter with $\rho \propto a^{-6}$.
To do so, consider a Universe with metric $ds^2=dt^2-[a_x(t)]^2 dx^2 -[a_y(t)]^2 dy^2-[a_z(t)]^2dz^2$, with $a_x(t)$, $a_y(t)$, and $a_z(t)$ different, and then derive the Friedmann equation for a Universe filled with homogeneous matter of density $\rho$.} \medskip \subsection{Kinetic decoupling} There are two different kinds of equilibrium for WIMPs in the primordial bath. One is chemical equilibrium, which is maintained by the reactions \begin{equation*} \chi \chi \leftrightarrow f \bar{f} \, ; \end{equation*} the other is kinetic equilibrium, maintained by the scattering \begin{equation*} \chi f \leftrightarrow \chi f \, . \end{equation*} The first reaction freezes out before the second, since $n_f \gg n_\chi$, where $f$ is any kind of light degree of freedom. However, $\sigma (\nu \chi \leftrightarrow \nu \chi) \propto E_\nu^2$ since the $\nu$'s are Yukawa coupled, and $\sigma (\gamma \chi \leftrightarrow \gamma \chi) \propto E_\gamma^2$ since the photons are coupled by $\varepsilon_{\mu \nu \rho \sigma} k^\mu k^\nu \varepsilon^\rho \varepsilon^\sigma$ \cite{Chen:2001jz}. This means that $\Gamma(\chi f \leftrightarrow \chi f)$ drops rapidly, and so kinetic freezeout happens not too much later than chemical freezeout. Detailed calculations of the kinetic-decoupling temperature $T_{kd}$ show that $T_{kd}$ varies over six orders of magnitude in scans of the SUSY and UED parameter spaces \cite{Profumo}. During the time particles are chemically but not kinetically decoupled, they have the same temperature as the thermal bath, which scales as $T_\gamma \propto a^{-1}$; after kinetic decoupling, $T_\chi = p_\chi^2/2 M_\chi \propto a^{-2}$. Density perturbations $\delta \rho_\chi/\rho_\chi$ are thus suppressed on scales $\lambda_{\rm phys} \mathrel{\mathpalette\fun <} H^{-1}$ while the WIMPs are kinetically coupled. The cutoff in the power spectrum $P(k)$ is at physical wavenumber $k_c = H(T_{kd})$, so if $T_{kd}$ decreases, $k_c$ decreases as well. We expect power to be suppressed at mass scales $M < M_c$, where $M_c \sim 10^{-4} - 10^2 M_\oplus$ is the mass enclosed in the horizon at $T_{kd}$, as shown in Fig.~\ref{fig:kcut} \cite{Profumo}. \medskip \noindent {\sl Exercise 12. Derive the mass $M_{kd}$ enclosed within the horizon at a temperature $T_{kd}$.} \medskip \begin{figure}[h] \centering \includegraphics[scale=0.7]{kcut} \caption{The wavenumber and mass scale at which the primordial power spectrum is cut off due to kinetic decoupling of WIMPs in supersymmetric and UED models for WIMPs. From Ref.~\protect\cite{Profumo}.} \label{fig:kcut} \end{figure} \subsection{Particle Decay and Suppression of Small-Scale Power} It might be the case that dark matter is produced by the decay of a metastable particle that was once in kinetic equilibrium with the thermal bath. For instance, although the dark matter cannot be a charged particle, it might be produced by the decay of a charged particle. The growth of perturbation modes that enter the horizon prior to the decay of the charged particle will be suppressed relative to the standard case due to the coupling to the thermal bath: growth of charged-particle density perturbations is suppressed since charged particles cannot move through the baryon-photon fluid. If one has $\chi^+ \to \chi^0 + e^+$, with $\tau \sim 3.5\,\textrm{yr}$ ($z \sim 10^7$), then the matter power spectrum $P(k)$ is suppressed for $k \mathrel{\mathpalette\fun >} \textrm{Mpc}^{-1}$ \cite{Sigurdson:2003vy}, while for shorter lifetimes structure will be suppressed for larger $k$ (smaller length scales).
Models exhibiting charged-particle decay can be found in the parameter space of standard or minimal extensions of canonical WIMP (e.g., supersymmetric) scenarios \cite{Profumo:2004qt}. While limits on energy injection and the formation of exotic bound states in big bang nucleosynthesis (BBN) constrain the fraction of the Universe bound up in charged particles \cite{bbncpd}, the suppression of power due to particle decay in the Universe remains a potentially observable effect of metastable particles. The metastable particle might remain in kinetic equilibrium via some other interaction; alternatively, even if the particle is out of kinetic equilibrium, the energy released in the decay may impart to the dark-matter particle a velocity high enough to erase small-scale structure via free streaming \cite{sssdecay}. Future measurements of high-redshift cosmic 21-cm fluctuations may provide a direct probe of modifications to the small-scale dark-matter power spectrum and other aspects of fundamental physics (see, e.g., \cite{Profumo:2004qt,cosmic21cm}). \medskip \noindent {\sl Exercise 13. Derive the comoving wavenumber $k$ that enters the horizon at the time a particle of lifetime $\tau$ decays.} \medskip \subsection{Dipole dark matter} \begin{figure}[h] \centering \includegraphics[scale=0.5]{dipole} \caption{Constraints to the dipole-mass parameter space for dark matter with an electric or magnetic dipole. From Ref.~\protect\cite{Sigurdson:2004zp}.} \label{fig:dipole} \end{figure} While dark matter cannot be a charged particle, it may (via higher-order interactions) be endowed with electric or magnetic dipole interactions of the form \cite{Sigurdson:2004zp,Profumo:2006im} \begin{equation} {\cal L}_{\textrm{dipole}} \propto \bar{\chi}_i \sigma_{\mu\nu}\left(\mu_{ij} + \gamma_5 {\cal D}_{ij}\right)\chi_j F^{\mu\nu}. \end{equation} Here, diagonal interaction terms ($i=j$) are the magnetic ($\mu$) or electric (${\cal D}$) dipole moments of a particle $\chi$, while off-diagonal terms ($i\neq j$) are referred to as transition moments between the lightest WIMP state $i$ and another, slightly heavier, WIMP state $j$. Such a dipole coupling to photons alters the evolution of dark-matter density perturbations and CMB anisotropies \cite{Sigurdson:2004zp}, although the strongest constraints to dipole moments come from precision tests of the Standard Model for WIMP masses $M_\chi \mathrel{\mathpalette\fun <} 10$ GeV and from direct-detection experiments for $M_\chi \mathrel{\mathpalette\fun >} 10$ GeV \cite{Sigurdson:2004zp,Masso:2009mu}; see Fig.~\ref{fig:dipole} for the full constraints. It may be possible to explain the results of the DAMA experiment using low-mass dipolar dark matter with a transition moment \cite{Masso:2009mu}. It may also be possible to look for the effects of a transition dipole moment in the absorption of high-energy photons from distant sources \cite{Profumo:2006im}. \medskip \noindent {\sl Exercise 14. Calculate the cross section for elastic scattering of a particle with an electric dipole moment of magnitude $d$ from a nucleus with charge $Ze$.} \medskip \subsection{Gravitational constraints} It is generally assumed that while dark matter may involve new physics, the gravitational interactions of the dark matter are standard. In other words, it is generally assumed that the gravitational force between two DM particles, and between a dark-matter particle and a baryon, is the same as that between two baryons.
More precisely, the Newtonian gravitational force law between baryons that has been tested in the laboratory and in the Solar System reads $F_{b_1 b_2} = G m_1 m_2/d^2$. We then usually assume that the force between baryons and DM is $F_{bd} = G m_b m_d/d^2$, and also that the gravitational DM-DM force law is $F_{d_1 d_2} = G_d m_{d_1} m_{d_2}/d^2$ with $G_d=G$. However, there is no empirical evidence that this is true at more than the order-unity level \cite{Gradwohl92}, and it has even been postulated that $G_d =2 G$ in order to account for the void abundance \cite{Gubser:2004uh}. A similar behavior (an increase in the DM-DM force) could also arise if there were a new long-range interaction mediated by a nearly massless scalar field $\varphi$ with Yukawa interactions $\varphi \bar{\psi} \psi$ with the DM field $\psi$. The difficulty in providing empirical constraints to this model is that measurements (e.g., gravitational lensing or stellar/galactic dynamics) of the dark-matter distribution determine only the gravitational potential $\Phi$ due to the dark-matter distribution, represented by some density $\rho_d(\vec r)$, obtained through the Poisson equation $\nabla^2 \Phi = 4 \pi G \rho_d$. However, the same $\Phi$ can be obtained by replacing $\rho_d \to (1/2)\rho_d$ if we simultaneously replace $G \to 2 G$. It turns out, though, that this exotic interaction can be constrained by looking at substructures in the Milky Way halo~\cite{Kesden1,Kesden2}. The Sagittarius dwarf galaxy is dark-matter dominated, and it follows an elongated orbit around the Milky Way. When the dwarf reaches its point of closest approach to the Milky Way, the tidal forces it experiences in the Milky Way potential are largest. Stars are then stripped from the innermost and outermost edges of the dwarf. Those from the innermost parts move at slightly larger velocities in the Galactic halo and at slightly smaller Galactocentric radii; they thus subsequently run ahead of the Sagittarius dwarf and form the leading tidal tail of the Sagittarius dwarf that is observed. Conversely, those stripped from the outer edge subsequently lag behind, forming the trailing tidal tail that is observed. Observationally, the leading and trailing tails have roughly the same brightness, as expected. Suppose now that the DM-DM force law were modified to $G_d = f G$ with $f>1$. The dark-matter halo of the Sagittarius dwarf would then be accelerated toward the Milky Way center more strongly than the stellar component of the Sagittarius dwarf. The stellar component would then slosh to the outermost edge. Then, when the dwarf reaches its point of closest approach to the Milky Way, stars are still stripped from the outer edge, forming a trailing tail. However, there are now no stars at the innermost edge to form the leading tail. The evacuation of stars from the leading tail is inconsistent with observations, and this leads, with detailed calculations, to a bound $G_{d} = G \left( 1 \pm 0.1 \right)$ on Newton's constant for DM-DM interactions. In other words, dark matter and ordinary matter fall the same way, to within 10\%, in a gravitational potential well. While Ref.~\cite{Peebles:2009th} has more recently claimed to run a simulation of the tidal tails of the Sagittarius dwarf consistent with $G_d=2G$, Ref.~\cite{Kesden:2009bb} has argued that the initial conditions for that simulation are self-inconsistent.
Refs.~\cite{Carroll:2008ub,Carroll:2009dw} argue that a new long-range DM-DM force law implies, under fairly general conditions, a weaker long-range DM-baryon force law, and they discuss and compare possible tests of such a scenario. \subsection{Electromagnetic-like interactions for dark matter?} Another possibility is that dark matter experiences long-range electromagnetic-like forces mediated by a dark massless photon that couples to ordinary matter only through gravity. Of course, if the fine-structure constant $\alpha_d$ associated with this dark $U(1)$ symmetry is too large, then long-range dark forces will render the dark matter effectively collisional. This constrains $\alpha_d \mathrel{\mathpalette\fun <} 0.005\, (M_\chi/{\mathrm{TeV}})^{3/2}$ \cite{Ackerman:2008gi}. Far more restrictive constraints may come from plasma instabilities that can develop if there are (dark) positively and negatively charged dark-matter particles, but precise calculations of these effects remain to be done. See Refs.~\cite{Ackerman:2008gi,Feng:2009mn} for more discussion of these models. \medskip \noindent {\sl Exercise 15. Estimate the relic abundance of a dark-matter particle with dark charge $\alpha_d$ assuming that it annihilates to dark-photon pairs and assuming that the dark sector has the same temperature as the rest of the primordial plasma.} \medskip \section{Some other particle dark-matter candidates} WIMP models are interesting for a number of reasons: (1) the correct relic density arises naturally if there is new physics at the electroweak scale; (2) there are good prospects for detection of these particles, if they are indeed the dark matter; and (3) there is synergy with the goals of accelerator searches (especially at the LHC) for new electroweak-scale physics. Still, there are a large number of other particle candidates for dark matter. Here we discuss two, the sterile neutrino and the axion, which may also arise in extensions of the standard model and for which there are clear paths toward detection if they make up the dark matter. \subsection{Sterile Neutrinos} A convenient mechanism to introduce neutrino masses, and to explain their smallness, through a minimal extension of the Standard Model is to add three right-handed neutrinos that are singlets under the SM gauge group. In the $(\nu_L, \nu_R)$ basis, the mass matrix is taken to be of the form (for simplicity we consider only one family) \begin{equation} \left( \begin{array}{cc} 0 & M_D \\ M_D & M \end{array} \right), \end{equation} where $\nu_L$ and $\nu_R$ are the left-handed and right-handed (weakly-interacting and sterile, respectively) fields. In the see-saw mechanism, the Dirac mass is assumed to be tiny compared with the Majorana mass; i.e., $M_D \ll M$. The mass eigenstates then have masses $M_1 \simeq M_D^2/M \ll M$ and $M_2\simeq M$. For our purposes, it is advantageous to map the two-dimensional $M_D$-$M$ parameter space onto the $M_s$-$\theta$ parameter space, where $M_s$ is the mass of the sterile (heavier) neutrino and $\theta$ is the mixing angle between the two states. The active and sterile mass eigenstates can then be written \begin{align} & \ket{\nu_a} = \cos \theta \ket{\nu_L} + \sin \theta \ket{\nu_R}, \\ & \ket{\nu_s} = - \sin \theta \ket{\nu_L} + \cos \theta \ket{\nu_R}, \end{align} where $\theta =M_D/M$.
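A quick numerical illustration of the see-saw structure (a sketch; the numbers are arbitrary units, chosen only to satisfy $M_D \ll M$):
\begin{verbatim}
# See-saw check: for M_D << M the eigenvalues of [[0, M_D], [M_D, M]]
# are approximately -M_D^2/M and M, with mixing angle theta ~ M_D/M.
import numpy as np

M_D, M = 1.0, 1.0e6                      # arbitrary units, M_D << M
evals, evecs = np.linalg.eigh(np.array([[0.0, M_D], [M_D, M]]))
print(evals)                             # ~ [-1e-6, 1e6]
print(-M_D**2 / M, M)                    # see-saw approximation
print(M_D / M, abs(evecs[1, 0]))         # theta vs. nu_R content of light state
\end{verbatim}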
Sterile neutrinos can be produced in the early Universe and have both (1) a lifetime longer than the age of the Universe and (2) a cosmological density $\Omega_s\sim 0.2$ if the sterile-neutrino mass is in the $\sim$keV regime \cite{Dodelson:1993je}. The main decay mode of the sterile neutrino is then $\nu_S \to \nu \nu \bar{\nu}$, through the exchange of a $Z^0$ boson, as shown in Fig.~\ref{fig:steriledecay}. The decay rate and lifetime are \begin{equation} \Gamma = \frac{G_F^2 M_S^5}{96 \pi^3} \theta^2 \qquad \Rightarrow \qquad \tau_S = \frac{\hbar}{\Gamma} \sim 10^{20} \, \sec \left( \frac{M_S}{\textrm{keV}} \right)^{-5} \theta^{-2}. \end{equation} \begin{figure}[ht] \centering \includegraphics[scale=1.2]{sterile} \caption{Main decay channel for sterile neutrinos.} \label{fig:steriledecay} \end{figure} If the sterile neutrinos constitute the dark matter, then it must be that $\tau_S \gg 10^{17} \, \sec$, which is possible if $M_S \sim O(1) \, \textrm{keV}$. This mass cannot, however, be too small, because of the Gunn-Tremaine limit from dwarf-spheroidal galaxies, which is $M_S \mathrel{\mathpalette\fun >} 0.3 \, \textrm{keV}$. A stronger constraint to the model comes from the X-ray emission in the radiative decay $\nu_S \to \nu \gamma$, through the diagram in Fig.~\ref{fig:sterileloop}. This produces an X-ray line that can be sought in the spectrum of, e.g., a galaxy cluster. While null searches for such lines (and for a contribution to the diffuse cosmic X-ray background) provide stringent constraints to the model \cite{Xray1,Xray2}, there are still some regions in the $M_s$-$\theta$ parameter space that remain consistent with current constraints. This region may be probed, however, with future, more sensitive X-ray searches. One interesting extended application of sterile-neutrino dark matter was its use as a potential mechanism for generating momentum anisotropy during supernovae to drive pulsar kicks \cite{Kusenko04}. See, for instance, Refs.~\cite{Shaposhnikov07,Kusenko:2009up} for the current status of sterile-neutrino dark matter. \begin{figure}[ht] \centering \includegraphics[scale=1.2]{sterileloop} \caption{Loop diagram for the decay $\nu_s \to \nu \gamma$.} \label{fig:sterileloop} \end{figure} \subsection{Axions} Axions arise in the Peccei-Quinn (PQ) solution to the strong-$CP$ problem \cite{PQ}. A global $U(1)_{PQ}$ symmetry is spontaneously broken at a scale $f_a$, and the CP-violating phase $\theta$ in the QCD Lagrangian becomes a dynamical field with a flat potential. At temperatures below the QCD phase transition, nonperturbative quantum effects explicitly break the symmetry and produce a non-flat potential that is minimized at $\theta = 0$. The axion is the pseudo-Nambu-Goldstone boson of this approximate global symmetry, the particle associated with excitations about the minimum at $\theta=0$. The axion mass is $m_a \simeq\, {\rm eV}\,(10^7\, {\rm GeV}/ f_a)$, and its coupling to ordinary matter is $\propto f_a^{-1}$. The Peccei-Quinn solution works equally well for any value of $f_a$. However, a variety of astrophysical observations and laboratory experiments constrain the axion mass to be $m_a\sim10^{-4}$ eV. Smaller masses would lead to an unacceptably large cosmological abundance. Larger masses are ruled out by a combination of constraints from supernova 1987A, globular clusters, laboratory experiments, and a search for two-photon decays of relic axions.
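For orientation, the mass relation quoted above ties this viable window to a very high PQ scale (a one-line check of our own, using only that relation):
\begin{verbatim}
# The viable axion mass m_a ~ 1e-4 eV corresponds to a PQ scale
# f_a ~ 1e11 GeV, from the relation m_a ~ eV * (1e7 GeV / f_a).
m_a = 1.0e-4                       # [eV]
f_a = 1.0e7 / m_a                  # [GeV]
print(f"f_a ~ {f_a:.0e} GeV")      # ~1e11 GeV
\end{verbatim}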
Curiously enough, if the axion mass is in the relatively small viable range, the relic density is $\Omega_a\sim1$, and so the axion may account for the halo dark matter. Such axions would be produced with zero momentum by a misalignment mechanism in the early Universe and would therefore act as cold dark matter. During the process of galaxy formation, these axions would fall into the Galactic potential well and would therefore be present in our halo with a velocity dispersion near 270 km~sec$^{-1}$. It has been noted that quantum gravity is generically expected to violate global symmetries, and unless these Planck-scale effects can be suppressed by a huge factor, the Peccei-Quinn mechanism may be invalidated \cite{gravity}. Of course, we have at this point no predictive theory of quantum gravity, and several mechanisms for forbidding these global-symmetry-violating terms have been proposed \cite{solutions}. Therefore, discovery of an axion might provide much-needed clues to the nature of Planck-scale physics. There is a very weak coupling of an axion to photons through the triangle anomaly, a coupling mediated by the exchange of virtual quarks and leptons. The axion can therefore decay to two photons, but the lifetime is $\tau_{a\rightarrow \gamma\gamma} \sim 10^{50}\, {\rm s}\, (m_a / 10^{-5}\, {\rm eV})^{-5}$, which is huge compared to the age of the Universe; the decay is therefore unobservable. However, the $a\gamma\gamma$ term in the Lagrangian is ${\cal L}_{a\gamma\gamma} \propto a {\vec E} \cdot {\vec B}$, where ${\vec E}$ and ${\vec B}$ are the electric and magnetic field strengths. Therefore, if one immerses a resonant cavity in a strong magnetic field, Galactic axions that pass through the detector may be converted to fundamental excitations of the cavity, and these may be observable \cite{sikivie}. Such an experiment is currently underway \cite{axionexperiments} and has already begun to probe part of the cosmologically interesting parameter space (see the Figure in Ref.~\cite{karlles}), and it should cover most of the interesting region of parameter space in the next few years. Axions, or other light pseudoscalar particles, may show up astrophysically or experimentally in other ways. For example, the PVLAS Collaboration \cite{pvlas} reported the observation of an anomalously large rotation of the linear polarization of a laser when passed through a strong magnetic field. Such a rotation is expected in quantum electrodynamics, but the magnitude they reported was in excess of this expectation. One possible explanation is a coupling of the pseudoscalar $F \tilde F$ of electromagnetism to a low-mass axion-like pseudoscalar field. The region of the mass-coupling parameter space implied by this experiment violates limits for axions from astrophysical constraints, but there may be nonminimal models that can accommodate those constraints. Ref.~\cite{kris} reviews the theoretical interpretation and shows how the interactions of axions and other axion-like particles may be tested with x-ray re-appearance experiments. While the original PVLAS results have now been called into question~\cite{Chou:2007zzc}, variations of the model may still be worth investigating. \section{Conclusions} Here we have reviewed briefly the basic astrophysical evidence for dark matter, some simple astrophysical constraints to its physical properties, and the canonical WIMP model for dark matter. We then discussed a number of variations of the canonical model, as well as some alternative particle dark-matter candidates.
Still, we have only scratched the surface here, surveying only a small fraction of the possibilities for non-minimal dark matter. Readers who are interested in learning more are encouraged to browse the recent literature, where they will find an almost endless flow of interesting possibilities for dark matter, beyond those we have reviewed here. \bigskip \begin{acknowledgement} We thank Sabino Matarrese for initiating this collaboration during the Como summer school at which these lectures were given. We also thank the Aspen Center for Physics, where part of this review was completed. This work was supported at Caltech by DoE DE-FG03-92-ER40701 and the Gordon and Betty Moore Foundation, and at the University of British Columbia by a NSERC of Canada Discovery Grant. \end{acknowledgement}
\section{Introduction} The field of quantum computing has undergone an explosion of activity over the past few years. Several important quantum algorithms are now known. Moreover, prototypical quantum computers have already been built using nuclear magnetic resonance [1, 2] and nonlinear optics technologies [3]. Such devices are far from being general-purpose computers. Nevertheless, they constitute significant milestones along the road to practical quantum computing. A quantum computer is a physical device whose natural evolution over time can be interpreted as the execution of a useful computation. The basic element of a quantum computer is the quantum bit or ``qubit'', implemented physically as the state of some convenient 2-state quantum system such as the spin of an electron. Whereas a classical bit must be either a 0 or a 1 at any instant, a qubit is allowed to be an arbitrary superposition of a 0 and a 1 simultaneously. To make a quantum memory register we simply consider the simultaneous state of (possibly entangled) tuples of qubits. The state of a quantum memory register, or any other isolated quantum system, evolves in time according to some unitary operator. Hence, if the evolved state of a quantum memory register is interpreted as having implemented some computation, that computation must be describable as a unitary operator. If the quantum memory register consists of $n$ qubits, this operator will be represented, mathematically, as some $2^n \times 2^n$ dimensional unitary matrix. Several quantum algorithms are now known, the most famous examples being Deutsch and Jozsa's algorithm for deciding whether a function is constant or balanced [4], Shor's algorithm for factoring a composite integer [5] and Grover's algorithm for finding an item in an unstructured database [6]. However, the field is growing rapidly and new quantum algorithms are being discovered every year. Some recent examples include Brassard, Hoyer, and Tapp's quantum algorithm for counting the number of solutions to a problem [7], Cerf, Grover, and Williams' quantum algorithm for solving NP-complete problems by nesting one quantum search within another [8] and van Dam, Hoyer, and Tapp's algorithm for distributed quantum computing [9]. The fact that quantum algorithms are describable in terms of unitary transformations is both good news and bad for quantum computing. The good news is that knowing that a quantum computer must perform a unitary transformation allows theorems to be proved about the tasks that quantum computers can and cannot do. For example, Zalka has proved that Grover's algorithm is optimal [10]. Aharonov, Kitaev, and Nisan have proved that a quantum algorithm that involves intermediate measurements is no more powerful than one that postpones all measurements until the end of the unitary evolution stage [11]. Both these proofs rely upon quantum algorithms being unitary transformations. On the other hand, the bad news is that many computations that we would like to perform are not originally described in terms of unitary operators. For example, a desired computation might be nonlinear, irreversible or both nonlinear and irreversible. As a unitary transformation must be linear and reversible, we might need to be quite creative in encoding a desired computation on a quantum computer. Irreversibility can be handled by incorporating extra ``ancilla'' qubits that permit us to remember the input corresponding to each output. But nonlinear transformations are still problematic.
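To make the reversibility remark concrete, here is a minimal numerical illustration (our example, not from the discussion above): the irreversible AND operation becomes a permutation, and hence a unitary operator, once an ancilla qubit records the result. This is the familiar Toffoli construction.
\begin{verbatim}
# The AND operation made reversible with an ancilla qubit:
#   |x, y, c>  ->  |x, y, c XOR (x AND y)>    (the Toffoli gate).
# On 3 qubits this is an 8x8 permutation matrix, hence unitary.
import numpy as np

T = np.zeros((8, 8))
for x in (0, 1):
    for y in (0, 1):
        for c in (0, 1):
            T[4 * x + 2 * y + (c ^ (x & y)), 4 * x + 2 * y + c] = 1.0

assert np.allclose(T @ T.T, np.eye(8))   # unitary (indeed its own inverse)
\end{verbatim}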
Fortunately, there is an important class of computations, the unitary transforms, such as the Fourier transform, Walsh-Hadamard transform and assorted wavelet transforms, that are describable, naturally, in terms of unitary operators. Of these, the Fourier and Walsh-Hadamard transforms have been the ones studied most extensively by the quantum computing community. In fact, the quantum Fourier transform (QFT) is now recognized as being pivotal in many known quantum algorithms [12]. The quantum Walsh-Hadamard transform is a critical component of both Shor's algorithm [5] and Grover's algorithm [6]. However, the wavelet transforms are every bit as useful as the Fourier transform, at least in the context of classical computing. For example, wavelet transforms are particularly suited to exposing the multi-scale structure of a signal. They are likely to be useful for quantum image processing and quantum data compression. It is natural therefore to consider how to achieve a quantum wavelet transform. Starting with the unitary operator for the wavelet transform, the next step in the process of finding a quantum circuit that implements it is to factor the wavelet operator into the direct sum, direct product and dot product of smaller unitary operators. These operators correspond to 1-qubit and 2-qubit quantum gates. For such a circuit to be physically realizable, the number of gates within it must be bounded above by a polynomial in the number of qubits, $n$. Finding such a factorization can be extremely challenging. For example, although there are known algebraic techniques for factoring an arbitrary $2^n \times 2^n$ operator, e.g. [13], they are guaranteed to produce $O(2^n)$, i.e., exponentially many, terms in the factorization. Hence, although such a factorization is mathematically valid, it is physically unrealizable because, when treated as a quantum circuit design, it would require too many quantum gates. Indeed, Knill has {\it proved} that an arbitrary unitary matrix will require exponentially many quantum gates if we restrict ourselves to using only gates that correspond to all 1-qubit rotations and XOR [14]. It is therefore clear that the key enabling factor for achieving an efficient quantum implementation, i.e., one with a polynomial time and space complexity, is to exploit the specific structure of the given unitary operator. Perhaps the most striking example of the potential for achieving compact and efficient quantum circuits is the case of the Walsh-Hadamard transform. In quantum computing, this transform arises whenever a quantum register is loaded with all integers in the range 0 to $2^n-1$. Classically, application of the Walsh-Hadamard transform on a vector of length $2^n$ involves a complexity of $O(2^n)$. Yet, by exploiting the factorization of the Walsh-Hadamard operator in terms of the Kronecker product, it can be implemented with a complexity of $O(1)$ by $n$ identical 1-qubit quantum gates. Likewise, the classical FFT algorithm has been found to be implementable as a quantum circuit with polynomial space and time complexity [15] (see also Sec. 2.3). However, exploitation of the operator structure arising in the wavelet transforms (and perhaps other unitary transforms) is more challenging. A key technique, in classical computing, for exposing and exploiting specific structure of a given unitary transform is the use of permutation matrices.
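Before turning to permutation matrices, the Walsh-Hadamard factorization mentioned above is easy to verify numerically (a NumPy sketch of ours, for illustration only):
\begin{verbatim}
# The Walsh-Hadamard operator on n qubits as the Kronecker product of
# n identical 1-qubit gates.
import numpy as np
from functools import reduce

W = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2.0)   # 1-qubit gate
n = 3
H = reduce(np.kron, [W] * n)         # the 2^n x 2^n Walsh-Hadamard operator

x = np.zeros(2**n); x[0] = 1.0       # register prepared in |00...0>
print(H @ x)                         # uniform superposition, amplitudes 2^(-n/2)
\end{verbatim}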
In fact, there is an extensive literature in classical computing on the use of permutation matrices for factorizing unitary transforms into simpler forms that enable efficient implementations to be devised (see, for example, [16] and [17]). However, the underlying assumption in using permutation matrices in classical computation is that they can be implemented easily and inexpensively. Indeed, they are considered so trivial that the cost of their implementation is often not included in the complexity analysis. This is because any permutation matrix can be described by its effect on the ordering of the elements of a vector. Hence, it can simply be implemented by re-ordering the elements of the vector, involving only data movement and no arithmetic operations. As is shown in this paper, the permutation matrices also play a pivotal role in the factorization of the unitary operators that arise in the wavelet transforms. However, unlike in classical computing, the cost of implementing the permutation matrices cannot be neglected in quantum computing. Indeed, the main issue in deriving feasible and efficient quantum circuits for the quantum wavelet transforms considered in this paper is the design of efficient quantum circuits for certain permutation matrices. Note that any permutation matrix acting on $n$ qubits can mathematically be represented by a $2^n \times 2^n$ unitary operator. Hence, it is possible to factor any permutation matrix by using general techniques such as [13], but this would lead to an exponential time and space complexity. However, the permutation matrices, due to their specific structure (i.e., sparsity pattern), represent a very special subclass of unitary matrices. Therefore, the key to achieving an efficient quantum implementation of permutation matrices is the exploitation of this specific structure. In this paper, we first develop efficient quantum circuits for a set of permutation matrices arising in the development of the quantum wavelet transforms (and the quantum Fourier transform). We propose three techniques for efficient quantum implementation of permutation matrices, depending on the permutation matrix considered. In the first technique, we show that a certain class of permutation matrices, designated as {\it qubit permutation matrices}, can directly be described by their effect on the ordering of qubits. This quantum description is very similar to the classical description of permutation matrices. We show that the {\it Perfect Shuffle} permutation matrix, designated as $\Pi_{2^n}$, and the {\it Bit Reversal} permutation matrix, designated as $P_{2^n}$, which arise in the quantum wavelet and Fourier transforms (as well as in many other classical computations), belong to this class. We present a new gate, designated as the {\it qubit swap gate} or $\Pi_4$, which can be used to directly derive efficient quantum circuits for implementation of the qubit permutation matrices. Interestingly, such circuits for quantum implementation of $\Pi_{2^n}$ and $P_{2^n}$ lead to new factorizations of these two permutation matrices which were not previously known in classical computation. A second technique is based on a {\it quantum arithmetic description} of permutation matrices. In particular, we consider the {\it downshift} permutation matrix, designated as $Q_{2^n}$, which plays a major role in the derivation of quantum wavelet transforms and also frequently arises in many classical computations [16].
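To preview what such a quantum arithmetic description looks like, the following sketch realizes the downshift as modular arithmetic on the register index (the direction of the shift is our assumed convention and may differ from the convention adopted below):
\begin{verbatim}
# The downshift permutation as modular arithmetic on basis states:
#   |a>  ->  |a + 1 mod 2^n>    (one possible sign convention).
import numpy as np

n = 3; N = 2**n
Q = np.zeros((N, N))
for a in range(N):
    Q[(a + 1) % N, a] = 1.0

x = np.zeros(N); x[2] = 1.0             # basis state |2>
print(int(np.argmax(Q @ x)))            # 3
\end{verbatim}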
We show that a quantum description of $Q_{2^n}$ can be given as a {\it quantum arithmetic operator}. This description then allows the quantum implementation of $Q_{2^n}$ by using the quantum arithmetic circuits proposed in [18]. A third technique is based on developing totally new factorizations of the permutation matrices. This technique is the most case-dependent, challenging, and even counterintuitive (from a classical computing point of view). For this technique, we again consider the permutation matrix $Q_{2^n}$, and we show that it can be factored in terms of the FFT, which then allows its implementation by using the circuits for the QFT. More interestingly, however, we derive a recursive factorization of $Q_{2^n}$ which was not previously known in classical computation. This new factorization enables a direct and efficient implementation of $Q_{2^n}$. Our analysis, though limited to a small set of permutation matrices, reveals some of the surprises of quantum computing in contrast to classical computing. That is, certain operations that are hard to implement in classical computing are much easier to implement in quantum computing, and vice versa. As a specific example, while the classical implementations of $\Pi_{2^n}$ and $P_{2^n}$ are much harder (in terms of the data movement pattern) than that of $Q_{2^n}$, their quantum implementation is much easier and more straightforward than that of $Q_{2^n}$. Given a wavelet kernel, its application is usually performed according to the packet or pyramid algorithms. Efficient quantum implementation of these two algorithms requires efficient circuits for operators of the form $I_{2^{n-i}} \otimes \Pi_{2^i}$ and $\Pi_{2^i} \oplus I_{2^n - 2^i}$, for some $i$, where $\otimes$ and $\oplus$ designate, respectively, the Kronecker product and the direct sum operator. We show that these operators can be efficiently implemented by using our proposed circuits for implementation of $\Pi_{2^i}$. We then consider two representative wavelet kernels, the Haar [17] and Daubechies $D^{(4)}$ [19] wavelets, which have previously been considered by Hoyer [20]. For the Haar wavelet, we show that Hoyer's proposed solution is incomplete since it does not lead to a gate-level circuit and, consequently, it does not allow the analysis of time and space complexity. We propose a scheme for the design of a complete gate-level circuit for the Haar wavelet and analyze its time and space complexity. For the Daubechies $D^{(4)}$ wavelet, we develop three new factorizations which lead to three gate-level circuits for its implementation. Interestingly, one of these factorizations allows an efficient implementation of the Daubechies $D^{(4)}$ wavelet by using the circuit for the QFT. \clearpage \section{Efficient Quantum Circuits for Two Fundamental Qubit Permutation Matrices: Perfect Shuffle and Bit-Reversal} In this section, we develop quantum circuits for two fundamental permutation matrices, the perfect shuffle, $\Pi_{2^n}$, and the bit reversal, $P_{2^n}$, which arise in quantum wavelet and Fourier transforms as well as in many classical computations involving unitary transforms for signal and image processing [16]. For quantum computing, these two permutation matrices can directly be described in terms of their effect on the ordering of qubits. This enables the design of efficient circuits for their implementation. Interestingly, these circuits lead to the discovery of new factorizations for these two permutation matrices.
\subsection{Perfect Shuffle Permutation Matrices} A classical description of $\Pi_{2^n}$ can be given by describing its effect on a given vector. If $Z$ is a $2^n$-dimensional vector, then the vector $Y = \Pi_{2^n}Z$ is obtained by splitting $Z$ in half and then interleaving the top and bottom halves, as in a perfect shuffle of a deck of cards. Alternatively, a description of the matrix $\Pi_{2^n}$, in terms of its elements $\Pi_{ij}$, for $i$ and $j = 0, 1, \cdots, 2^n-1$, can be given as \begin{equation} \Pi_{ij} = \left\{ \begin{array}{ll} 1 & \mbox{ if $j = i/2$ and $i$ is even, or if $j = (i - 1)/2 +2^{n-1}$ and $i$ is odd} \\ 0 & \mbox{ otherwise} \end{array} \right. \end{equation} As first noted by Hoyer [20], a quantum description of $\Pi_{2^n}$ can be given by \begin{equation} \Pi_{2^n}: \, \vert a_{n-1} \, a_{n-2} \, \cdots \, a_1 \, a_0 \rangle \, \longmapsto \vert a_0 \, a_{n-1} \, a_{n-2} \, \cdots a_1 \rangle \end{equation} That is, for quantum computation, $\Pi_{2^n}$ is the operator which performs the left qubit-shift operation on $n$ qubits. Note that $\Pi_{2^n}^t$ ($t$ indicates the transpose) performs the right qubit-shift operation, i.e., \begin{equation} \Pi_{2^n}^t: \, \vert a_{n-1} \, a_{n-2} \, \cdots a_1 \, a_0 \rangle \, \longmapsto \vert a_{n-2} \, \cdots a_1 \, a_0 \, a_{n-1} \rangle \end{equation} \subsection{Bit-Reversal Permutation Matrices} A classical description of $P_{2^n}$ can be given by describing its effect on a given vector. If $Z$ is a $2^n$-dimensional vector and $Y = P_{2^n}Z$, then $Y_i = Z_j$, for $i = 0, 1, \cdots, 2^n-1$, wherein $j$ is obtained by reversing the bits in the binary representation of the index $i$. Therefore, a description of the matrix $P_{2^n}$, in terms of its elements $P_{ij}$, for $i$ and $j = 0, 1, \cdots, 2^n-1$, is given as \begin{equation} P_{ij} = \left\{ \begin{array}{cc} 1 & \mbox{if $j$ is the bit reversal of $i$} \\ 0 & \mbox{otherwise} \end{array} \right. \end{equation} A factorization of $P_{2^n}$ in terms of $\Pi_{2^i}$ is given as [16] \begin{equation} P_{2^n} = \Pi_{2^n}(I_2 \otimes \Pi_{2^{n-1}}) \cdots (I_{2^i} \otimes \Pi_{2^{n-i}}) \, \cdots (I_{2^{n-3}} \otimes \Pi_8)(I_{2^{n-2}} \otimes \Pi_4) \end{equation} A quantum description of $P_{2^n}$ is given as \begin{equation} P_{2^n}: \, \vert a_{n-1} \, a_{n-2} \, \cdots a_1 \, a_0 \rangle \, \longmapsto \vert a_0 \, a_1 \, \cdots a_{n-2} \, a_{n-1} \rangle \end{equation} That is, $P_{2^n}$ is the operator which reverses the order of the $n$ qubits. This quantum description can be seen from the factorization of $P_{2^n}$ given by (5) and the quantum description of the permutation matrices $\Pi_{2^i}$. It is interesting to note that for classical computation the term ``bit-reversal'' refers to reversing the bits in the binary representation of the index of the elements of a vector while, for quantum computation, the matrix $P_{2^n}$ literally performs a reversal of the order of the qubits. Note that $P_{2^n}$ is symmetric, i.e., $P_{2^n} = P_{2^n}^t$ [16]. This can also be easily proved from the quantum description of $P_{2^n}$, since if the qubits are reversed twice then the original ordering of the qubits is restored. This implies that $P_{2^n}P_{2^n} = I_{2^n}$, and since $P_{2^n}$ is orthogonal, i.e., $P_{2^n}P_{2^n}^t = I_{2^n}$, it then follows that $P_{2^n} = P_{2^n}^t$. \subsection{Quantum FFT and Bit-Reversal Permutation Matrix} Here, we review the quantum FFT algorithm since it not only arises in the derivation of the quantum wavelet transforms (see Sec.
4.3) but also represents a case in which the roles of the permutation matrices $\Pi_{2^n}$ and $P_{2^n}$ seem to have been overlooked in the quantum computing literature. The classical Cooley-Tukey FFT factorization for a $2^n$-dimensional vector is given by [16] \begin{equation} F_{2^n} = A_n A_{n-1} \cdots A_1 P_{2^n} = {\underline F}_{2^n} P_{2^n} \end{equation} where $A_i = I_{2^{n-i}} \otimes B_{2^i}$, $B_{2^i} = \frac {1} {\sqrt {2}} \left( \begin{array}{cc} I_{2^{i-1}} & \Omega_{2^{i-1}} \\ I_{2^{i-1}} & - \Omega_{2^{i-1}} \end{array} \right) $ and $\Omega_{2^{i-1}} = \mbox {Diag} \{1, \, \omega_{2^i}, \, \omega_{2^i}^2, \, \ldots , \omega_{2^i}^{2^{i-1} -1} \}$ with $\omega_{2^i} = e^{-{2 \iota \pi} \over {2^i}}$ and $\iota = \sqrt {-1}$. We have that $F_2 = W = \frac {1} {\sqrt {2}} \left( \begin{array}{cc} 1 & 1 \\ 1 & - 1 \end{array} \right) $. The operator \begin{equation} {\underline F}_{2^n} = A_n A_{n-1} \cdots A_1 \end{equation} represents the computational kernel of the Cooley-Tukey FFT, while $P_{2^n}$ represents the permutation which needs to be performed on the elements of the input vector before feeding that vector into the computational kernel. Note that the presence of $P_{2^n}$ in (7) is due to the accumulation of its factors, i.e., the terms $(I_{2^i} \otimes \Pi_{2^{n-i}})$, as given by (5). The Gentleman-Sande FFT factorization is obtained by exploiting the symmetry of $F_{2^n}$ and transposing the Cooley-Tukey factorization [16], leading to \begin{equation} F_{2^n} = P_{2^n} A_1^t \cdots A_{n-1}^t A_n^t = P_{2^n} {\underline F}_{2^n}^t \end{equation} where \begin{equation} {\underline F}_{2^n}^t = A_1^t \cdots A_{n-1}^t A_n^t \end{equation} represents the computational kernel of the Gentleman-Sande FFT, while $P_{2^n}$ represents the permutation which needs to be performed to obtain the elements of the output vector in the correct order. In [15] a quantum circuit for the implementation of ${\underline F}_{2^n}$, given by (8), is presented by developing a factorization of the operators $B_{2^i}$ as \begin{equation} B_{2^i} = \frac {1} {\sqrt {2}} \left( \begin{array}{cc} I_{2^{i-1}} & \Omega_{2^{i-1}} \\ I_{2^{i-1}} & -\Omega_{2^{i-1}} \end{array} \right) = \frac {1} {\sqrt {2}} \left( \begin{array}{cc} I_{2^{i-1}} & I_{2^{i-1}} \\ I_{2^{i-1}} & - I_{2^{i-1}} \end{array} \right) \left( \begin{array}{cc} I_{2^{i-1}} & 0 \\ 0 & \Omega_{2^{i-1}} \end{array} \right) \end{equation} Let $C_{2^i} = \left( \begin{array}{cc} I_{2^{i-1}} & 0 \\ 0 & \Omega_{2^{i-1}} \end{array} \right) $. It then follows that \begin{equation} B_{2^i} = (W \otimes I_{2^{i-1}}) C_{2^i} \end{equation} \begin{equation} A_i = I_{2^{n-i}} \otimes B_{2^i} = (I_{2^{n-i}} \otimes W \otimes I_{2^{i-1}})(I_{2^{n-i}} \otimes C_{2^i}) \end{equation} In [15] a factorization of the operators $C_{2^i}$ is developed as \begin{equation} C_{2^i} = \theta_{n-1, n-i}\theta_{n-2, n-i} \cdots \theta_{n-i+1, n-i} \end{equation} where $\theta_{jk}$ is a two-bit gate acting on the $j$th and $k$th qubits. Using (13)-(14), a circuit for the implementation of (8) is developed in [15] and presented in Fig. 1. However, there is an error in the corresponding figure in [15], since it implies that, with a correct ordering of the input qubits, the output qubits are obtained in a reverse order. Note that, as can be seen from (7), the operator ${\underline F}_{2^n}$ performs the FFT operation and provides the output qubits in the correct order if the input qubits are presented in a reverse order.
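As a numerical aside (a sketch of ours, independent of the circuits in [15]), the classical factorization (7), including the role of the bit-reversal permutation $P_{2^n}$, can be verified directly:
\begin{verbatim}
# Check of Eq. (7), F = A_n ... A_1 P, for n = 3.
import numpy as np
from functools import reduce

n = 3; N = 2**n
F = np.array([[np.exp(-2j * np.pi * j * k / N) for k in range(N)]
              for j in range(N)]) / np.sqrt(N)

def B(m):                          # B_{2^i}, with m = 2^(i-1)
    Om = np.diag(np.exp(-2j * np.pi * np.arange(m) / (2 * m)))
    I = np.eye(m)
    return np.block([[I, Om], [I, -Om]]) / np.sqrt(2.0)

def A(i):                          # A_i = I_{2^(n-i)} (x) B_{2^i}
    return np.kron(np.eye(2**(n - i)), B(2**(i - 1)))

P = np.zeros((N, N))               # bit-reversal permutation, Eq. (4)
for a in range(N):
    P[int(format(a, f"0{n}b")[::-1], 2), a] = 1.0

assert np.allclose(reduce(np.matmul, [A(i) for i in range(n, 0, -1)]) @ P, F)
\end{verbatim}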
The quantum circuit for the Gentleman-Sande FFT can be obtained from the circuit of Fig. 1 by first reversing the order of gates that build the operator block $A_i$ (and thus building the operators $A_i^t$) and then reversing the order of the blocks representing the operators $A_i$. By using the Gentleman-Sande circuit, with the input qubits in the correct order the output qubits are obtained in reverse order. For an efficient and correct implementation of the quantum FFT, one needs to take into account the ordering of the input and output qubits, particularly if the FFT is used as a black box in a quantum computation. If the FFT is used as a stand-alone block or as the last stage in the computation (and hence its output is sampled directly), then it is more efficient to use the Gentleman-Sande FFT since the ordering of the output qubits does not cause any problem. If the FFT is used as the first stage of the computation, then it is more efficient to use the Cooley-Tukey factorization by preparing the input qubits in a reverse order. Note that, as in classical computation, either the Cooley-Tukey or the Gentleman-Sande FFT factorization (or a combination of the two) can be chosen in a given quantum computation to avoid explicit implementation of $P_{2^n}$ (or any other mechanism for reversing the order of qubits) and hence achieve a greater efficiency. As an example, in Sec. 4.3 we will show that the use of the Cooley-Tukey rather than the Gentleman-Sande factorization leads to a greater efficiency in quantum implementation by eliminating the need for an explicit implementation of $P_{2^n}$ (or any other mechanism) for reversing the order of qubits. \subsection{A Basic Quantum Gate for Efficient Implementation of Qubit Permutation Matrices} If a permutation matrix can be described by its effect on the ordering of the qubits, then it might be possible to devise circuits for its implementation directly. We call the class of such permutation matrices ``Qubit Permutation Matrices''. A set of efficient and practically realizable circuits for implementation of Qubit Permutation Matrices can be built by using a new quantum gate, called {\it the qubit swap gate}, $\Pi_4$, where \begin{equation} \Pi_4 = \left( \begin{array}{cccc} 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{array} \right) \end{equation} For quantum computation, $\Pi_4$ is the ``qubit swap operator'', i.e., \begin{equation} \Pi_4: \, \vert a_1 \, a_0 \rangle \, \longmapsto \, \vert a_0 \, a_1 \rangle \end{equation} The $\Pi_4$ gate, shown in Fig. 2.a, can be implemented with three XOR (or Controlled-NOT) gates as shown in Fig. 2.b. The $\Pi_4$ gate offers two major advantages for practical implementation: \begin{itemize} \item It performs a local operation, i.e., swapping two neighboring qubits. This locality can be advantageous in practical realizations of quantum circuits, and \item Given the fact that $\Pi_4$ can be implemented using three XOR (or Controlled-NOT) gates, it is possible to implement conditional operators involving $\Pi_4$, for example, operators of the form $\Pi_4 \oplus I_{2^n - 4}$, by using Controlled$^k$-NOT gates [21]. \end{itemize} A circuit for implementation of $\Pi_{2^n}$ by using $\Pi_4$ gates is shown in Fig. 3. This circuit is based on the intuitively simple idea of successively swapping neighboring qubits, and implements $\Pi_{2^n}$ with a complexity of $O(n)$ by using $O(n)$ $\Pi_4$ gates.
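Both properties of this circuit are easy to check numerically. The following Python/NumPy sketch (our illustration, not part of the original development) verifies that three XOR gates realize $\Pi_4$ and that the cascade of neighboring swaps of Fig. 3 reproduces the perfect shuffle; the corresponding algebraic factorization is stated in (17) below. \begin{verbatim}
import numpy as np

# CNOTs in the ordered basis |00>, |01>, |10>, |11|: control on the
# left qubit (C_LR) and on the right qubit (C_RL), respectively
C_LR = np.array([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]])
C_RL = np.array([[1,0,0,0],[0,0,0,1],[0,0,1,0],[0,1,0,0]])
PI4  = np.array([[1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]])
assert np.array_equal(C_LR @ C_RL @ C_LR, PI4)  # three XORs = swap

def Pi(m):          # perfect shuffle from its element-wise definition
    M = np.zeros((m, m), dtype=int)
    for i in range(m):
        M[i, i // 2 if i % 2 == 0 else (i - 1) // 2 + m // 2] = 1
    return M

n = 4               # cascade of neighboring swaps implementing Pi_{2^n}
prod = np.eye(2 ** n, dtype=int)
for k in range(n - 1):   # leftmost factor first: I_{2^{n-2}} (x) Pi_4
    prod = prod @ np.kron(np.kron(np.eye(2 ** (n - 2 - k), dtype=int),
                                  PI4), np.eye(2 ** k, dtype=int))
assert np.array_equal(prod, Pi(2 ** n))
\end{verbatim}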
It is interesting to note that this circuit leads to a new (to our knowledge) factorization of $\Pi_{2^n}$ in terms of $\Pi_4$ as \begin{equation} \Pi_{2^n} = (I_{2^{n-2}} \otimes \Pi_4)(I_{2^{n-3}} \otimes \Pi_4 \otimes I_2) \cdots (I_{2^{n-i}} \otimes \Pi_4 \otimes I_{2^{i-2}}) \, \cdots \, (I_2 \otimes \Pi_4 \otimes I_{2^{n-3}})(\Pi_4 \otimes I_{2^{n-2}}) \end{equation} This new factorization of $\Pi_{2^n}$ is less efficient than other schemes (see, for example, [16]) for a {\it classical implementation} of $\Pi_{2^n}$. Interestingly, it is derived here as a result of our search for an efficient {\it quantum implementation} of $\Pi_{2^n}$, and in this sense it is only efficient for a quantum implementation. Note also that a new (to our knowledge) recursive factorization of $\Pi_{2^i}$ results directly from Fig. 3 as \begin{equation} \Pi_{2^i} = (I_{2^{i-2}} \otimes \Pi_4)(\Pi_{2^{i-1}} \otimes I_2) \end{equation} A circuit for implementation of $P_{2^n}$ by using $\Pi_4$ gates is shown in Fig. 4. Again, this circuit is based on an intuitively simple idea, that is, successive and parallel swapping of neighboring qubits, and implements $P_{2^n}$ with a complexity of $O(n)$ by using $O(n^2)$ $\Pi_4$ gates. This circuit leads to a new (to our knowledge) factorization of $P_{2^n}$ in terms of $\Pi_4$ as \begin{equation} P_{2^n} = ((\underbrace{\Pi_4 \otimes \Pi_4 \cdots \otimes \Pi_4}_{\frac{n}{2}}) (I_2 \otimes \underbrace{\Pi_4 \otimes \cdots \otimes \Pi_4}_{\frac{n}{2} -1} \otimes I_2))^{\frac{n}{2}} \end{equation} for $n$ even, and \begin{equation} P_{2^n} = ((I_2 \otimes \underbrace{\Pi_4 \otimes \cdots \otimes \Pi_4}_{\frac{n-1}{2}}) (\underbrace{\Pi_4 \otimes \cdots \otimes \Pi_4}_{\frac{n-1}{2}} \otimes I_2))^{\frac{n-1}{2}} (I_2 \otimes \underbrace{\Pi_4 \otimes \cdots \otimes \Pi_4}_{\frac{n-1}{2}}) \end{equation} for $n$ odd. It should be emphasized that this new factorization of $P_{2^n}$ is less efficient than other schemes, e.g., the use of (5), for a {\it classical implementation} (see also [16] for further discussion). However, this factorization is more efficient for a {\it quantum implementation} of $P_{2^n}$. In fact, a quantum implementation of $P_{2^n}$ by using (5) and (17) will result in a complexity of $O(n^2)$ by using $O(n^2)$ $\Pi_4$ gates. As will be shown, the development of {\it complete} and efficient circuits for implementation of wavelet transforms requires a mechanism for implementation of conditional operators of the forms $\Pi_{2^i} \oplus I_{2^n - 2^i}$ and $P_{2^i} \oplus I_{2^n - 2^i}$, for some $i$. The key enabling factor for a successful implementation of such conditional operators is the use of factorizations similar to (17) and (19)-(20) or, alternatively, circuits similar to those in Figures 3 and 4, along with the conditional operators involving $\Pi_4$ gates. \section{Quantum Wavelet Algorithms} \subsection{Wavelet Pyramidal and Packet Algorithms} Given a wavelet kernel, its corresponding wavelet transform is usually performed according to a packet algorithm (PAA) or a pyramid algorithm (PYA). The first step in devising quantum counterparts of these algorithms is the development of suitable factorizations. Consider the Daubechies fourth-order wavelet kernel of dimension $2^i$, denoted as $D^{(4)}_{2^i}$.
First-level factorizations of the PAA and PYA for a $2^n$-dimensional vector are given as \begin{equation} PAA = (I_{2^{n-2}} \otimes D^{(4)}_4)(I_{2^{n-3}} \otimes \Pi_8) \cdots (I_{2^{n-i}} \otimes D^{(4)}_{2^i})(I_{2^{n-i-1}} \otimes \Pi_{2^{i+1}}) \cdots (I_2 \otimes D^{(4)}_{2^{n-1}}) \Pi_{2^n}D^{(4)}_{2^n} \end{equation} \begin{equation} PYA = (D^{(4)}_4 \oplus I_{2^n-4})(\Pi_8 \oplus I_{2^n-8}) \cdots (D^{(4)}_{2^i} \oplus I_{2^n - 2^i})(\Pi_{2^{i+1}} \oplus I_{2^n - 2^{i+1}}) \cdots \Pi_{2^n}D^{(4)}_{2^n} \end{equation} These factorizations allow a first-level analysis of the feasibility and efficiency of quantum implementations of the packet and pyramid algorithms. To see this, suppose we have a practically realizable and efficient, i.e., $O(i)$, quantum algorithm for implementation of $D^{(4)}_{2^i}$. For the packet algorithm, the operators $(I_{2^{n-i}} \otimes D^{(4)}_{2^i})$ can be directly and efficiently implemented by using the algorithm for $D^{(4)}_{2^i}$. Also, using the factorization of $\Pi_{2^i}$, given by (17), the operators $(I_{2^{n-i}} \otimes \Pi_{2^i})$ can be implemented efficiently in $O(i)$. For the pyramid algorithm, the existence of an algorithm for $D^{(4)}_{2^i}$ does not automatically imply an efficient algorithm for implementation of the conditional operators $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$. An example of such a case is discussed in Sec. 4.4. Thus, careful analysis is needed to establish both the feasibility and efficiency of implementation of the conditional operators $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$ by using the algorithm for $D^{(4)}_{2^i}$. Note, however, that the conditional operators $(\Pi_{2^i} \oplus I_{2^n - 2^i})$ can be efficiently implemented in $O(i)$ by using the factorization in (17) and the conditional $\Pi_4$ gates. The above analysis can be extended to any wavelet kernel (WK) and summarized as follows: \begin{itemize} \item Packet algorithm: A physically realizable and efficient algorithm for the WK, along with the use of (17), leads to a physically realizable and efficient implementation of the packet algorithm. \item Pyramid algorithm: A physically realizable and efficient algorithm for the WK does not automatically lead to an implementation of the conditional operators involving the WK (and hence the pyramid algorithm), but the conditional operators $(\Pi_{2^i} \oplus I_{2^n - 2^i})$ can be efficiently implemented by using the factorization in (17) and the conditional $\Pi_4$ gates. \end{itemize} \subsection{Haar Wavelet Factorization and Implementation} The Haar transform can be defined from the Haar functions [17]. Hoyer [20] used a recursive definition of Haar matrices based on the {\it generalized Kronecker product} (see also [17] for similar definitions) and developed a factorization of $H_{2^n}$ as \begin{eqnarray} H_{2^n} = & (I_{2^{n-1}} \otimes W) \cdots (I_{2^{n-i}} \otimes W \oplus I_{2^n - 2^{n-i+1}}) \, \cdots \, (W \oplus I_{2^n - 2}) \times \nonumber \\ & (\Pi_4 \oplus I_{2^n - 4}) \, \cdots \, (\Pi_{2^i} \oplus I_{2^n - 2^i}) \, \cdots \, (\Pi_{2^{n-1}} \oplus I_{2^{n-1}}) \Pi_{2^n} \end{eqnarray} Hoyer's circuit for implementation of (23) is shown in Fig. 5. However, this represents an {\it incomplete} solution for quantum implementation and subsequent complexity analysis of the Haar transform.
To see this, let \begin{equation} H^{(1)}_{2^n} = (I_{2^{n-1}} \otimes W) \cdots (I_{2^{n-i}} \otimes W \oplus I_{2^n - 2^{n-i+1}}) \, \cdots \, (W \oplus I_{2^n - 2}) \end{equation} \begin{equation} H^{(2)}_{2^n} = (\Pi_4 \oplus I_{2^n - 4}) \, \cdots \, (\Pi_{2^i} \oplus I_{2^n - 2^i}) \, \cdots \, (\Pi_{2^{n-1}} \oplus I_{2^{n-1}}) \Pi_{2^n} \end{equation} Clearly, the operator $H^{(1)}_{2^n}$ can be implemented in $O(n)$ by using $O(n)$ conditional $W$ gates. But the feasibility of practical implementation of the operator $H^{(2)}_{2^n}$ and its complexity (and consequently those of the factorization in (23)) cannot be assessed unless a mechanism for implementation of the terms $(\Pi_{2^i} \oplus I_{2^n - 2^i})$ is devised. However, by using factorizations and circuits similar to (17) and Figure 3, it can be easily shown that the operators $(\Pi_{2^i} \oplus I_{2^n - 2^i})$ can be implemented in $O(i)$ by using $O(i)$ conditional $\Pi_4$ gates (or Controlled$^k$-NOT gates). This leads to the implementation of $H^{(2)}_{2^n}$ and consequently $H_{2^n}$ in $O(n^2)$ by using $O(n^2)$ gates. This represents not only the first practically feasible quantum circuit for implementation of $H_{2^n}$ but also the first complete analysis of the time and space (gate) complexity of its quantum implementation. Note that, both operators $(I_{2^{n-i}} \otimes H_{2^i})$ and $(H_{2^i} \oplus I_{2^n - 2^i})$ can be directly and efficiently implemented by using the above algorithm and circuit for implementation of $H_{2^i}$. This implies both the feasibility and efficiency of the quantum implementation of the packet and pyramid algorithms by using our factorization for the Haar wavelet kernel. \subsection{Daubechies $D^{(4)}$ Wavelet and Hoyer's Factorization} The Daubechies fourth-order wavelet kernel of dimension $2^n$ is given in a matrix form as [22] \begin{equation} D^{(4)}_{2^n} = \left( \begin{array}{ccccccccccc} c_0 & c_1 & c_2 & c_3 \\ c_3 & -c_2 & c_1 & -c_0 \\ & & c_0 & c_1 & c_2 & c_3 \\ & & c_3 & -c_2 & c_1 & -c_0 \\ \vdots & \vdots & & & & & \ddots \\ & & & & & & & c_0 & c_1 & c_2 & c_3 \\ & & & & & & & c_3 & -c_2 & c_1 & -c_0 \\ c_2 & c_3 & & & & & & & & c_0 & c_1 \\ c_1 & -c_0 & & & & & & & & c_3 & -c_2 \end{array} \right) \end{equation} where $c_0 = \frac {(1 + \sqrt {3})} {4 \sqrt {2}}$, $c_1 = \frac {(3 + \sqrt {3})} {4 \sqrt {2}}$, $c_2 = \frac {(3 - \sqrt {3})} {4 \sqrt {2}}$, and $c_3 = \frac {(1 - \sqrt {3})} {4 \sqrt {2}}$. For classical computation and given its sparse structure, the application of $D^{(4)}_{2^n}$ can be performed with an optimal cost of $O(2^n)$. However, the matrix $D^{(4)}_{2^n}$, as given by (26), is not suitable for a quantum implementation. To achieve a feasible and efficient quantum implementation, a suitable factorization of $D^{(4)}_{2^n}$ needs to be developed. Hoyer [20] proposed a factorization of $D^{(4)}_{2^n}$ as \begin{equation} D^{(4)}_{2^n} = (I_{2^{n-1}} \otimes C_1) S_{2^n}(I_{2^{n-1}} \otimes C_0) \end{equation} where \begin{equation} C_0 = 2 \left( \begin{array}{cc} c_4 & -c_2 \\ -c_2 & c_4 \end{array} \right) \mbox{ and } C_1 = \frac{1}{2} \left( \begin{array}{cc} \frac{c_0}{c_4} & 1 \\ 1 & \frac{c_1}{c_2} \end{array} \right) \end{equation} and $S_{2^n}$ is a permutation matrix with a classical description given by \begin{equation} S_{ij} = \left\{ \begin{array}{cc} 1 & \mbox{ if $i = j$ and $i$ is even, or if $i+2 = j$ \, (mod $2^n$) and $i$ is odd} \\ 0 & \mbox{ otherwise} \end{array} \right.
\end{equation} Hoyer's block-level circuit for implementation of (27) is shown in Figure 6. Clearly, the main issue for a practical quantum implementation and subsequent complexity analysis of (27) is the quantum implementation of the matrix $S_{2^n}$. To this end, Hoyer discovered a quantum arithmetic description of $S_{2^n}$ as \begin{equation} S_{2^n}: \, \vert a_{n-1} \, a_{n-2} \, \cdots a_1 \, a_0 \rangle \, \longmapsto \vert b_{n-1} \, b_{n-2} \, \cdots b_1 \, b_0 \rangle \end{equation} where \begin{equation} b = \left\{ \begin{array}{ll} a - 2 \ (\mbox{mod } 2^n), & \mbox{if $a$ is odd} \\ a, & \mbox{otherwise} \end{array} \right. \end{equation} and $a$ and $b$ denote the integers with binary representations $a_{n-1} \, a_{n-2} \, \cdots a_1 \, a_0$ and $b_{n-1} \, b_{n-2} \, \cdots b_1 \, b_0$, respectively. As suggested by Hoyer, this description of $S_{2^n}$ then allows its quantum implementation by using the quantum arithmetic circuits of [18] with a complexity of $O(n)$. This algorithm can be directly extended for implementation of the operators $(I_{2^{n-i}} \otimes D^{(4)}_{2^i})$ and hence the packet algorithm. However, the feasibility and efficiency of an implementation of the conditional operators $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$, and thus the pyramid algorithm, needs further analysis. \section{Fast Quantum Algorithms and Circuits for Implementation of Daubechies $D^{(4)}$ Wavelet} In this section, we develop a new factorization of the Daubechies $D^{(4)}$ wavelet. This factorization leads to three new and efficient circuits, including one using the circuit for the QFT, for implementation of the Daubechies $D^{(4)}$ wavelet. \subsection{A New Factorization of Daubechies $D^{(4)}$ Wavelet} We develop a new factorization of the Daubechies $D^{(4)}$ wavelet transform by showing that the permutation matrix $S_{2^n}$ can be written as a product of two permutation matrices as \begin{equation} S_{2^n} = Q_{2^n}R_{2^n} \end{equation} where $Q_{2^n}$ is the {\it downshift permutation matrix} [16] given by \begin{equation} Q_{2^n} = \left( \begin{array}{ccccccc} 0 & 1 \\ 0 & 0 & 1 \\ 0 & 0 & 0 & 1 \\ \vdots & \vdots & \vdots & & \ddots \\ 0 & 0 & \cdots & 0 & 0 & 1 \cr 1 & 0 & \cdots & 0 & 0 & 0 \end{array} \right) \end{equation} and $R_{2^n}$ is a permutation matrix given by \begin{equation} R_{2^n} = \left( \begin{array}{cccccccc} 0 & 1 & 0 & 0 & 0 \\ 1 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & 0 & 1 & 0 & 0 \\ & \ddots & \ddots & \ddots & \ddots \\ & & & & & & 0 & 1 \cr & & & & & & 1 & 0 \end{array} \right) \end{equation} The matrix $R_{2^n}$ can be written as \begin{equation} R_{2^n} = I_{2^{n-1}} \otimes N \end{equation} where $ N = \left( \begin{array}{cc} 0 & 1 \\ 1 & 0 \end{array} \right) $. Substituting (35) and (32) into (27), a new factorization of $D^{(4)}_{2^n}$ is derived as \begin{equation} D^{(4)}_{2^n} = (I_{2^{n-1}} \otimes C_1) Q_{2^n}(I_{2^{n-1}} \otimes N) (I_{2^{n-1}} \otimes C_0) = (I_{2^{n-1}} \otimes C_1) Q_{2^n} (I_{2^{n-1}} \otimes C_0^\prime) \end{equation} where \begin{equation} C_0^\prime = N C_0 = 2 \left( \begin{array}{cc} -c_2 & c_4 \\ c_4 & -c_2 \end{array} \right) \end{equation} Fig. 7 shows a block-level implementation of (36). Clearly, the main issue for a practical quantum gate-level implementation and subsequent complexity analysis of (36) is the quantum implementation of the matrix $Q_{2^n}$. In the following, we present three circuits for quantum implementation of the matrix $Q_{2^n}$. \subsection{Quantum Arithmetic Implementation of Permutation Matrix $Q_{2^n}$} A first circuit for implementation of the matrix $Q_{2^n}$ is developed based on its description as a {\it quantum arithmetic operator}.
We have discovered such a quantum arithmetic description of $Q_{2^n}$ as \begin{equation} Q_{2^n}: \, \vert a_{n-1} \, a_{n-2} \, \cdots a_1 \, a_0 \rangle \, \longmapsto \vert b_{n-1} \, b_{n-2} \, \cdots b_1 \, b_0 \rangle \end{equation} where \begin{equation} b = a - 1 \ (\mbox{mod } 2^n) \end{equation} and $a$ and $b$ denote, as before, the integers with binary representations $a_{n-1} \, a_{n-2} \, \cdots a_1 \, a_0$ and $b_{n-1} \, b_{n-2} \, \cdots b_1 \, b_0$. This description of $Q_{2^n}$ allows its quantum implementation by using the quantum arithmetic circuits of [18] with a complexity of $O(n)$. Note, however, that the arithmetic description of $Q_{2^n}$ is simpler than that of $S_{2^n}$ since it does not involve conditional quantum arithmetic operations (i.e., the decrement is applied unconditionally). This algorithm for quantum implementation of $Q_{2^n}$, and hence $D^{(4)}_{2^n}$, can be directly extended for implementation of the operators $(I_{2^{n-i}} \otimes D^{(4)}_{2^i})$ and hence the packet algorithm. However, the feasibility and efficiency of an implementation of the conditional operators $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$, and thus the pyramid algorithm, needs further analysis. \subsection{Quantum FFT Factorization of Permutation Matrix $Q_{2^n}$} A direct and efficient factorization, and subsequent circuit, for implementation of $Q_{2^n}$ (and hence the Daubechies $D^{(4)}$ wavelet) can be derived by using the FFT algorithm. This factorization is based on the observation that $Q_{2^n}$ can be described in terms of the FFT as [16] \begin{equation} Q_{2^n} = F_{2^n} T_{2^n} F^*_{2^n} \end{equation} where $T_{2^n}$ is a diagonal matrix given as $T_{2^n} = \mbox {Diag} \{1, \, \omega_{2^n}, \, \omega_{2^n}^2, \, \ldots , \omega_{2^n}^{2^n -1} \}$ with $\omega_{2^n} = e^{{-2 \iota \pi} \over {2^n}}$ (where $*$ indicates the conjugate transpose). As will be seen, it is more efficient to use the Cooley-Tukey factorization, given by (7), and write (40) as \begin{equation} Q_{2^n} = {\underline F}_{2^n} P_{2^n} T_{2^n}P_{2^n}{\underline F}^*_{2^n} \end{equation} It can be shown that the matrix $T_{2^n}$ has a factorization as \begin{equation} T_{2^n} = (G(\omega_{2^n}^{2^{n-1}}) \otimes I_{2^{n-1}}) \cdots (I_{2^{i-1}} \otimes G(\omega_{2^n}^{2^{n-i}}) \otimes I_{2^{n-i}}) \cdots (I_{2^{n-1}} \otimes G(\omega_{2^n})) \end{equation} where $G(\omega_{2^n}^k) = \mbox {Diag} \{1, \, \omega_{2^n}^k \} = \left( \begin{array}{cc} 1 & 0 \\ 0 & \omega_{2^n}^k \end{array} \right) $. This factorization leads to an efficient implementation of $T_{2^n}$ by using $n$ single-qubit $G(\omega_{2^n}^k)$ gates as shown in Fig. 8. Together with the circuit for implementation of $P_{2^n}$ (Fig. 4) and the circuit for implementation of the FFT (Fig. 1), this represents a complete gate-level implementation of $D^{(4)}_{2^n}$. However, a more efficient circuit can be derived by avoiding the explicit implementation of $P_{2^n}$, by showing that the operator \begin{equation} P_{2^n}T_{2^n}P_{2^n} = P_{2^n}(G(\omega_{2^n}^{2^{n-1}}) \otimes I_{2^{n-1}})\cdots (I_{2^{i-1}} \otimes G(\omega_{2^n}^{2^{n-i}}) \otimes I_{2^{n-i}}) \cdots (I_{2^{n-1}} \otimes G(\omega_{2^n}))P_{2^n} \end{equation} can be efficiently implemented by simply reversing the order of gates in Fig. 8.
This is established by the following lemma: \vspace{0.1in} \noindent {\bf Lemma 1.} \vspace{0.1in} \begin{equation} P_{2^n}(G(\omega_{2^n}^{2^{n-1}}) \otimes I_{2^{n-1}}) = (I_{2^{n-1}} \otimes G(\omega_{2^n}^{2^{n-1}}))P_{2^n} \end{equation} \begin{equation} P_{2^n} (I_{2^{n-j}} \otimes G(\omega_{2^n}^{2^{j-1}}) \otimes I_{2^{j-1}}) = (I_{2^{j-1}} \otimes G(\omega_{2^n}^{2^{j-1}}) \otimes I_{2^{n-j}})P_{2^n} \end{equation} \begin{equation} P_{2^n}(I_{2^{n-1}} \otimes G(\omega_{2^n})) = (G(\omega_{2^n}) \otimes I_{2^{n-1}})P_{2^n} \end{equation} \noindent {\bf\it Proof.} This lemma can be easily proved based on the physical interpretation of the operations in (44)-(46). The left-hand side of (44) implies first an operation, i.e., the application of $G(\omega_{2^n}^{2^{n-1}})$, on the last qubit and then the application of $P_{2^n}$ on all the qubits, i.e., reversing the order of the qubits. However, this is equivalent to first reversing the order of the qubits, i.e., applying $P_{2^n}$, and then applying $G(\omega_{2^n}^{2^{n-1}})$ on the first qubit, which is the operation described by the right-hand side of (44). Similarly, the left-hand side of (45) implies first applying $G(\omega_{2^n}^{2^{j-1}})$ on the $j$th qubit and then reversing the order of the qubits. This is equivalent to first reversing the order of the qubits and then applying $G(\omega_{2^n}^{2^{j-1}})$ on the $(n-j+1)$th qubit, which is the operation described by the right-hand side of (45). In the same fashion, the left-hand side of (46) implies first applying $G(\omega_{2^n})$ on the first qubit and then reversing the order of the qubits, which is equivalent to first reversing the order of the qubits and then applying $G(\omega_{2^n})$ on the last qubit, that is, the operation described by the right-hand side of (46). Applying (44)-(46) to (43) from left to right and noting that, due to the symmetry of $P_{2^n}$, we have $P_{2^n}P_{2^n} = I_{2^n}$, it then follows that \begin{equation} P_{2^n}T_{2^n}P_{2^n} = (I_{2^{n-1}} \otimes G(\omega_{2^n}^{2^{n-1}})) \cdots (I_{2^{n-i}} \otimes G(\omega_{2^n}^{2^{n-i}}) \otimes I_{2^{i-1}}) \cdots (G(\omega_{2^n}) \otimes I_{2^{n-1}}) \end{equation} The circuit for implementation of (47) is shown in Fig. 9 which, as can be seen, has been obtained by reversing the order of gates in Fig. 8. Note that, the use of (47), which is a direct consequence of using the Cooley-Tukey factorization, enables the implementation of (40) without explicit implementation of $P_{2^n}$. Using (40) and (47), the complexity of the implementation of $Q_{2^n}$, and thus $D^{(4)}_{2^n}$, is the same as that of the quantum FFT, that is, $O(n^2)$ for an exact implementation and $O(nm)$ for an approximation of order $m$ [15]. Note that, by using (47), (40), and (36), both operators $(I_{2^{n-i}} \otimes D^{(4)}_{2^i})$ and $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$ can be directly implemented. This implies both the feasibility and efficiency of the quantum implementation of the packet and pyramid algorithms by using this algorithm for quantum implementation of $D^{(4)}_{2^n}$.
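Both (40) and Lemma 1 can be confirmed numerically. The following Python/NumPy sketch (our illustration, not part of the original derivation) does so for $n = 3$: \begin{verbatim}
import numpy as np

n, N = 3, 8
w = np.exp(-2j * np.pi / N)                 # omega_{2^n}
F = np.array([[w ** (j * k) for k in range(N)]
              for j in range(N)]) / np.sqrt(N)
T = np.diag(w ** np.arange(N))
Q = np.roll(np.eye(N), -1, axis=0)          # downshift matrix of (33)
assert np.allclose(F @ T @ F.conj().T, Q)   # eq. (40)

def P(n):                                   # bit-reversal permutation
    M = np.zeros((2 ** n, 2 ** n))
    for i in range(2 ** n):
        M[i, int(format(i, '0%db' % n)[::-1], 2)] = 1
    return M

# Lemma 1: conjugation by P_{2^n} mirrors the position of a G gate
G = np.diag([1.0, w])                       # a representative G gate
for j in range(1, n + 1):
    lhs = P(n) @ np.kron(np.kron(np.eye(2 ** (n - j)), G),
                         np.eye(2 ** (j - 1)))
    rhs = np.kron(np.kron(np.eye(2 ** (j - 1)), G),
                  np.eye(2 ** (n - j))) @ P(n)
    assert np.allclose(lhs, rhs)            # eqs. (44)-(46)
\end{verbatim}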
\subsection{A Direct Recursive Factorization of Permutation Matrix $Q_{2^n}$} A new direct and recursive factorization of $Q_{2^n}$ can be derived based on a similarity transformation of $Q_{2^n}$ by using $\Pi_{2^n}$ as \begin{equation} \Pi^t_{2^n}Q_{2^n}\Pi_{2^n} = \left( \begin{array}{cc} 0 & I_{2^{n-1}} \\ Q_{2^{n-1}} & 0 \end{array} \right) \end{equation} which can be written as \begin{equation} \Pi^t_{2^n}Q_{2^n}\Pi_{2^n} = \left( \begin{array}{cc} 0 & I_{2^{n-1}} \\ I_{2^{n-1}} & 0 \end{array} \right) \left( \begin{array}{cc} Q_{2^{n-1}} & 0 \\ 0 & I_{2^{n-1}} \end{array} \right) = (N \otimes I_{2^{n-1}})(Q_{2^{n-1}} \oplus I_{2^{n-1}}) \end{equation} from which $Q_{2^n}$ can be calculated as \begin{equation} Q_{2^n} = \Pi_{2^n}(N \otimes I_{2^{n-1}})(Q_{2^{n-1}} \oplus I_{2^{n-1}})\Pi^t_{2^n} \end{equation} Replacing a similar factorization of $Q_{2^{n-1}}$ into (50), we get \begin{equation} Q_{2^n} = \Pi_{2^n}(N \otimes I_{2^{n-1}}) (\Pi_{2^{n-1}}(N \otimes I_{2^{n-2}})(Q_{2^{n-2}} \oplus I_{2^{n-2}}) \Pi^t_{2^{n-1}} \oplus I_{2^{n-1}})\Pi^t_{2^n} \end{equation} By using the identity \begin{equation} \Pi_{2^{n-1}} A \Pi^t_{2^{n-1}} \oplus I_{2^{n-1}} = (I_2 \otimes \Pi_{2^{n-1}})(A \oplus I_{2^{n-1}})(I_2 \otimes \Pi^t_{2^{n-1}}) \end{equation} for any matrix $A \in \mathbb{R}^{2^{n-1} \times 2^{n-1}}$, (51) can then be written as \begin{equation} Q_{2^n} = \Pi_{2^n}(N \otimes I_{2^{n-1}})(I_2 \otimes \Pi_{2^{n-1}}) ((N \otimes I_{2^{n-2}})(Q_{2^{n-2}} \oplus I_{2^{n-2}})\oplus I_{2^{n-1}}) (I_2 \otimes \Pi^t_{2^{n-1}})\Pi^t_{2^n} \end{equation} Using the identity \begin{eqnarray} (N \otimes I_{2^{n-2}})(Q_{2^{n-2}} \oplus I_{2^{n-2}})\oplus I_{2^{n-1}} & = & (N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}})(Q_{2^{n-2}} \oplus I_{2^{n-2}} \oplus I_{2^{n-1}}) \nonumber \\ & = & (N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}})(Q_{2^{n-2}} \oplus I_{3 \cdot 2^{n-2}}) \end{eqnarray} (53) can now be written as \begin{equation} Q_{2^n} = \Pi_{2^n}(N \otimes I_{2^{n-1}})(I_2 \otimes \Pi_{2^{n-1}}) (N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}})(Q_{2^{n-2}} \oplus I_{2^n - 2^{n-2}}) (I_2 \otimes \Pi^t_{2^{n-1}})\Pi^t_{2^n} \end{equation} Repeating the same procedure for the remaining $Q_{2^i}$, down to $Q_4$, and noting that $Q_2 = N$, it then follows that \begin{eqnarray} Q_{2^n} & = \Pi_{2^n}(N \otimes I_{2^{n-1}})(I_2 \otimes \Pi_{2^{n-1}}) (N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}})(I_4 \otimes \Pi_{2^{n-2}}) (N \otimes I_{2^{n-3}} \oplus I_{2^n - 2^{n-2}}) \cdots \nonumber \\ & (I_{2^{n-2}} \otimes \Pi_4)(N \otimes I_2 \oplus I_{2^n -4}) (N \oplus I_{2^n -2})(I_{2^{n-2}} \otimes \Pi^t_4) \cdots (I_2 \otimes \Pi^t_{2^{n-1}})\Pi^t_{2^n} \end{eqnarray} The above expression of $Q_{2^n}$ can be further simplified by exploiting the fact that (see the Appendix for the proof) every operator of the form $(I_{2^i} \otimes \Pi_{2^{n-i}})$, for $i = n-2$ to $1$, commutes with all operators of the form $(N \otimes I_{2^{n-j}} \oplus I_{2^n - 2^{n-j+1}})$, for $j = i$ to $1$.
Using this commutative property, (56) can now be written as \begin{eqnarray} Q_{2^n} & = \Pi_{2^n}(I_2 \otimes \Pi_{2^{n-1}})(I_4 \otimes \Pi_{2^{n-2}}) \cdots (I_{2^{n-2}} \otimes \Pi_{4}) (N \otimes I_{2^{n-1}}) (N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}}) \cdots \nonumber \\ & \qquad (N \otimes I_2 \oplus I_{2^n -4}) (N \oplus I_{2^n -2})(I_{2^{n-2}} \otimes \Pi^t_4) \cdots (I_2 \otimes \Pi^t_{2^{n-1}})\Pi^t_{2^n} \end{eqnarray} Using the factorization of $P_{2^n}$ given in (5), we then have \begin{equation} Q_{2^n} = P_{2^n}(N \otimes I_{2^{n-1}})(N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}}) \cdots (N \otimes I_2 \oplus I_{2^n -4})(N \oplus I_{2^n -2})P_{2^n} \end{equation} Substituting (58) into (36), a factorization of $D^{(4)}_{2^n}$ is then obtained as \begin{equation} D^{(4)}_{2^n} = (I_{2^{n-1}} \otimes C_1) P_{2^n}(N \otimes I_{2^{n-1}})(N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}}) \cdots (N \otimes I_2 \oplus I_{2^n -4})(N \oplus I_{2^n -2})P_{2^n} (I_{2^{n-1}} \otimes C_0^\prime) \end{equation} Using Lemma 1, it then follows that \begin{equation} D^{(4)}_{2^n} = P_{2^n}(C_1 \otimes I_{2^{n-1}})(N \otimes I_{2^{n-1}})(N \otimes I_{2^{n-2}} \oplus I_{2^{n-1}}) \cdots (N \otimes I_2 \oplus I_{2^n -4})(N \oplus I_{2^n -2})(C_0^\prime \otimes I_{2^{n-1}})P_{2^n} \end{equation} A circuit for implementation of $D^{(4)}_{2^n}$, based on (60), is shown in Fig. 10. Together with the circuit for implementation of $P_{2^n}$, shown in Fig. 4, it represents a complete gate-level circuit for implementation of $D^{(4)}_{2^n}$ with an optimal complexity of $O(n)$. Using (60) and (19)-(20), the operators $(I_{2^{n-i}} \otimes D^{(4)}_{2^i})$ can be directly and efficiently implemented with a complexity of $O(i)$. This implies both the feasibility and efficiency of the implementation of the packet algorithm by using this algorithm for the $D^{(4)}_{2^n}$ wavelet kernel. However, this algorithm is less efficient for implementation of the operators $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$ and hence the pyramid algorithm. To see this, note that the implementation of the operators $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$ by using (60) requires the implementation of the conditional operators $(P_{2^i} \oplus I_{2^n - 2^i})$. However, these conditional operators cannot be directly implemented by using (19) and (20). An alternative solution is to use the factorization of $P_{2^i}$ in (5) and the conditional operators $(\Pi_{2^i} \oplus I_{2^n - 2^i})$. However, this leads to a complexity of $O(i^2)$ for implementation of the operators $(P_{2^i} \oplus I_{2^n - 2^i})$ and hence the operators $(D^{(4)}_{2^i} \oplus I_{2^n - 2^i})$. Therefore, while (60) is optimal for implementation of $D^{(4)}_{2^i}$ and the packet algorithm, it is not efficient for implementation of the pyramid algorithm. It should be emphasized that this recursive factorization of $Q_{2^n}$, which originates from the similarity transformation in (48) and is given by (56) and (58), was not previously known in classical computing. Note that, the permutation matrices $\Pi_{2^n}$ and, particularly, $P_{2^n}$ are much harder (in terms of data movement pattern) for a classical implementation than $Q_{2^n}$. In this sense, such a factorization of $Q_{2^n}$ is rather counterintuitive from a classical computing point of view since it involves the use of the permutation matrices $\Pi_{2^n}$ and $P_{2^n}$, and thus it is highly inefficient for a classical implementation.
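As a sanity check (ours, not part of the original derivation), the factorization (58) of $Q_{2^n}$ can be verified numerically for small $n$; the middle product of conditional NOT operators acts as a ripple-borrow decrementer once conjugated by the bit reversal: \begin{verbatim}
import numpy as np

Nx = np.array([[0, 1], [1, 0]])            # the NOT gate N

def P(n):                                  # bit-reversal permutation
    M = np.zeros((2 ** n, 2 ** n))
    for i in range(2 ** n):
        M[i, int(format(i, '0%db' % n)[::-1], 2)] = 1
    return M

n, N = 4, 16
middle = np.eye(N)
for j in range(n, 0, -1):                  # factors of (58), left to right
    term = np.eye(N)                       # (N (x) I_{2^{j-1}}) (+) I
    term[:2 ** j, :2 ** j] = np.kron(Nx, np.eye(2 ** (j - 1)))
    middle = middle @ term
Q = np.roll(np.eye(N), -1, axis=0)         # downshift matrix of (33)
assert np.allclose(P(n) @ middle @ P(n), Q)   # eq. (58)
\end{verbatim}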
\section{Discussion and Conclusion} In this paper, we developed fast algorithms and efficient circuits for quantum wavelet transforms. Assuming an efficient quantum circuit for a given wavelet kernel and starting with a high-level description of the packet and pyramid algorithms, we analyzed the feasibility and efficiency of the implementation of the packet and pyramid algorithms by using the given wavelet kernel. We also developed efficient and complete gate-level circuits for two representative wavelet kernels, the Haar and Daubechies $D^{(4)}$ kernels. We gave the first complete time and space complexity analysis of the quantum Haar wavelet transform. We also described three complete circuits for the Daubechies $D^{(4)}$ wavelet kernel. In particular, we showed that the Daubechies $D^{(4)}$ kernel can be implemented by using the circuit for the QFT. Given the problem of decoherence, exploitation of parallelism in quantum computation is a key issue in the practical implementation of a given computation. To this end, we are currently analyzing the algorithms of this paper in terms of their parallel efficiency and developing more efficient parallel quantum wavelet algorithms. As shown in this paper, permutation matrices play a pivotal role in the development of quantum wavelet transforms. In fact, not only do they arise explicitly in the packet and pyramid algorithms, but they also play a key role in the factorization of wavelet kernels. For classical computing, the implementation of permutation matrices is trivial. However, for quantum computing, it represents a challenging task and demands new, unconventional, and even counterintuitive (from a classical computing viewpoint) techniques. For example, note that most of the factorizations developed in this paper for the permutation matrices $\Pi_{2^n}$, $P_{2^n}$, and $Q_{2^n}$ were not previously known in classical computing and, in fact, they are not at all efficient for a classical implementation. Also, the implementation of the permutation matrices reveals some of the surprises of quantum computing in contrast to classical computing, in the sense that certain operations that are hard to implement in classical computing are easier to implement in quantum computing, and vice versa. As a concrete example, note that while the classical implementation of the permutation matrices $\Pi_{2^n}$ and (particularly) $P_{2^n}$ is much harder (in terms of data movement pattern) than that of the permutation matrix $Q_{2^n}$, their quantum implementation is much easier and more straightforward than that of $Q_{2^n}$. In this paper, we focused on the set of permutation matrices arising in the development of quantum wavelet transforms and analyzed three techniques for their quantum implementation. However, it is clear that permutation matrices will also play a major role in deriving compact and efficient factorizations, i.e., with polynomial time and space complexity, for other unitary operators by exposing and exploiting their specific structure. Therefore, we strongly believe that a more systematic study of permutation matrices is needed in order to develop further insight into efficient techniques for their implementation in quantum circuits. Such a study might eventually lead to the discovery of new and more efficient approaches for the implementation of unitary transformations and therefore quantum computation.
\clearpage \noindent {\bf Acknowledgement} The research described in this paper was performed at the Jet Propulsion Laboratory (JPL), California Institute of Technology, under contract with the National Aeronautics and Space Administration (NASA). This work was supported by the NASA/JPL Center for Integrated Space Microsystems (CISM), NASA/JPL Advanced Concepts Office, and NASA/JPL Autonomy and Information Technology Management Program. \vspace{0.1in} \noindent {\bf Appendix: Commutation of the Operators $I_{2^i} \otimes \Pi_{2^{n-i}}$ with $N \otimes I_{2^{n-j}} \oplus I_{2^n - 2^{n-j+1}}$} We first prove that every operator of the form $I_{2^i} \otimes \Pi_{2^{n-i}}$, for $i = n-2$ to $1$, commutes with all the operators of the form $N \otimes I_{2^{n-j}} \oplus I_{2^n - 2^{n-j+1}}$, for $j = i$ to $2$, by simply showing that \begin{equation} (I_{2^i} \otimes \Pi_{2^{n-i}})(N \otimes I_{2^{n-j}} \oplus I_{2^n - 2^{n-j+1}}) = (N \otimes I_{2^{n-j}} \oplus I_{2^n - 2^{n-j+1}})(I_{2^i} \otimes \Pi_{2^{n-i}}) \end{equation} The matrix $I_{2^i} \otimes \Pi_{2^{n-i}}$ is a block diagonal matrix and, since $j \le i$, its diagonal blocks can be grouped as \begin{equation} I_{2^i} \otimes \Pi_{2^{n-i}} = I_{2^{i-j+1}} \otimes \Pi_{2^{n-i}} \oplus I_{2^i - 2^{i-j+1}} \otimes \Pi_{2^{n-i}} \end{equation} wherein the first term is of dimension $2^{n-j+1}$. It can be then shown that \begin{equation} (I_{2^{i-j+1}} \otimes \Pi_{2^{n-i}} \oplus I_{2^i - 2^{i-j+1}} \otimes \Pi_{2^{n-i}})(N \otimes I_{2^{n-j}} \oplus I_{2^n - 2^{n-j+1}}) = N \otimes I_{2^{i-j}} \otimes \Pi_{2^{n-i}} \oplus I_{2^i - 2^{i-j+1}} \otimes \Pi_{2^{n-i}} \end{equation} and \begin{equation} (N \otimes I_{2^{n-j}} \oplus I_{2^n - 2^{n-j+1}})(I_{2^{i-j+1}} \otimes \Pi_{2^{n-i}} \oplus I_{2^i - 2^{i-j+1}} \otimes \Pi_{2^{n-i}}) = N \otimes I_{2^{i-j}} \otimes \Pi_{2^{n-i}} \oplus I_{2^i - 2^{i-j+1}} \otimes \Pi_{2^{n-i}} \end{equation} It now remains to show that every operator of the form $I_{2^i} \otimes \Pi_{2^{n-i}}$ commutes with the operator $N \otimes I_{2^{n-1}}$, i.e., the case $j = 1$. This is simply proved by first using the fact that \begin{equation} I_{2^i} \otimes \Pi_{2^{n-i}} = I_2 \otimes (I_{2^{i-1}} \otimes \Pi_{2^{n-i}}) \end{equation} and then showing that \begin{equation} (I_2 \otimes (I_{2^{i-1}} \otimes \Pi_{2^{n-i}}))( N \otimes I_{2^{n-1}}) = (N \otimes I_{2^{n-1}})(I_2 \otimes (I_{2^{i-1}} \otimes \Pi_{2^{n-i}})) = N \otimes I_{2^{i-1}} \otimes \Pi_{2^{n-i}} \end{equation} \vspace{0.1in}
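The commutation relations established above can also be confirmed numerically. The following Python/NumPy sketch (our illustration) checks all admissible pairs $(i, j)$ for $n = 4$: \begin{verbatim}
import numpy as np

Nx = np.array([[0, 1], [1, 0]])

def Pi(m):             # perfect shuffle from its element-wise definition
    M = np.zeros((m, m), dtype=int)
    for i in range(m):
        M[i, i // 2 if i % 2 == 0 else (i - 1) // 2 + m // 2] = 1
    return M

n = 4
for i in range(1, n - 1):                   # i = 1, ..., n-2
    A = np.kron(np.eye(2 ** i, dtype=int), Pi(2 ** (n - i)))
    for j in range(1, i + 1):               # j = 1, ..., i
        B = np.eye(2 ** n, dtype=int)
        s = 2 ** (n - j + 1)                # size of the N (x) I block
        B[:s, :s] = np.kron(Nx, np.eye(2 ** (n - j), dtype=int))
        assert np.array_equal(A @ B, B @ A)  # the claimed commutation
\end{verbatim}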
\section{On the completeness of two existing formulations} \label{sec:Appendix} In this section, we present the edge assignment formulation and the extended edge formulation including NDDs, as proposed by \cite{Constantino2013}. We show that, for these formulations to model chains correctly, the inclusion of infeasible-cycle-breaking constraints is required, a requirement not mentioned in their paper. \subsection{Edge Assignment Formulation} \label{sec:EAF} Following notation equivalent to that used by \cite{Constantino2013}, we introduce the following. Let $D = (V, A)$ be a digraph representing compatibilities among donors (single or paired) and PDPs. The set of vertices $V = \{1,..., \vert \mathscr{P} \vert + \vert \mathscr{N} \vert \}$ contains $\vert \mathscr{P} \vert$-many PDPs\ and $\vert \mathscr{N} \vert$-many NDDs. Let vertices $\{1,...,\vert \mathscr{N} \vert\}$ represent NDDs\ and vertices $\{\vert \mathscr{N} \vert + 1,..., \vert \mathscr{N} \vert + \vert \mathscr{P} \vert\}$ represent PDPs. An arc $(i,j) \in A$ exists if the donor at vertex $i$ is compatible with the patient at vertex $j$. Assume that a dummy patient is associated with each NDD, so that paired donors $j \in \{\vert \mathscr{N} \vert + 1, ..., \vert \mathscr{N} \vert + \vert \mathscr{P} \vert\}$ are compatible with each dummy patient $i \in \{1,..., \vert \mathscr{N}\vert \}$. Also, consider $\vert V \vert$ as an upper bound on the number of cycles and chains in any feasible solution. For each vertex $\ell \in \{\vert \mathscr{N} \vert + 1,...,\vert \mathscr{P} \vert + \vert \mathscr{N} \vert\}$, let $V^{\ell} = \{i \in V \mid i \ge \ell \}$ be the set of vertices forming cycles with index greater than or equal to $\ell$, whereas for each index $\ell \in \{1,..., \vert \mathscr{N} \vert\}$ let $V^{\ell} = \{i \in \mathscr{P}\} \cup \{\ell\}$ be the set of vertices forming part of a chain started by the $\ell$-th NDD. Notice that only vertices $i \ge \ell$ are included in each vertex set to remove multiplicity of solutions. Moreover, it can happen that $V^{\ell} = \emptyset$ for some $\ell \in \{1,..., \vert V \vert \}$, e.g., if all vertices pointing to or receiving an arc from the vertex with the lowest index are removed. Thus, denote by $\mathscr{L}$ the set of indices for which $V^{\ell} \ne \emptyset$. Lastly, consider the following decision variables: $$ x_{ij} = \left\{ \begin{array}{ll} 1, & \mbox{if arc } (i,j) \mbox{ is selected in a cycle or chain} \\ 0, & \mbox{otherwise} \end{array} \right. $$ $$ y_{i}^{\ell} = \left\{ \begin{array}{ll} 1, & \mbox{if node } i \mbox{ is assigned to the } \ell\mbox{-th cycle (chain)} \\ 0, & \mbox{otherwise} \end{array} \right.
$$ Then, the edge assignment formulation is defined as follows: \begin{subequations} \label{sub:EAF} \begin{align} \max &\sum_{(i,j) \in A} w_{ij}x_{ij} \label{eq:ObjEAF}\\ &\sum_{j:(j,i) \in A} x_{ji} = \sum_{j:(i,j) \in A} x_{ij} && i \in V \label{eq:Balance}\\ &\sum_{j:(i,j) \in A} x_{ij} \le 1&& i \in V \label{eq:MaxOutput}\\ &\sum_{i \in \{\ell\} \cup \{\vert \mathscr{N} \vert + 1, ..., \vert \mathscr{P} \vert + \vert \mathscr{N} \vert \}} y_{i}^{\ell} \le L && \ell \in \{1,..., \vert \mathscr{N} \vert\} \label{eq:ChainSize}\\ &\sum_{i \ge \vert \mathscr{N} \vert + 1} y_{i}^{\ell} \le K && \ell \in \{\vert \mathscr{N} \vert + 1,..., \vert \mathscr{N} \vert + \vert \mathscr{P} \vert\} \label{eq:CycleSize}\\ &\sum_{\ell \in \mathscr{L}: i \in V^{\ell}} y^{\ell}_{i} = \sum_{j: (i,j) \in A} x_{ij} && i \in V \label{eq:activate}\\ &y_{i}^{\ell} + x_{ij} \le 1 + y_{j}^{\ell}&& (i,j) \in A, \ell \in \mathscr{L}, i \in V^{\ell} \label{eq:assignment}\\ &y^{\ell}_{i} \le y^{\ell}_{\ell}&& \ell \in \mathscr{L}, i \in V^{\ell} \label{eq:vlvertex}\\ &y^{\ell}_{i} \in \{0,1\} && \ell \in \mathscr{L}, i \in V^{\ell} \label{eq:integrality1}\\ &x_{ij} \in \{0,1\}&& (i,j) \in A \label{eq:integrality2} \end{align} \end{subequations} Constraints \eqref{eq:Balance} ensure that the patient at vertex $i$ receives a kidney if and only if the donor at vertex $i$ donates one. Constraints \eqref{eq:MaxOutput} allow each donor at most one donation. Constraints \eqref{eq:ChainSize} and \eqref{eq:CycleSize} limit the length of chains and cycles, respectively. Constraints \eqref{eq:activate} ensure that vertex $i$ is in a cycle (chain) if and only if there is an assignment of $i$ to some $\ell$. Constraints \eqref{eq:assignment} state that if vertex $i$ is in cycle (chain) $\ell$ and donor $i$ gives a kidney to recipient $j$, then vertex $j$ must also be in the $\ell$-th cycle (chain). Constraints \eqref{eq:vlvertex} establish that a vertex $i \in V^{\ell}$ can be assigned to the $\ell$-th cycle (chain) only if vertex $\ell$ is also assigned. Constraints \eqref{eq:integrality1} and \eqref{eq:integrality2} define the decision variables' domains. We now show an example of a solution satisfying \eqref{sub:EAF} that is nevertheless infeasible for the KEP. Consider Figure \ref{fig:Copies2} and assume $K = 3$ and $L = 6$. Notice that in the solution, $y^{2}_{2} = y^{2}_{3} = y^{2}_{4} = y^{2}_{5} = y^{2}_{6} = y^{3}_{3} = y^{3}_{4} = y^{3}_{5} =y^{3}_{6} = y^{4}_{4} = y^{4}_{5} = y^{4}_{6} = y^{5}_{5} = y^{5}_{6} = x_{46} = x_{54} = x_{56} = x_{65} = x_{52} = x_{62} = x_{26} = 0$ and $y^{1}_{1} = y^{1}_{2} = y^{1}_{3} = y^{1}_{4} = y^{1}_{5} = y^{1}_{6} = x_{53} = x_{36} = x_{64} = x_{45} = x_{12} = x_{21} = 1$. \begin{figure}[ht] \caption{Counterexample where \eqref{sub:EAF} and \eqref{sub:EEF} provide an infeasible solution. An altruistic donor is represented by a square, and blue vertices correspond to the vertex with the lowest index in the $\ell$-th cycle or chain.
Dashed arcs indicate compatibility of paired donors with a dummy patient associated to the altruistic donor, solid arcs indicate compatibilities among real donors and patients, and bold arcs represent the ones selected in a solution.} \vspace*{0.25cm} \begin{subfigure}{.20\textwidth} \centering \caption{$D = (V,A)$} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{altru}=[rectangle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[transition] (waiting) at (0,2) {$5$}; \node[transition] (critical) at (0,1) {$4$}; \node[transition] (semaphore) at (0,0) {$6$}; \node[altru] (alt) at (2,1) {$1$}; \node[transition] (leave critical) at (1,1) {$2$}; \node[transition] (enter critical) at (-1,1) {$3$}; \draw [dashed, ->] (waiting) to [bend left=45] (alt); \draw [dashed, ->] (leave critical) [bend right=45] to (alt); \draw [dashed, ->] (semaphore) [bend right=45] to (alt); \draw [dashed, ->] (critical) [bend left=45] to (alt); \draw [dashed, ->] (enter critical) [bend left=45] to (alt); \draw [-{Classical TikZ Rightarrow}] (alt) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend left=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (enter critical); \draw [-{Classical TikZ Rightarrow}] (enter critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (leave critical) to (semaphore); \end{tikzpicture} \label{fig:PCompleteGraph2} \end{subfigure} \begin{subfigure}{.20\textwidth} \centering \caption{$\ell = 1$. 
Chain 1} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{altru}=[rectangle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[transition] (waiting) at (0,2) {$5$}; \node[transition] (critical) at (0,1) {$4$}; \node[transition] (semaphore) at (0,0) {$6$}; \node[altru] (alt) at (2,1) {$1$}; \node[transition] (leave critical) at (1,1) {$2$}; \node[transition] (enter critical) at (-1,1) {$3$}; \draw [-{Classical TikZ Rightarrow}, ultra thick] (alt) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}, ultra thick] (leave critical) to [bend right=45] (alt); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend left=45] (leave critical); \draw [-{Classical TikZ Rightarrow}, ultra thick] (waiting) to [bend right=45] (enter critical); \draw [-{Classical TikZ Rightarrow}, ultra thick] (enter critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}, ultra thick] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}, ultra thick] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (leave critical) to (semaphore); \end{tikzpicture} \label{fig:FirstCopy2} \end{subfigure} \begin{subfigure}{.20\textwidth} \centering \caption{$\ell = 2$. Cycle 1} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{blank}=[circle,draw=white,fill=white,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[blank] (alt) at (2,1) {}; \node[transition] (waiting) at (0,2) {$5$}; \node[transition] (critical) at (0,1) {$4$}; \node[transition] (semaphore) at (0,0) {$6$}; \node[place] (leave critical) at (1,1) {$2$}; \node[transition] (enter critical) at (-1,1) {$3$}; \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend left=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (enter critical); \draw [-{Classical TikZ Rightarrow}] (enter critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (leave critical) to (semaphore); \end{tikzpicture} \label{fig:FirstCopy3} \end{subfigure} \begin{subfigure}{.20\textwidth} \centering \caption{$\ell = 3$. 
Cycle 2} \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{blank}=[circle,draw=white,fill=white,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[blank] (alt) at (2,1) {}; \node[transition] (waiting) at (0,2) {$5$}; \node[transition] (critical) at (0,1) {$4$}; \node[transition] (semaphore) at (0,0) {$6$}; \node[place] (enter critical) at (-1,1) {$3$}; \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (enter critical); \draw [-{Classical TikZ Rightarrow}] (enter critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \end{tikzpicture} \label{fig:FirstCopy4} \end{subfigure} \begin{subfigure}{0.3\textwidth} \centering \caption{$\ell = 4$. Cycle 3} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[transition] (waiting) at (0,2) {$5$}; \node[place] (critical) at (0,1) {$4$}; \node[transition] (semaphore) at (0,0) {$6$}; \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \end{tikzpicture} \label{fig:FirstCopy5} \end{subfigure} \begin{subfigure}{0.3\textwidth} \centering \caption{$\ell = 5$. 
Cycle 4} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[place] (waiting) at (0,2) {$5$}; \node[transition] (semaphore) at (0,0) {$6$}; \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \end{tikzpicture} \label{fig:FirstCopy6} \end{subfigure} \begin{subfigure}{0.3\textwidth} \centering \caption{Complete solution} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{altru}=[rectangle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[transition] (waiting) at (0,2) {$5$}; \node[transition] (critical) at (0,1) {$4$}; \node[transition] (semaphore) at (0,0) {$6$}; \node[altru] (alt) at (2,1) {$1$}; \node[transition] (leave critical) at (1,1) {$2$}; \node[transition] (enter critical) at (-1,1) {$3$}; \draw [-{Classical TikZ Rightarrow}, ultra thick] (alt) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}, ultra thick] (leave critical) to [bend right=45] (alt); \draw [-{Classical TikZ Rightarrow}, ultra thick] (waiting) to [bend right=45] (enter critical); \draw [-{Classical TikZ Rightarrow}, ultra thick] (enter critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}, ultra thick] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}, ultra thick] (semaphore) to [bend right=45] (critical); \end{tikzpicture} \label{fig:FirstCopy7} \end{subfigure} \label{fig:Copies2} \end{figure} \subsection{EEF: Extended Edge Formulation} Consider $\vert \mathscr{P} \vert + \vert \mathscr{N} \vert$ copies of the graph $D$, $D^{\ell} = (V^{\ell}, A^{\ell})$, where $V^{\ell}$ is as defined in Section \ref{sec:EAF} and $A^{\ell} = \{(i,j) \in A \mid i,j \in V^{\ell} \}$ is the set of arcs in the $\ell$-th copy. Recall that $\vert \mathscr{P} \vert$ and $\vert \mathscr{N} \vert$ are upper bounds on the number of cycles and chains in a feasible solution, respectively. In each copy $\ell \in \{1,..., \vert \mathscr{N} \vert\}$, chains are triggered by the $\ell$-th NDD\ and at most $L$ arcs can be selected. In each copy $\ell \in \{\vert \mathscr{N} \vert + 1,..., \vert \mathscr{N} \vert + \vert \mathscr{P} \vert\}$, all cycles include the vertex with the lowest index in that copy (e.g., see Figure \ref{fig:Copies2}). Let $x_{ij}^{\ell}$ be an arc variable taking the value one if the arc $(i,j) \in A^{\ell}$ is selected in the $\ell$-th copy and zero otherwise. Similarly to the EAF, consider $\mathscr{L}$ as the set of indices for which $V^{\ell} \ne \emptyset$.
Then, the extended edge formulation is defined as follows: \begin{subequations} \label{sub:EEF} \begin{align} \max &\sum_{\ell \in \mathscr{L}}\sum_{(i,j) \in A^{\ell}} w_{ij}x_{ij}^{\ell} \label{eq:ObjEEF}\\ &\sum_{j:(j,i) \in A^{\ell}} x_{ji}^{\ell} = \sum_{j:(i,j) \in A^{\ell}} x_{ij}^{\ell} && \ell \in \mathscr{L}, i \in V^{\ell} \label{eq:EBalance}\\ &\sum_{\ell \in \mathscr{L}} \sum_{j:(i,j) \in A^{\ell}} x_{ij}^{\ell} \le 1&& i \in V \label{eq:EMaxOutput}\\ &\sum_{(i,j) \in A^{\ell}} x_{ij}^{\ell} \le L && \ell \in \{1,..., \vert \mathscr{N} \vert\} \label{eq:EChainSize}\\ &\sum_{(i,j) \in A^{\ell}} x_{ij}^{\ell} \le K && \ell \in \{\vert \mathscr{N} \vert + 1,..., \vert \mathscr{P} \vert + \vert \mathscr{N} \vert\} \label{eq:ECycleSize}\\ &\sum_{j:(i,j) \in A^{\ell}} x_{ij}^{\ell} \le \sum_{j:(\ell,j) \in A^{\ell}} x_{\ell j}^{\ell} && \ell \in \mathscr{L}, i \in V^{\ell} \label{eq:ESymmetry}\\ &x_{ij}^{\ell} \in \{0,1\}&& \ell \in \mathscr{L}, (i,j) \in A^{\ell} \label{eq:Eintegrality} \end{align} \end{subequations} Constraints \eqref{eq:EBalance} ensure that a patient in the $\ell$-th graph copy receives a kidney if and only if his or her paired donor donates one. Constraints \eqref{eq:EMaxOutput} allow every donor (paired or singleton) to donate at most one kidney, in at most one copy of the graph. Constraints \eqref{eq:EChainSize} and \eqref{eq:ECycleSize} guarantee the maximum length allowed for chains and cycles in terms of arcs. Constraints \eqref{eq:ESymmetry} ensure that a copy is used only if the vertex with the lowest index is selected. Lastly, constraints \eqref{eq:Eintegrality} define the nature of the decision variables. The same counterexample used for the EAF in Section \ref{sec:EAF} applies to the EEF. For the example given in Figure \ref{fig:Copies2}, consider $K = 3$ and $L = 6$. Notice that only the chain copy is selected, but due to the presence of subtours of length exceeding the cycle size limit, both formulations can provide an infeasible solution. Therefore, infeasible-cycle-breaking constraints are required for every chain copy. \section{Proofs} \label{sec:Proofs} In this section, we provide the proofs of the results on the complexity of finding a positive-price column via MDDs. For the sake of consistency, the numbering of the following propositions coincides with that used in the main body of the paper. \setcounter{theorem}{2} \begin{prop} Given the reduced costs $\hat{r}_{c}^{i}$ and $\bar{r}^{i}_{p}$ expressed as an arc-separable function for all $(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}$ and $(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}$, a positive-price cycle, if one exists, can be found in time $\mathcal{O}(\sum_{i \in \hat{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert)$. Similarly, a positive-price chain can be found in $\mathcal{O}(\sum_{i \in \bar{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert)$. \end{prop} \proof{Proof.} For every arc $(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}$ and $(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}$, $\vert \delta^{\fm}_{-}(n_{s}) \vert$ comparisons need to be performed to obtain $\hat{\eta}^{\fm}((n_{s}, n_{s'}))$ and $\bar{\eta}^{\fm}((n_{s}, n_{s'}))$, respectively, in \eqref{eq:recu1a}.
Therefore, for the $i$-th MDD of a cycle copy, $\sum_{(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert$ comparisons are required to compute $\hat{\eta}^{\fm}$, whereas for the $i$-th MDD of a chain copy, the same number of comparisons is required plus $\vert \mathcal{\bar{A}}^{\fm} \vert$ comparisons of all arcs, in \eqref{eq:2b}, before obtaining $\bar{\eta}^{\fm}$. Because there are $\vert \hat{I} \vert$ cycle MDDs and $\vert \bar{I} \vert$ chain MDDs, it follows that the time complexity is as shown above. \hfill$\square$ \begin{prop} The size of the input $\sum_{i \in \hat{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert$ grows as $\vert \hat{\mathscr{V}}^{i} \vert ^{K + 1}$ does. \end{prop} \proof{Proof.} Without loss of generality, assume $\mathscr{D}$ is complete. As stated before, the layer of an arc $a: (n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}$ is the layer to which its source node belongs, i.e., if node $n_{s}$ is on layer $k$, then $\ell(a) = k$. Moreover, let $\mathcal{\hat{A}}^{\fm}_{k} := \{a \in \mathcal{\hat{A}}^{\fm} \mid \ell(a) = k \}$ be the set of arcs that belong to layer $k$. By construction of the MDDs, $\mathbf{r}$ has only one outgoing arc, such that $val((\mathbf{r}, n_1)) = v^{*}_{i}$. In the second layer, $\mathcal{L}_{2}$, the cardinality of $\mathcal{\hat{A}}^{\fm}_{2}$ can be up to $\vert \hat{\mathscr{V}}^{i} \vert - 1$, corresponding to the vertices $v \in \hat{\mathscr{V}}^{i} \setminus \{v^{*}_{i}\}$ that can be selected in the second position of a cycle. Notice that, since there can be the same number of 2-way cycles, an arc $a \in \mathcal{\hat{A}}^{\fm}_{2}$ has a sink node $n$ such that $(n, \mathbf{t}) \in \mathcal{\hat{A}}^{\fm}$. If the length of a cycle is greater than two, then the cardinality of $\mathcal{\hat{A}}^{\fm}_{3}$ can be up to $\vert \hat{\mathscr{V}}^{i} \vert - 2$, representing the $\vert \hat{\mathscr{V}}^{i} \vert - 2$ vertices $v \in \hat{\mathscr{V}}^{i} \setminus \{v^{*}_{i}\}$ that can be selected in the third position of a cycle. The same process is repeated until, in layer $\mathcal{L}_K$, there are $\vert \hat{\mathscr{V}}^{i} \vert - (K - 1)$ vertices $v \in \hat{\mathscr{V}}^{i}$ to choose from, and thus $\vert \hat{\mathscr{V}}^{i} \vert - (K - 1)$ arcs $a \in \mathcal{\hat{A}}^{\fm}$ pointing to $n$. Thus, $\vert \mathcal{\hat{A}}^{\fm} \vert$ satisfies \begin{subequations} \begin{align} \prod_{k =2}^{K} (\vert \hat{\mathscr{V}}^{i} \vert - (k - 1)) + \sum_{k = 2}^{K - 1} (\vert \hat{\mathscr{V}}^{i} \vert - (k - 1) ) < \vert \hat{\mathscr{V}}^{i} \vert ^{K - 1} + K \vert \hat{\mathscr{V}}^{i} \vert \end{align} \end{subequations} The second term on the left-hand side corresponds to the number of arcs at every layer whose sink node is $n$, thus closing up cycles using fewer than $K$ arcs. Therefore, under a worst-case scenario in which $ \vert \delta^{\fm}_{-}(n_{s}) \vert$ and $\vert \hat{I} \vert$ tend to $\vert \hat{\mathscr{V}}^{i} \vert$, the complexity of finding a positive-price cycle becomes $\mathcal{O}(\vert \hat{\mathscr{V}}^{i} \vert ^{K + 1})$. \hfill$\square$ \begin{prop} The size of the input $\sum_{i \in \bar{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert$ grows as $\vert \bar{\mathscr{V}}^{i} \vert ^{L + 2}$ does for bounded chains and as $\vert \bar{\mathscr{V}}^{i} \vert !$ when $L \rightarrow \infty$.
\end{prop} \proof{Proof.} A reasoning similar to that of the previous proposition can be followed, except that a chain must be cut short if, by visiting a new vertex $v \in \bar{\mathscr{V}}^{i}$ in a sequence of the state transition graph, at least one PDP\ would be present more than once, thereby violating the condition of being a simple path. We know that for a path to have $L$-many arcs, it is necessary to have a sequence with $L$ PDPs; thus, $\vert \mathcal{\bar{A}}^{\fm} \vert$ tends to $\vert \bar{\mathscr{V}}^{i} \vert^{L}$ and $\sum_{i \in \bar{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert \approx \vert \bar{\mathscr{V}}^{i} \vert^{L + 2}$. Therefore, for bounded chains, finding a positive-price chain can be done in time $\mathcal{O}(\vert \bar{\mathscr{V}}^{i} \vert ^{L + 2})$. The second part follows from the fact that after we visit $\ell$ vertices, there are still $\vert \bar{\mathscr{V}}^{i} \vert - \ell$ ways to choose the next one, until only one can be chosen; thus, the size of the input when $L$ is unbounded grows as $\vert \bar{\mathscr{V}}^{i} \vert !$. \hfill$\square$ \section{Additional Results} \label{sec:OtherResults} Figure \ref{fig:cols.pdf} depicts the type and number of columns found in each phase for individual runs across all the $K$-$L$ combinations. The x-axis represents the total time (in minutes) to solve the pricing problem of a single run, i.e., an instance on a $K$-$L$ setting, during the three phases, denoted by PH1, PH2 and PH3. For every x-point there may be multiple y-points representing the number of columns (in thousands) found in a specific phase (sub-phase) and whether they are cycle or chain columns. Therefore, for the same x-point, multiple y-points may correspond to the same instance, possibly under different settings, if the markers share the same size and color, regardless of the shape. For similar total pricing-problem times, markers may overlap. As an example, consider the time interval from 2min to 10min. In the cycle subplot there are big (purple) circle markers indicating that from 10k up to 30k cycle columns were found in PH1, as opposed to the chain subplot, where no such markers appear. This means that this subset of instances corresponds to $L = 0$. As another example, an instance with 2252 PDPs\ and 204 NDDs, whose pricing time is 15.6min, has a green circle at around 12k and 6k in the cycle and chain subplots, respectively, plus a triangle indicating one chain found through \eqref{objLP}. Thus, the total number of columns of a run is the sum over the number of columns indicated by all markers in the y-axis with respect to the same x-point, provided that markers share color and size. In this case there are 17,396 columns. Overall, most markers are circles, indicating that the majority of columns are found via MDDs across all runs, while PH2 and PH3 mostly help certify that no more positive-price columns exist. \begin{figure}[h!] \centering \caption{Classification of columns by run, phase and type. Marker types indicate different (sub-) phases of column generation.
Marker sizes are correlated with the number of PDPs, while marker colors indicate the number of NDDs.} \label{fig:cols.pdf} \includegraphics[scale=0.42]{Figures/Columns.pdf} \end{figure} \section{Branch and Price} \label{sec:BP} In this section we present what is, to the best of our knowledge, the first B\&P \ algorithm for the KEP that is valid for long chains. For completeness, we first discuss the general motivation behind B\&P. We then proceed to detail our proposed B\&P \ implementation. Particularly, we focus on solving the pricing problems via multi-valued decision diagrams, a solution method novel to cycle and path packing problems in digraphs and, to the best of our knowledge, previously used only in a transportation scheduling problem \citep{Arvind2018}. \subsection{Background} For large instances, the cardinality of $\mathscr{\hat{C}}^{\fm}_{\Kcycle}$ and $\mathscr{\bar{C}}^{\fm}_{\Lchain}$ becomes prohibitively large, to the point that we cannot exhaustively state all decision variables in \eqref{objDis} or constraints in \eqref{objLag2opt}. Instead of considering the full set of variables in a linear program, \textit{column generation} works with a small subset of variables (\textit{columns}), forming the well-known \textit{restricted master problem} (RMP). By duality theory, if an improving column is missing from the RMP, a cycle or chain with positive reduced cost must be found and added to the RMP to improve its current objective value. To find such columns, one can solve tailored \textit{pricing problems}, which return either a ``positive-price'' cycle (chain) or a certificate that none exists. The RMP and the pricing problems are solved iteratively, typically until strong duality conditions are satisfied. As the solution of the RMP may not be integer, column generation is embedded into a branch-and-bound algorithm to obtain an optimal solution, yielding a B\&P \ algorithm. \subsection{Restricted master problem} Either \eqref{objDis} or its dual counterpart \eqref{objLag2opt} can serve as a master problem in our decomposition. To estimate the performance of both formulations, we solved \eqref{objDis} and \eqref{objLag2opt} via a column generation algorithm and a cutting plane method, respectively, for each of the ten PrefLib instances with 256 PDPs, no NDDs\ and $K = 3$. In each case, subproblems were solved as shown in Section \ref{sec:SolAp}. Both algorithms solved their corresponding master problem in under a second; thus, there is no significant difference between the two, although in 7 out of 10 cases \eqref{objDis} was faster. Therefore, we selected \eqref{objDis} as our RMP. \subsection{MDDs for Pricing Problems} Finding an assignment of vertices to a graph copy while minimizing penalization is equivalent to finding a positive-price cycle or chain. Therefore, whether we take the primal or dual problem of our RMP, subproblems can take the form of \eqref{sprbm:cycles} and \eqref{sprbm:chains}. Notice that subproblems can be formulated differently (see, e.g., \cite{Anderson2015, Duncan2019}), or need not be solved through MIP at all, provided that a faster algorithm exists.
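To make the interaction above concrete before reviewing existing pricing approaches, the following minimal Python sketch outlines the column generation loop; \texttt{solve\_rmp}, \texttt{price\_cycles} and \texttt{price\_chains} are hypothetical placeholders standing in for an LP solver call and for the MDD-based pricing routines developed below, not our actual implementation.
\begin{verbatim}
# Minimal sketch of the column generation loop (illustrative only).
# solve_rmp returns the RMP objective value and the vertex duals;
# price_cycles/price_chains return positive-price columns, or [].
def column_generation(columns, solve_rmp, price_cycles, price_chains):
    while True:
        objective, duals = solve_rmp(columns)  # restricted master problem
        new_cols = price_cycles(duals) + price_chains(duals)
        if not new_cols:
            # Certificate: no positive-price cycle or chain remains,
            # so the RMP solution solves the full master problem.
            return objective, columns
        columns.extend(new_cols)               # enrich the RMP and repeat
\end{verbatim}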
Solution methods to pricing problems in the literature for the cycle-only version include heuristics \citep{Abraham2007}, MIP \citep{Roth2007}, or a combination of exact and heuristic algorithms \citep{Klimentova2014, Lam2020}. Also, \cite{Glorie2014} and \cite{Plaut2016b} tackled this version by using a modified Bellman-Ford algorithm in a reduced graph, which applies to cycles but not to chains \citep{Plaut2016}. Pricing algorithms including long chains have been less studied. Next, we show how to solve the pricing problems by means of MDDs for the general version of the KEP. \subsubsection{Decision Diagrams} A decision diagram is a graphical data structure used in optimization to represent the solution set of a given problem. Particularly, a decision diagram is a rooted, directed and acyclic layered graph where, if \textit{exact}, every path from the root node $\mathbf{r}$ to a terminal node $\mathbf{t}$ has a one-to-one correspondence with a solution in the feasible space of the optimization problem. If the objective function is arc separable, then a shortest-path-style algorithm can be used to find the optimal objective value. When the decision variables represented in the diagram are binary, the resulting diagram is a \textit{binary decision diagram} (BDD). Some applications of BDDs in the literature include finding improved optimization bounds \citep{Bergman2014}, solving two-stage stochastic programs \citep{Lozano2018} and solving pricing problems in graph coloring \citep{Morrison2016}. On the other hand, when the domain of the decision variables represented in the diagram includes three or more values, the decision diagram is an MDD. \cite{Cire2013} proposed solving sequencing problems using MDDs and showed primary applications in scheduling and routing (see also \citet{Kinable2017, Castro2020}). The only work we are aware of in which MDDs are used to solve pricing problems is by \cite{Arvind2018}. They tackle a last-mile transportation problem in which passengers reach their final destination by using a last-mile service system linked to the terminal station of a mass transit system. It is assumed that the desired arrival time of each passenger is known in advance. For each destination, the authors build MDDs for the subset of passengers who share the same destination. They distinguish between one-arcs and zero-arcs to refer to a passenger being aboard a vehicle or not, respectively. A path in these MDDs represents a partition of passengers who belong to the same trip and the time at which they depart to their final destination. Instead of a partition of elements, we find feasible cycles or chains in each graph copy, as described next. \subsubsection{Construction of MDDs for the KEP} Decision diagrams can be constructed by finding the \textit{state transition graph} of a dynamic programming (DP) formulation and reducing it afterwards (see \cite{Cire2013, Hooker2013} for more details). We model the pricing problems through DP by formulating two models: one for cycles and the other for chains. Particularly, a DP model is formulated for each cycle copy in \eqref{sprbm:cycles}, and for each chain copy in \eqref{sprbm:chains}. A \textit{state} in these models represents the vertices $v \in \mathscr{V}$ visited at previous \textit{stages}, where a stage corresponds to the position of a vertex in a cycle or chain.
\textit{Controls} $\hat{h}^{\vert K \vert}$ and $\bar{h}^{\vert L \vert} $ take the index value of a vertex $v \in \mathscr{P}$ and a vertex $v \in \mathscr{V}$, respectively, indicating that it is assigned to a cycle or chain in the position given by the control index, i.e., in position $k \le K$ of a cycle or position $\ell \le L$ of a chain. As the domain of the $\hat{h}$ and $\bar{h}$ variables contains three or more values, the resulting diagram is an MDD. It is in this context where the FVS found in Section \ref{sec:Lagrange} becomes particularly relevant. The goal is to create as many \textit{cycle MDDs} as cycle copies, where each MDD includes feasible cycles with their corresponding vertex $v^{*}_{i} \in \fvs$. As for chains, a \textit{chain MDD} is created for every chain copy so as to find positive-price chains from every NDD. The MDD $\mgraph = (\mnodes, \marcs)$ for the $i$-th cycle copy of $\mathscr{D}$ has its node set $\mathcal{\hat{N}}^{\fm}$ partitioned into $K$ layers, $\mathcal{L}_{1},...,\mathcal{L}_{K}$, corresponding to the decision of which PDP\ belongs to the $k$-th position in a cycle, denoted by $\pi_{k}$, plus two terminal layers $\mathcal{L}_{K + 1}$ and $\mathcal{L}_{K + 2}$ representing the close-up of a feasible cycle. Layers $\mathcal{L}_{1}$, $\mathcal{L}_{2}$, $\mathcal{L}_{K + 1}$ and $\mathcal{L}_{K + 2}$ have a single node each. Every arc $a \in \mathcal{\hat{A}}^{\fm}$ has an associated label $val(a) \in \hat{\mathscr{V}}^{i}$ such that $\pi_{k} = val(a)$ corresponds to assigning vertex $val(a)$ to the $k$-th position of a cycle. Since a cycle in the $i$-th cycle copy starts with the $i$-th feedback vertex, $\pi_{1} = v^{*}_i$. Thus, an arc-specified path ($a_1, ..., a_{k}$) from node $\mathbf{r}$ to node $\mathbf{t}$ defines the PDP\ sequence $(\pi_{1},...,\pi_{k}) = (val(a_1), ..., val(a_{k}))$, equivalent to a feasible cycle in $\mathscr{D}$. On the other hand, the set of nodes $\mathcal{\bar{N}}^{\fm}$ for the $i$-th MDD of a chain copy, $\mgraphch = (\mnodesch, \marcsch)$, is partitioned into $L + 2$ layers, with a single node in the first and last layers, representing the start and end of a chain, respectively. Recall that for a chain to involve $L$ transplants, $L + 1$ vertices $v \in \mathscr{V}$ are required. Likewise, an arc $a \in \mathcal{\bar{A}}^{\fm}$ has a label $\pi_{\ell} = val(a)$ indicating the vertex $v \in \bar{\mathscr{V}}^{i}$ at the $\ell$-th position in a chain, noticing that $\pi_{1} = u_{i}$. A path starting at the root node $\mathbf{r}$ and ending at a node on the third layer or higher represents a feasible chain, since its length is at least one. \begin{figure}[ht] \caption{MDDs for the example of Figure \ref{fig:FirstCopy}. Left (vertex $4$ is a PDP).
Right (vertex $4$ is an NDD).} \label{fig:MDDs} \vskip 0.2cm \scalebox{.85}{ \begin{subfigure}{.5\textwidth} \centering \caption{Exact decision diagram, $K = 4$} \label{sub:exactMDD} \begin{tikzpicture} \begin{scope}[every node/.style={circle, fill=beaublue, inner sep=1pt,minimum size=2pt}] \node (0) at (3,8.5) {$\mathbf{r}$}; \node (1) at (3,7) {$n_{1}$}; \node (2) at (1,5.5) {$n_{2}$}; \node (3) at (2,5.5) {$n_{3}$}; \node (5) at (5,5.5) {$n_{4}$}; \node (11) at (5,4) {$n_{5}$}; \node (20) at (3,2.5) {$n_{6}$}; \node (21) at (3,1) {$\mathbf{t}$}; \end{scope} \node (P1) at (-0.25, 7.75) {$\pi_1$}; \node (P2) at (-0.25, 6.25) {$\pi_2$}; \node (P3) at (-0.25, 4.75) {$\pi_3$}; \node (P4) at (-0.25, 3.25) {$\pi_4$}; \begin{scope}[>={Stealth[black]}, every node/.style={scale=0.8}], every edge/.style={draw=black, thick}], \path [->] (0) edge node[auto] {$4$} (1); \path [->] (1) edge[bend right = 20] node[above] {$5$} (2); \path [->] (1) edge[bend right = 10] node[above] {$3$} (3); \path [->] (1) edge[bend right=20] node[below] {$2$} (5); \path [->] (1) edge[bend left = 20] node[above] {$1$} (5); \path [->] (3) edge[bend right=20] node[left] {$5$} (20); \path [->] (5) edge[bend left = 10] node[auto] {$5$} (11); \path [->] (1) edge[bend right=20] node[left] {$5$} (20); \path [->] (1) edge[bend left=20] node[right] {$3$} (20); \path [->] (2) edge[bend right=35] node[left] {$3$} (20); \path [->] (5) edge[bend right = 10] node[right, above] {$5$} (20); \path [->] (11) edge[bend left = 20] node[right, below] {$3$} (20); \path [->] (20) edge node[auto] {$4$} (21); \end{scope} \end{tikzpicture} \label{fig:DGcycles} \end{subfigure} \begin{subfigure}{.5\textwidth} \centering \caption{Restricted decision diagram, $L = 3$} \label{subfig:MDDchains} \begin{tikzpicture} \node[circle, fill=beaublue, inner sep=1pt,minimum size=2pt] (0) at (3, 8.5){$\mathbf{r}$}; \begin{scope}[every node/.style={circle, fill=beaublue, inner sep=1pt,minimum size=2pt}] \node (1) at (3,7) {$n_{1}$}; \node (5) at (3,5.5) {$n_{4}$}; \node (11) at (3.5,4) {$n_{5}$}; \node (20) at (3,2.5) {$\mathbf{t}$}; \end{scope} \node (L0) at (4.75,8.4) {$\mathcal{L}_1$}; \node (L1) at (4.75, 7) {$\mathcal{L}_2$}; \node (L2) at (4.75, 5.5) {$\mathcal{L}_3$}; \node (L3) at (4.75, 4) {$\mathcal{L}_4$}; \node (L4) at (4.75, 2.5) {$\mathcal{L}_5$}; \node (L5) at (4.75, 1.2) {$\mathcal{L}_6$}; \begin{scope}[>={Stealth[black]}, every node/.style={scale=0.8}], every edge/.style={draw=black, thick}], \path [->] (0) edge node[auto] {$4$} (1); \path [->] (1) edge[bend right=20] node[left] {$2$} (5); \path [->] (1) edge[bend left = 20] node[right] {$1$} (5); \path [->] (5) edge[bend left = 10] node[auto] {$5$} (11); \path [->] (5) edge[bend right = 25] node[right, left] {$5$} (20); \path [->] (11) edge[bend left = 15] node[right, right] {$3$} (20); \end{scope} \end{tikzpicture} \label{fig:DGchains} \end{subfigure} } \label{fig:DG} \end{figure} Figure \ref{sub:exactMDD} depicts all possible sequences of PDPs\ $\pi_{1}, \pi_{2},...,\pi_{k}$ encoding feasible cycles on the graph copy of Figure \ref{fig:FirstCopy}. A value $\pi_{k}$ placed on an arc corresponds to the $k$-th vertex $v \in \mathscr{P}$ in a cycle covered by vertex $4$. For instance, the path $(\mathbf{r},n_1, n_3, n_6, \mathbf{t})$ in Figure \ref{sub:exactMDD} encodes the cycle $c = \{4,3,5,4\}$ in Figure \ref{fig:FirstCopy}. 
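To make the construction concrete, the following minimal Python sketch builds the (unreduced) state transition graph of the DP model for a single cycle copy; the arc container and function names are illustrative assumptions, duplicate states within a layer are merged, and the full reduction into an MDD \citep{Cire2013} is omitted.
\begin{verbatim}
# Sketch: state transition graph of the DP model for one cycle copy.
# A state is (set of visited PDPs, last PDP); copy_arcs holds (u, v) pairs.
def cycle_state_graph(copy_arcs, v_star, K):
    root = (frozenset([v_star]), v_star)
    layers, mdd_arcs = [[root]], []
    for k in range(2, K + 1):                   # position k in a cycle
        next_layer = []
        for (visited, last) in layers[-1]:
            for (u, v) in copy_arcs:
                if u == last and v not in visited:
                    state = (visited | {v}, v)  # extend the sequence by v
                    mdd_arcs.append(((visited, last), state, v))
                    if state not in next_layer: # merge identical states
                        next_layer.append(state)
        layers.append(next_layer)
    # States whose last vertex points back to v_star close up a cycle.
    closing = [s for layer in layers[1:] for s in layer
               if (s[1], v_star) in copy_arcs]
    return layers, mdd_arcs, closing

# Example: the cycle copy rooted at feedback vertex 4 with K = 4.
arcs = {(4,1),(4,2),(4,3),(4,5),(2,5),(1,5),(3,4),(5,4),(3,5),(5,3)}
layers, mdd_arcs, closing = cycle_state_graph(arcs, 4, 4)
\end{verbatim}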
Since exact MDDs can grow exponentially large, it might be necessary to limit their size, turning them into \textit{restricted} decision diagrams, as we also discuss in Section \ref{sec:SolAp}. A decision diagram is called restricted if the set of solutions corresponding to its $\mathbf{r}$-$\mathbf{t}$ paths is a subset of the entire feasible set of solutions. Figure \ref{subfig:MDDchains} shows a restricted MDD as if vertex $4$ in Figure \ref{fig:FirstCopy} were an NDD. In this example, the MDD is restricted to chains including only two out of the four vertices receiving an arc from vertex $4$, namely vertices $1$ and $2$. \subsubsection{Finding a positive-price column} Let $\delta^{\fm}_{-}(n_{s}) \subset \mathcal{\hat{A}}^{\fm}$ be the set of incoming arcs to a node $n_{s}$ in $\mathcal{\hat{N}}^{\fm}$ and $\ell(a)$ the layer index of the source node of arc $a$; e.g., in Figure \ref{sub:exactMDD}, $\delta^{\fm}_{-}(n_4) = \{(n_1, n_4)^{1}, (n_1, n_4)^{2}\}$ and $\ell((n_1, n_4)^{1}) = 2$, where the superscripts distinguish the two arcs coming into $n_{4}$, one with $\pi_{2} = {1}$ and the other with $\pi_{2} = {2}$. We define the recursive function values of an arc $a = (n_{s}, n_{s'})$ for the $i$-th MDD for cycles and chains, $\hat{\eta}^{\fm}(a)$ and $\bar{\eta}^{\fm}(a)$, respectively, as the maximum reduced cost of all paths ending at $a$: \vspace{-4mm} \begin{subequations} \label{eq:recu1a} \begin{empheq}[left={\hat{\eta}^{\fm}(a) = \bar{\eta}^{\fm}(a) = \empheqlbrace\,}]{align} & 0 & n_{s} = \mathbf{r} \label{eq:1a} \\ & \max_{a' \in \delta^{\fm}_{-}(n_{s})} \left \{ \hat{\eta}^{\fm}(a') + w_{val(a'), val(a)} - \lambda_{val(a')} \right \} & \mbox{otherwise} \label{eq:2a} \end{empheq} \end{subequations} The recursive function \eqref{eq:recu1a} is valid by Bellman's principle of optimality since the reduced cost of a cycle (or chain) in the $i$-th MDD, $\hat{r}^{i}_{c}$ (or $\bar{r}^{i}_{p}$), is arc separable \citep{Glorie2014} and the portion taken by every arc only depends on the previous PDP\ or NDD\ in the sequence. Thus, the maximum reduced cost of a cycle and chain, $\hat{\eta}^{\fm}$ and $\bar{\eta}^{\fm}$, respectively, is given by \vspace{-4mm} \begin{subequations} \label{sub:recucych} \begin{align} \hat{\eta}^{\fm}= &\max \left \{0, \hat{\eta}^{\fm}((n,\mathbf{t})) \right \}& i \in \hat{I} \label{eq:1b} \\ \bar{\eta}^{\fm}= & \max \left \{0, \max_{a \in \mathcal{\bar{A}}^{\fm}: \ell(a) \ge 2} \bar{\eta}^{\fm}(a) - \lambda_{val(a)} \right \}& i \in \bar{I} \label{eq:2b} \end{align} \end{subequations} Recursion \eqref{eq:1b} computes the maximum reduced cost of a cycle at the terminal node $\mathbf{t}$, since all paths need to reach $\mathbf{t}$ to close it up. For chains, on the other hand, any portion of a path in $\mathcal{\bar{G}}^{\fm}$ is a feasible path, so the longest path can be found at any layer of the MDD where the length of a chain (in terms of arcs in $\bar{\mathscr{A}}^{i}$) is at least one. The term subtracted in \eqref{eq:2b} captures the Lagrange multiplier of the last pair in a chain. We know from \eqref{objLag2opt} that $\lambda_{v} \in \mathbb{R}_{+}$ for all $v \in \mathscr{V}$; thus, if the Lagrange multiplier of a given vertex $v \in \mathscr{V}$ is large enough to lead to a negative-price path at node $\mathbf{t}$, we may need to cut that path short at some previous vertex in the sequence to obtain a positive-price chain.
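Computing $\hat{\eta}^{\fm}$ and $\bar{\eta}^{\fm}$ thus amounts to a single longest-path pass over the arcs of each MDD. The following minimal Python sketch illustrates this pass; it assumes each MDD is given as a list of (source, sink, value) triples in layer order, with weights and multipliers stored in dictionaries, and it is an illustration rather than our exact implementation.
\begin{verbatim}
# Sketch of the recursion above over one MDD (illustrative only).
# mdd_arcs: (source, sink, val) triples in layer (topological) order;
# w[(u, v)]: transplant weights; lam[v]: vertex Lagrange multipliers.
def max_reduced_cost(mdd_arcs, w, lam, root, terminal, chain=False):
    eta, incoming, best = {}, {}, 0.0
    for arc in mdd_arcs:
        src, dst, val = arc
        if src == root:
            eta[arc] = 0.0                       # base case at the root
        else:
            eta[arc] = max(eta[b] + w[(b[2], val)] - lam[b[2]]
                           for b in incoming[src])
            if chain:
                # any prefix with at least one transplant is feasible;
                # subtract the multiplier of the last vertex
                best = max(best, eta[arc] - lam[val])
        incoming.setdefault(dst, []).append(arc)
    if not chain:
        # cycles must close up at the terminal node
        best = max([0.0] + [eta[a] for a in incoming.get(terminal, [])])
    return best
\end{verbatim}
On the restricted chain MDD of Figure \ref{subfig:MDDchains}, with unit weights and $\lambda_{2} = \lambda_{3} = 5$, this sketch returns $2$, matching the example discussed next.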
For instance, in Figure \ref{subfig:MDDchains} consider $\lambda_{2} = \lambda_{3} = 5$, all the other Lagrange multipliers set to zero, and unitary weights $w_{\uix \vix}$ for all $(\uix, \vix) \in \mathscr{A}$. Sequence $(4,1,5)$, representing a 2-length chain in $\bar{\mathscr{D}}^{i}$, is contained in $(4,1,5,3)$. Clearly, the former yields the highest reduced cost of a chain in Figure \ref{subfig:MDDchains}. Thus, $\bar{\eta}^{\fm} = \bar{\eta}^{\fm}((n_4, n_5)) = 2$. Next, we show a series of results on the complexity of computing a positive-price column via MDDs. The corresponding proofs are presented in Appendix \ref{sec:Proofs}. \setcounter{theorem}{2} \begin{prop} Given the reduced costs $\hat{r}_{c}^{i}$ and $\bar{r}^{i}_{p}$ expressed as an arc-separable function for all $(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}$ and $(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}$, a positive-price cycle, if one exists, can be found in time $\mathcal{O}(\sum_{i \in \hat{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert)$. Similarly, a positive-price chain can be found in $\mathcal{O}(\sum_{i \in \bar{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert)$. \end{prop} \begin{prop} The size of the input $\sum_{i \in \hat{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\hat{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert$ grows as $\vert \hat{\mathscr{V}}^{i} \vert ^{K + 1}$ does. \end{prop} \begin{prop} The size of the input $\sum_{i \in \bar{I}} \sum_{(n_{s}, n_{s'}) \in \mathcal{\bar{A}}^{\fm}} \vert \delta^{\fm}_{-}(n_{s}) \vert$ grows as $\vert \bar{\mathscr{V}}^{i} \vert ^{L + 2}$ does for bounded chains and as $\vert \bar{\mathscr{V}}^{i} \vert !$ when $L \rightarrow \infty$. \end{prop} Despite potentially very large diagram sizes in general, there are three reasons why finding a positive-price column can still be done efficiently in practice. First, even though $\vert \mathscr{V} \vert$ can be large, the arc density of $\mathscr{D}$ for real KEP instances is below 50\%. Second, for small values of $K$ and $L$, it is possible to considerably reduce the size of the input by selecting an FVS, $\fvs$, with small cardinality. Lastly, MDDs are reduced significantly after the state transition graph is obtained \citep{Cire2013}; e.g., see Figure \ref{sub:exactMDD}. \subsection{Branching scheme} \label{sub:branching} It is known that the search tree may have an exponential depth when branching is done on possibly every cycle; thus, branching on arcs is usually preferred. If $\mathscr{D}$ is a complete graph, there can be up to $\vert \mathscr{P} \vert (\vert \mathscr{P} \vert - 1) + \vert \mathscr{P} \vert \vert \mathscr{N} \vert$ arcs in $\mathscr{A}$. On the other hand, branching on arcs in $ \hat{\mathscr{A}}^{i}$ and $\bar{\mathscr{A}}^{i}$ implies that there are up to $(\vert \bar{I} \vert + \vert \hat{I} \vert ) \vert \mathscr{P} \vert (\vert \mathscr{P} \vert - 1) + \vert \bar{I} \vert \vert \mathscr{P} \vert \vert \mathscr{N} \vert$ arcs across all graph copies. Among the two arc-based schemes, branching on arcs in $\mathscr{A}$ results in a lower-depth branching tree. We therefore choose this option as our branching scheme. Particularly, at every fractional node of the search tree we branch on an arc $(\uix, \vix) \in \mathscr{A}$ whose left-hand side of \eqref{eq:fixingarc} is closest to $0.5$. That is, we generate two children, one in which the arc is prohibited and another in which the arc is selected.
When banning an arc from the RMP, we modify \eqref{eq:2a} by replacing $w_{\uix \vix}$ with $-M$, where $M$ is a sufficiently large positive number. By doing so, the length of any path in any copy traversing that arc becomes arbitrarily negative, thereby ruling it out due to the definition of $\hat{\eta}^{\fm}$ and $\bar{\eta}^{\fm}$. On the other hand, enforcing an arc $(\uix, \vix) \in \mathscr{A}$ requires the inclusion of constraint \eqref{eq:fixingarc} in the RMP. \begin{align} \sum_{i \in \hat{I}} \sum_{c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle}: (\uix, \vix) \in \mathscr{A}(c)} z_{\cycle}^{\fm} + \sum_{i \in \bar{I}} \sum_{p \in \mathscr{\bar{C}}^{\fm}_{\Lchain}: (\uix, \vix) \in \mathscr{A}(p)} z_{\chain}^{\fm} = 1 && (\mu_{(\uix, \vix)}) \label{eq:fixingarc} \end{align} The addition of constraint \eqref{eq:fixingarc} changes the reduced cost of a cycle and a chain. If we let $\Arcs^{*} \subseteq \mathscr{A} $ be the set of selected arcs in a branch-and-bound node, the reduced cost of a column in the $i$-th MDD is now given by \begin{subequations} \label{eq:NewRedCost} \begin{align} \hat{r}_{c}^{i} = &w_{\cycle} - \sum_{v \in \mathscr{V}(c)}\lambda_{v} - \sum_{(\uix, \vix) \in \Arcs^{*} \cap \mathscr{A}(c)} \mu_{(\uix, \vix)} & c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle} \label{eq:1rc} \\ \bar{r}_{p}^{i} = & w_{\chain} - \sum_{v \in \mathscr{V}(p)} \lambda_{v} - \sum_{(\uix, \vix) \in \Arcs^{*} \cap \mathscr{A}(p)} \mu_{(\uix, \vix)} & p \in \mathscr{\bar{C}}^{\fm}_{\Lchain} \label{eq:2rc} \end{align} \end{subequations} \noindent where $\mu_{(\uix, \vix)}$ is the dual variable of \eqref{eq:fixingarc}. Thus, if $(val(a^{'}), val(a)) \in \Arcs^{*}$, then $\mu_{(val(a^{'}), val(a))}$ can be subtracted from recursive expression \eqref{eq:2a} to account for \eqref{eq:fixingarc} in the RMP. If the solution of the RMP is fractional, we branch and then apply column generation to the resulting nodes. After optimally solving the RMP, the upper bound given by its objective value is compared to the best lower bound found. If the former is lower, that branch is pruned. Otherwise, a lower bound is obtained by restoring integrality to the decision variables (columns) in the RMP and re-solving it. Whenever a lower bound matches the best upper bound, optimality is achieved. If, due to time limitations, it is not possible to solve the RMP to optimality, \eqref{objLag2} is solved (see Section \ref{sec:SolAp}) to derive a valid upper bound. To this end, if $\Arcs^{*} \ne \emptyset$, the second summation of \eqref{eq:NewRedCost} is subtracted from the objective function of $\eqref{sprbm:cycles}$ and $\eqref{sprbm:chains}$. \section{Conclusion} \label{sec:Conclusions} In this paper, we addressed the problem of finding a matching in kidney exchange considering long chains. We first introduced a Lagrange relaxation that allows its decomposition into independent subproblems, and showed that the Lagrangian dual provides an upper bound as tight as that of the so-called cycle formulation. We then proposed a B\&P \ algorithm in which the pricing problems for both cycles and chains are primarily solved via MDDs, giving us the advantage of finding positive-price columns by means of a shortest-path algorithm. Although the time complexity can be exponential (due to the size of the input), we showed experimentally that by combining exact and restricted decision diagrams, positive-price cycle and chain columns can be found efficiently for large instances.
Given the large and realistic dataset on which we tested our algorithm and the state-of-the-art approaches, the experimental evidence suggests a remarkable performance of our solution algorithm for real match runs, making it, to the best of our knowledge, the first one to optimally solve as many instances from the PrefLib library. For projected multi-hospital (or multi-country) KPDP initiatives, instances may grow even larger than the largest instances considered here. In such a case, \textit{relaxed} decision diagrams can be used to solve the Lagrangian subproblems in order to obtain the new upper bound, whereas restricted decision diagrams can still be used to efficiently find columns. \ACKNOWLEDGMENT{This work has been supported by the University of Toronto Centre for Healthcare Engineering.}
\section{Introduction} \label{sec:Intro} The preferred treatment for kidney failure is transplantation. Demand for deceased-donor kidneys usually exceeds supply. An alternative, often desirable, is living-donor transplantation. A living donor is typically a close relative, partner or friend who is willing to donate one of their kidneys to grant a life-saving chance to a beloved one. However, biological incompatibilities between the patient in need of a kidney and the potential donor may exist, such as blood-type or antibody-related discrepancies.
It is in these cases where Kidney Paired Donation Programs (KPDPs), present nowadays in multiple countries across the globe, have played a life-saving role in kidney transplantation systems. A KPDP is a centralized registry operated at a local or national level, where each patient registers voluntarily along with his or her incompatible (suboptimal) donor (\textit{paired donor}) as a pair. Patients in these patient-donor pairs\ (PDPs) are willing to exchange their paired donors under the promise that they will receive a suitable kidney from a different donor. To accomplish this goal, two types of exchanges are allowed: cyclic and chain-like exchanges. Figure \ref{fig:PoolCycles} illustrates a pool of six patient-donor pairs\ $(p_{1}, d_{1})$, $(p_{2}, d_{2})$,...,$(p_{6}, d_{6})$ arranged in two cycles. In a cyclic exchange, a donor $d_{s}$ donates to patient $p_{t}$, donor $d_{t}$ donates to patient $p_{u}$, and so on, until the donor in the last pair gives a kidney to the first pair's patient $p_s$, thereby forming a \textit{cycle}. Due to pair drop-outs, an aggravated health condition of a pair member, or last-minute detected incompatibilities, the patient in the first pair may never receive a kidney back if the donor in any subsequent pair in the cycle fails to donate it. To avoid such a risk, cycles of kidney transplants are performed simultaneously in practice, imposing limitations on the maximum size of a cycle, $K \in \mathbb{Z}_{+}$. In the literature, a $k$-way cycle refers to a cycle in which $k$ transplants are involved. If we set $K = 4$ in Figure \ref{fig:PoolCycles}, then all $k$-way cycles with $2\le k \le 4$ are allowed. Although in some countries, such as the United States, it is common to find $K = 3$ \citep{NKR9years}, other countries have reported longer allowed cycles \citep{CaKEPFoundations, AustraliaKPD}, going up to $K = 6$ as is the case in Canada\footnote{\label{canada} \url{https://profedu.blood.ca/sites/msi/files/kpd-eng_2018.pdf}}. Depending on the country's population and matching frequency, KPDP sizes may vary. For instance, \cite{Glorie2014} reported instances of the Dutch KPDP with roughly 500 PDPs. However, in the United States alone, a nationwide KPDP would include 10,000 PDPs\ \citep{Abraham2007}. \begin{figure}[ht] \caption{Example exchanges.
PDPs\ are represented by circle nodes, while the NDD\ is the squared one.} \centering \vskip 0.2cm \begin{adjustbox}{minipage=\linewidth,scale=0.7} \makeatletter \tikzset{circle split part fill/.style args={#1,#2}{% alias=tmp@name, postaction={% insert path={ \pgfextra \pgfpointdiff{\pgfpointanchor{\pgf@node@name}{center}}% {\pgfpointanchor{\pgf@node@name}{east}}% \pgfmathsetmacro\insiderad{\pgf@x} \fill[#1] (tmp@name.base) ([xshift=-\pgflinewidth]tmp@name.east) arc (0:180:\insiderad-\pgflinewidth)--cycle; \fill[#2] (tmp@name.base) ([xshift=\pgflinewidth]tmp@name.west) arc (180:360:\insiderad-\pgflinewidth)--cycle; \endpgfextra}}}} \makeatother \begin{subfigure}{.5\textwidth} \centering \caption{2-way cycle (left) and 4-way cycle (right)} \begin{tikzpicture} \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={blue!20, green!30}] (1) at (-4, -3) {$p_1$\nodepart{lower} $d_1$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={green!30, blue!20}] (2) at (-2, -3) {$d_2$\nodepart{lower} $p_2$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={blue!20, green!30}] (3) at (0, -4) {$p_3$\nodepart{lower} $d_3$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={blue!20, green!30}] (4) at (0, -2) {$p_4$\nodepart{lower} $d_4$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={green!30,blue!20}] (5) at (2, -4) {$d_5$\nodepart{lower} $p_5$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={green!30, blue!20}] (6) at (2, -2) {$d_6$\nodepart{lower} $p_6$}; \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (1) to [bend right=45] (2); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (2) to [bend right=45] (1); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (4) to [bend right=45] (3); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (3) to [bend right=45] (5); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (5) to [bend right=45] (6); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (6) to [bend right=45] (4); \end{tikzpicture} \label{fig:PoolCycles} \end{subfigure} \begin{subfigure}{.5\textwidth} \centering \caption{4-length chain} \begin{tikzpicture} \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={blue!20, green!30}] (3) at (0, -4) {$p_7$\nodepart{lower} $d_7$}; \node[minimum size=5mm, shape=rectangle, draw=ashgrey,line width=0.5mm, fill=yellow!20, inner sep=12pt] (4) at (0, -2) {$A$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={green!30,blue!20}] (5) at (2, -4) {$d_8$\nodepart{lower} $p_8$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={green!30, blue!20}] (6) at (2, -2) {$d_9$\nodepart{lower} $p_9$}; \node[minimum size=5mm, shape=circle split, draw=ashgrey,line width=0.5mm, circle split part fill={blue!20,green!30}] (7) at (4, -2) {$d_{10}$\nodepart{lower} $p_{10}$}; \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (4) to [bend right=45] (3); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (3) to [bend right=45] (5); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (5) to [bend right=45] (6); \draw [-{Classical TikZ Rightarrow [scale=1.4, gray]}, gray, line width = 0.4mm] (6) to [bend left=45] (7); \end{tikzpicture} \label{fig:PoolChains} \end{subfigure} \label{fig:Exchanges} \end{adjustbox} \end{figure} In the presence of singleton donors, chains become an exchange alternative. Some singleton donors are \textit{altruistic donors}, willing to selflessly donate one of their kidneys to a renal-disease sufferer. A \textit{chain} is a path that starts with a singleton donor donating to a patient in a PDP, from which point onward all remaining patients receive a kidney from a paired donor. The donor in the last pair of a chain can either donate to a patient on the transplant waitlist or become a \textit{bridge donor}. After the intended recipient of the paired donor in the last pair of a chain receives a kidney, that donor becomes a singleton and can thus serve as a ``bridge'' in future match runs to start another segment of the original chain. In general, chain-initiating donors, whether altruistic, bridge, or even deceased \citep{DDonors2017}, are usually referred to as \textit{non-directed donors} (NDDs). Unlike cycles, no paired donor in a chain donates a kidney at the risk of their intended recipient not receiving one, making it possible to relax the simultaneity constraint for chains. Very often, KPDPs also set an upper bound on the number of transplants in a chain, $L \in \mathbb{Z}_{+}$, and it is usually the case that $L \ge K$ \citep{CaKEPFoundations, NKR9years}. Figure \ref{fig:PoolChains} illustrates a 4-length chain involving one NDD and four PDPs. As some transplants may be more suitable (or urgent) than others, a score may be given to every potential transplant. A common objective in kidney exchange, although not the only one, is to match donors (paired and singleton) to patients such that the sum of the transplants' scores is maximized. Since every donor can donate at most one of their two kidneys and every patient can likewise receive at most one, the matching must be such that every PDP\ belongs to at most one cycle or chain and every NDD\ to at most one chain. Finding a matching of maximum score in this context is known as the \textit{kidney exchange problem} (KEP), which we formally define in Section \ref{sec:PblmD}. Motivated by practical settings allowing $K \ge 4$ along with long chains \citep{CaKEPFoundations}, by previous studies showing the value of the latter \citep{Ashlagi2012, Dickerson2012, Ding2018}, by the increasing number of participants in KPDPs, and by the room for improvement in state-of-the-art approaches, we propose a new solution technique for the KEP. Particularly, this paper makes the following contributions: \begin{enumerate}[leftmargin=0.5cm] \item We improve an existing integer programming formulation, upon which we propose a Lagrangian decomposition.
Moreover, we show its relationship with branch-and-price (B\&P) algorithms in this context, and its usefulness in providing an upper bound on the optimal value of the KEP that is tighter than an existing one. \item We devise a B\&P \ algorithm that can deal with long cycles and long chains. \item We propose solving the pricing problems via multi-valued decision diagrams (MDDs). To the best of our knowledge, this is the first study using MDDs in cycle and path packing problems in a digraph, and one of only two works doing so in the B\&P\ literature. \item We present an effective three-phase solution method for the pricing problems, shifting between MDDs and linear (worst-case integer) programs. \item We perform extensive computational experiments showing the remarkable performance of our approach over state-of-the-art methods on realistic benchmark instances. \end{enumerate} The rest of the paper is organized as follows. In Section \ref{sec:LitReview}, we review the relevant literature. In Section \ref{sec:PblmD}, we present a formal definition of the KEP. In Section \ref{sec:Lagrange}, we introduce a Lagrangian decomposition. In Section \ref{sec:BP}, we detail our B\&P \ algorithm, including the reformulation of the pricing problems via MDDs. In Section \ref{sec:SolAp}, we present our general solution approach. In Section \ref{sec:Results}, we show experimental results comparing our algorithm with the state of the art. Lastly, we draw some conclusions and point to future work in Section \ref{sec:Conclusions}. \section{A New Valid Upper Bound through a Lagrangian Decomposition} \label{sec:Lagrange} In this section we introduce a Lagrangian decomposition of the KEP based on an improved and generalized version of an existing integer programming (IP) formulation. Particularly, we show that the Lagrangian decomposition can be used in B\&P \ to provide a valid upper bound on the optimal value of the KEP that is stronger than the one proposed in the literature. Moreover, we indicate the use of this new bound in our B\&P \ implementation. Lastly, we show an advantage of using the \textit{disaggregated cycle formulation} \citep{Klimentova2014} over the so-called \textit{cycle formulation} \citep{Abraham2007, Roth2007} in B\&P. \subsection{IEEF: An Improved Extended Edge Formulation (EEF)} For the cycle-only version of the KEP, \cite{Constantino2013} proposed to clone the input digraph $\mathscr{D}$ $\vert \mathscr{P} \vert$ times, motivated by the fact that $\vert \mathscr{P} \vert$ is a natural upper bound on the number of cycles in any feasible solution. The idea is then to find a feasible cycle in each copy of $\mathscr{D}$. If the selected cycles across the copies are vertex-disjoint, they represent a feasible matching. Thus, an IP formulation can be built based on this disaggregation of $\mathscr{D}$ to solve the problem. We propose to extend this formulation by including long chains and reducing the number of copies. To this end, we start by showing that any \textit{feedback vertex set} (FVS) provides a valid upper bound on the number of vertex-disjoint cycles in $\mathscr{D}$; the corresponding proof is omitted due to its simplicity. \begin{definition} Given a directed graph $\digraph= (\Vertex, \Arcs)$, $\mathscr{V}^{*} \subseteq \mathscr{V}$\ is a feedback vertex set of $\mathscr{D}$ if the subgraph induced by $\mathscr{V} \setminus \fvs$ is acyclic.
\end{definition} \begin{proposition} \label{prop:feedback} Given a digraph $\digraph= (\Vertex, \Arcs)$, let $n$ be the cardinality of the set with the maximum number of vertex-disjoint cycles in $\mathscr{D}$, and $\mathscr{V}^{*}$ be an FVS. Then, $n \le \vert \mathscr{V}^{*} \vert$. \end{proposition} We propose to create one graph copy per vertex in the FVS. Thus, the smaller the size of the FVS, the smaller the number of copies of $\mathscr{D}$. We will refer to these copies as \textit{cycle copies} and to vertices in the FVS as \textit{feedback vertices}. Finding a minimum FVS, however, is one of the classical NP-hard problems in Karp's seminal paper \citep{Karp1972}. Nevertheless, we can use a vertex-ordering rule to find an FVS with ``small'' cardinality and create the proposed graph copies. A similar rule was used in \citep{Dickerson2016} to reduce the number of variables in their formulations. This vertex-ordering rule is the main difference between the construction of the copies in \cite{Constantino2013} and our approach. Assuming $\fvs = \emptyset$ at the start, a simple algorithm finding both an FVS and graph copies works as follows: (1) sort vertices in $\mathscr{D}$ according to a rule, e.g., maximum in-degree, maximum out-degree, or total degree; (2) select the first vertex $v \in \mathscr{V} \setminus \fvs$ in that order; (3) take the subgraph induced by $\mathscr{V} \setminus \fvs$ as the $i$-th graph copy, associate it with the $i$-th feedback vertex added to $\fvs$, and let $\fvs = \fvs \cup\{v\}$; (4) if $\vert \mathscr{V} \setminus \fvs \vert \ge 2$, go to (2); otherwise, terminate. The ideal goal is to keep in a copy only cycles including the feedback vertex associated with that copy and to remove the multiplicity of identical cycles that can lead to symmetric solutions in an IP formulation. To further reduce (though not necessarily eliminate) multiple identical cycles, following \cite{Constantino2013}, we can find a pairwise shortest path between the feedback vertex of a copy and every other PDP\ in that copy. If the pairwise-shortest-path length is larger than $K$, the corresponding PDP\ can be removed from that copy. This process may lead to empty copies, which are discarded. Note also that different vertex orders lead to different FVSs. The motivation behind the vertex-ordering rule is to favor a small-cardinality FVS by first removing the vertices covering the most cycles in the input graph $\mathscr{D}$, thus leading to fewer feedback vertices at the end. Formally, let us define the graph copies in terms of an FVS. Let $\mathscr{V}^{*} = \{v^{*}_{1},..., v^{*}_{ \vert \mathscr{V}^{*} \vert}\} \subseteq \mathscr{P} $ be an FVS of $\mathscr{D}$ and $\hat{I}$ be the index set of the cycle copies created as explained above, such that $\vert \hat{I} \vert \le \vert \mathscr{V}^{*} \vert$. The $i$-th cycle copy of $\mathscr{D}$, $i \in \hat{I}$, is represented by the graph $\digraphd^{\fm}= (\Vertexd^{\fm}, \Arcsd^{\fm})$. By construction, all cycles in copy $\hat{\mathscr{D}}^{i}$, $i \in \hat{I}$, include vertex $v^{*}_{i} \in \mathscr{V}^{*}$. Figure \ref{fig:Copies} shows the proposed graph copies, in which only arcs involved in cycles of length at most $K = 4$ are shown. Notice that this scenario is ideal, since the pairwise-shortest-path-based reduction may fail to remove all the arcs that are unnecessary in a copy. Thus, by selecting vertices in Figure \ref{fig:PCompleteGraph} in order of maximum out-degree, 2 cycle copies and 14 arcs are obtained as a result; a sketch of this greedy construction is given below.
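The following minimal Python sketch illustrates this greedy construction under the maximum out-degree rule; it omits the shortest-path-based arc removal and the discarding of empty copies, so it is an illustration of the idea rather than our implementation.
\begin{verbatim}
# Sketch of the greedy FVS-and-copies construction (illustrative only).
# arcs is a set of (u, v) pairs; vertices is any iterable of vertices.
def greedy_fvs_and_copies(vertices, arcs):
    fvs, copies, remaining = [], [], set(vertices)
    while len(remaining) >= 2:                  # step (4)
        out_deg = lambda u: sum(1 for (a, b) in arcs
                                if a == u and b in remaining)
        v = max(remaining, key=out_deg)         # steps (1)-(2)
        copy = {(a, b) for (a, b) in arcs       # step (3): the copy is the
                if a in remaining and b in remaining}  # induced subgraph
        copies.append((v, copy))                # copy associated with v
        fvs.append(v)
        remaining.remove(v)
    return fvs, copies
\end{verbatim}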
If copies of the digraph depicted in Figure \ref{fig:PCompleteGraph} were created by following the vertex ordering proposed by \cite{Constantino2013}, i.e., in non-decreasing order of vertex indices, and taking into account only arcs leading to feasible cycles as before, 4 copies and 19 arcs would be obtained instead. \begin{figure}[ht] \caption{Proposed graph copies. Blue vertices correspond to feedback vertices.} \vskip 0.2cm \begin{subfigure}{.30\textwidth} \centering \caption{$\digraph= (\Vertex, \Arcs)$} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[place] (waiting) at (0,2) {$4$}; \node[transition] (critical) at (0,1) {$3$}; \node[place] (semaphore) at (0,0) {$5$}; \node[transition] (leave critical) at (1,1) {$1$}; \node[transition] (enter critical) at (-1,1) {$2$}; \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend left=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (enter critical); \draw [-{Classical TikZ Rightarrow}] (enter critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (leave critical) to (semaphore); \end{tikzpicture} \label{fig:PCompleteGraph} \end{subfigure} \begin{subfigure}{.30\textwidth} \centering \caption{$\hat{\mathscr{D}}^{1} = (\hat{\mathscr{V}}^{1}, \hat{\mathscr{A}}^{1})$} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[place] (waiting) at (0,2) {$4$}; \node[transition] (critical) at (0,1) {$3$}; \node[transition] (semaphore) at (0,0) {$5$}; \node[transition] (leave critical) at (1,1) {$1$}; \node[transition] (enter critical) at (-1,1) {$2$}; \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend left=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (enter critical); \draw [-{Classical TikZ Rightarrow}] (enter critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (waiting) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (waiting); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (leave critical) to (semaphore); \end{tikzpicture} \label{fig:FirstCopy} \end{subfigure} \begin{subfigure}{.30\textwidth} \centering \caption{$\hat{\mathscr{D}}^{2} = (\hat{\mathscr{V}}^{2}, \hat{\mathscr{A}}^{2})$} \centering \tikzstyle{place}=[circle,draw=blue!50,fill=blue!20,thick, inner sep=0pt,minimum size=4mm]
\tikzstyle{transition}=[circle,draw=black!50,fill=black!20,thick, inner sep=0pt,minimum size=4mm] \tikzstyle{whites}=[circle,draw=white,fill=white,thick, inner sep=0pt,minimum size=4mm] \begin{tikzpicture} \node[whites] (waiting) at (0,2) {}; \node[transition] (critical) at (0,1) {$3$}; \node[place] (semaphore) at (0,0) {$5$}; \node[transition] (leave critical) at (1,1) {$1$}; \node[whites] (enter critical) at (-1,1) {}; \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (leave critical); \draw [-{Classical TikZ Rightarrow}] (semaphore) to [bend right=45] (critical); \draw [-{Classical TikZ Rightarrow}] (critical) to [bend right=45] (semaphore); \draw [-{Classical TikZ Rightarrow}] (leave critical) to (semaphore); \end{tikzpicture} \label{fig:SecondCopy} \end{subfigure} \label{fig:Copies} \end{figure} Our extension of the extended edge formulation includes long chains. Thus, we also create copies of the input graph for chains, one for every NDD. Let $\bar{I}$ denote the index set of chain copies, with $\vert \bar{I} \vert \le \vert \mathscr{N} \vert$. The $i$-th chain copy of $\mathscr{D}$, $i \in \bar{I}$, is represented by graph $\digraphbar^{\fm}= (\Vertexbar^{\fm}, \Arcsbar^{\fm})$, whose vertex set $\bar{\mathscr{V}}^{i} = \mathscr{P} \cup \{u_{i}, \tau \}$ is formed by all PDPs, the $i$-th NDD, $u_{i}$, and a dummy vertex $\tau$, representing a dummy patient receiving a fictitious donation from the paired donor in the last pair of a chain. The arc set $\bar{\mathscr{A}}^{i} = ( \mathscr{A} \setminus \{(u, v)\mid u \in \mathscr{N} \setminus \{u_{i}\}, v \in \mathscr{P}\}) \cup \{(v, \tau) \mid v \in \mathscr{P}\}$ removes the arcs emanating from every NDD\ other than $u_{i}$ and adds one dummy arc from every PDP\ to $\tau$. Thus, a chain can only be started by the NDD\ $u_{i}$ in the $i$-th chain copy. Additionally, let $\mathscr{\bar{C}}^{\fm}$ and $\mathscr{\bar{C}}^{\fm}_{K} \subseteq \mathscr{\bar{C}}^{\fm}$ be the set of all simple cycles and the set of all feasible cycles in $\bar{\mathscr{D}}^{i}$, respectively. Lastly, let $\hat{x}_{\uix \vix}^{i}$ and $\bar{x}_{\uix \vix}^{i}$ be binary decision variables taking value one if arc $(\uix, \vix) \in \hat{\mathscr{A}}^{i}$ or arc $(\uix, \vix) \in \bar{\mathscr{A}}^{i}$ is selected in a cycle or chain, respectively, and zero otherwise.
Then, the improved extended edge formulation can be stated as follows: \begin{subequations} \label{sub:IEEF} \begin{align} \max \quad & \sum_{i \in \hat{I}}\sum_{(\uix, \vix) \in \hat{\mathscr{A}}^{i}} w_{\uix \vix}\hat{x}^{i}_{\uix \vix} + \sum_{i \in \bar{I}}\sum_{(\uix, \vix) \in \bar{\mathscr{A}}^{i}} w_{\uix \vix}\bar{x}^{i}_{\uix \vix} \label{eq:obj} \tag{IEEF}\\ \text{s.t.}\quad &\sum_{i \in \hat{I}}\sum_{v: (\uix, \vix) \in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{\uix \vix} + \sum_{i \in \bar{I}}\sum_{v: (\uix, \vix)\in \bar{\mathscr{A}}^{i}} \bar{x}^{i}_{\uix \vix} \le 1 && u \in \mathscr{V} \label{eq:maxflowIEEF}\\ &\sum_{v: (\uix, \vix) \in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{\uix \vix} = \sum_{v: (\vix, \uix)\in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{\vix \uix} && i \in \hat{I}, u \in \hat{\mathscr{V}}^{i} \label{eq:OutFlowCycleIEEF}\\ &\sum_{v: (\uix, \vix) \in \bar{\mathscr{A}}^{i}} \bar{x}^{i}_{\uix \vix} = \sum_{v: (\vix, \uix) \in \bar{\mathscr{A}}^{i}} \bar{x}^{i}_{\vix \uix} && i \in \bar{I} , u \in \bar{\mathscr{V}}^{i} \setminus \{u_{i}, \tau\} \label{eq:OutFlowChainIEEF}\\ &\sum_{(\uix, \vix) \in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{\uix \vix} \le K && i \in \hat{I} \label{eq:CycleSizeIEEF}\\ &\sum_{(\uix, \vix) \in \bar{\mathscr{A}}^{i}} \bar{x}^{i}_{\uix \vix} \le L && i \in \bar{I} \label{eq:ChainSizeIEEF}\\ &\sum_{v:(\uix, \vix) \in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{\uix \vix} \le \sum_{v:(v_{i}^{*}, v) \in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{v^{*}_{i} v} && i \in \hat{I}, u \in \hat{\mathscr{V}}^{i} \setminus \{v^{*}_{i}\} \label{eq:symIEEF}\\ &\sum_{(\uix, \vix) \in \mathscr{A}(c)} \bar{x}^{i}_{\uix \vix} \le \vert \mathscr{V}(c) \vert - 1 && i \in \bar{I} , c \in \mathscr{\bar{C}}^{\fm} \setminus \mathscr{\bar{C}}^{\fm}_{K} \label{eq:cycleElimIEEF}\\ & \hat{x}^{i}_{\uix \vix} \in \{0,1 \} && i \in \hat{I}, (\uix, \vix) \in \hat{\mathscr{A}}^{i} \label{eq:integercIEEF}\\ & \bar{x}^{i}_{\uix \vix} \in \{0,1 \} && i \in \bar{I}, (\uix, \vix) \in \bar{\mathscr{A}}^{i} \label{eq:integerpIEEF} \end{align} \end{subequations} The objective function maximizes the weighted sum of transplant scores. Constraints \eqref{eq:maxflowIEEF} enforce that a donor (paired or single) donates at most one kidney. Constraints \eqref{eq:OutFlowCycleIEEF} guarantee that, in a selected cycle copy, if a PDP\ receives a kidney, then it donates one to another pair in the same copy. Constraints \eqref{eq:OutFlowChainIEEF} ensure that, in a chain, if a patient in a PDP\ receives a kidney, his or her paired donor donates either to a PDP\ in the same chain or to the dummy vertex, as is the case for the donor in the last PDP\ of the chain. Constraints \eqref{eq:CycleSizeIEEF} enforce the use of at most $K$ arcs in a cycle copy, while constraints \eqref{eq:ChainSizeIEEF} limit the number of arcs in a chain copy to $L$ or fewer. Constraints \eqref{eq:symIEEF} forbid the use of the $i$-th cycle copy unless the $i$-th vertex in the FVS, $v^{*}_{i}$, is selected in that copy. The presence of cycles (subtours) in chain copies, particularly those of size larger than $K$, jeopardizes the correctness of the formulation. Therefore, constraints \eqref{eq:cycleElimIEEF} ensure the elimination of all infeasible cycles from chain copies. These constraints resemble those used in the recursive formulation of \cite{Anderson2015}. We note that \cite{Constantino2013} and \cite{Klimentova2014} did not consider infeasible-cycle-breaking constraints in their discussion on how to include NDDs\ in the EEF, and thus their discussion can be deemed incomplete.
In Appendix \ref{sec:Appendix} we show that infeasible-cycle-breaking constraints, e.g., \eqref{eq:cycleElimIEEF}, are \textit{necessary} to preserve the correctness of the EEF, and thus that of IEEF. Lastly, constraints \eqref{eq:integercIEEF} and \eqref{eq:integerpIEEF} define the decision variables' domains. \subsection{Lagrangian relaxation} Consider introducing the following valid (redundant) constraints to \eqref{eq:obj}: \begin{subequations} \label{subeqs:maxflowIEEF2} \begin{align} &\sum_{v: (\uix, \vix) \in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{\uix \vix} \le 1 && i \in \hat{I}, u \in \hat{\mathscr{V}}^{i} \label{eq:maxflowIEEF2cycles}\\ & \sum_{v: (\uix, \vix) \in \bar{\mathscr{A}}^{i}} \bar{x}^{i}_{\uix \vix} \le 1 && i \in \bar{I}, u \in \bar{\mathscr{V}}^{i} \label{eq:maxflowIEEF2chains} \end{align} \end{subequations} \noindent Before justifying these new constraints, let us first approximate the optimal objective value of \eqref{eq:obj} by relaxing the constraints guaranteeing at most one donation from every donor, \eqref{eq:maxflowIEEF}, and then penalizing their violation through Lagrange multipliers $\boldsymbol{\lambda}$ in the objective function. This relaxation may allow a vertex to be selected in more than one graph copy, and thus in more than one exchange. However, within any single copy such a vertex can still be selected at most once, which is the purpose of adding constraints \eqref{subeqs:maxflowIEEF2}. Given $\lambda \in \mathbb{R}_{+}^{\vert \mathscr{V} \vert}$, a Lagrangian relaxation of the KEP can be formulated as follows: \begin{subequations} \label{sub:LR1} \begin{align} \mathcal{Z} (\lambda) := \hspace*{-0.3cm} &&& \label{objLag}\tag{LR1}\\ \max \ & \sum_{i \in \hat{I}}\sum_{(\uix, \vix) \in \hat{\mathscr{A}}^{i}} w_{\uix \vix}\hat{x}^{i}_{\uix \vix} + \sum_{i \in \bar{I}}\sum_{(\uix, \vix) \in \bar{\mathscr{A}}^{i}} w_{\uix \vix}\bar{x}^{i}_{\uix \vix} + \sum_{u \in \mathscr{V}} \lambda_{u} \left(1 - \sum_{i \in \hat{I}}\sum_{v: (\uix, \vix) \in \hat{\mathscr{A}}^{i}} \hat{x}^{i}_{\uix \vix} - \sum_{i \in \bar{I}}\sum_{v: (\uix, \vix) \in \bar{\mathscr{A}}^{i}} \bar{x}^{i}_{\uix \vix} \right) \notag\\ \text{s.t.} \ &\eqref{eq:OutFlowCycleIEEF} - \eqref{eq:integerpIEEF}, \eqref{eq:maxflowIEEF2cycles} - \eqref{eq:maxflowIEEF2chains}&& \label{eq:AllOthersLag} \end{align} \end{subequations} \noindent As the only constraints linking decision variables associated with different copies are now relaxed, \eqref{objLag} can be decomposed by graph copies as follows: \begin{subequations} \label{sub:LR2} \begin{align} \centering &\mathcal{Z} (\lambda) = \sum_{i \in \hat{I}} \mathcal{\hat{Z}}^{i}({\lambda}) + \sum_{i \in \bar{I}} \mathcal{\bar{Z}}^{i} ({\lambda})+ \sum_{u \in \mathscr{V}} \lambda_{u} \label{objLag2}\tag{LR2} \end{align} \end{subequations} where, $ \forall i \in \hat{I} $ and $\forall i \in \bar{I}$, we respectively have the following subproblems: \begin{align} &\mathcal{\hat{Z}}^{i}({\lambda}) := \max \left \{ \sum_{(\uix, \vix) \in \hat{\mathscr{A}}^{i}} (w_{\uix \vix} - \lambda_{u}) \hat{x}^{i}_{\uix \vix} \mid \eqref{eq:OutFlowCycleIEEF}, \eqref{eq:CycleSizeIEEF}, \eqref{eq:symIEEF}, \eqref{eq:integercIEEF}, \eqref{eq:maxflowIEEF2cycles} \right \}, \label{sprbm:cycles}\tag{CC} \end{align} \begin{align} &\mathcal{\bar{Z}}^{i}({\lambda}) := \max \left \{ \sum_{(\uix, \vix) \in \bar{\mathscr{A}}^{i}} (w_{\uix \vix} - \lambda_{u}) \bar{x}^{i}_{\uix \vix} \mid \eqref{eq:OutFlowChainIEEF}, \eqref{eq:ChainSizeIEEF}, \eqref{eq:cycleElimIEEF}, \eqref{eq:integerpIEEF},
\eqref{eq:maxflowIEEF2chains} \right \}. \label{sprbm:chains}\tag{CH} \end{align} Given a set of Lagrange multipliers $\lambda$, each subproblem aims at finding either a feasible cycle, \eqref{sprbm:cycles}, or a feasible chain, \eqref{sprbm:chains}, whose vertex assignment has minimum penalty and thus maximum weight. Observe that the inclusion of the dummy vertex $\tau$ is useful to capture the Lagrange multiplier of the last pair in a chain in the objective function of \eqref{sprbm:chains}. The Lagrangian dual problem can be defined as $\sigma^{LD} := \min \{ \mathcal{Z} (\lambda) : \lambda \in \mathbb{R}^{\vert \mathscr{V}\vert}_{+} \}$. That is, $\sigma^{LD}$ is the smallest upper bound that can be obtained, attained when the Lagrange multipliers favor an assignment of vertices to cycle and chain copies with minimum intersection. If we define $\mathcal{Z}^{LP}$ as the optimal objective value of the linear programming relaxation of \eqref{eq:obj}, it is possible that $\sigma^{LD} < \mathcal{Z}^{LP}$. Moreover, we show that the bound provided by $\sigma^{LD}$ is as tight as the one provided by the linear programming relaxation of the disaggregated cycle formulation \citep{Klimentova2014}, one of the formulations in the literature providing the tightest linear relaxation. \begin{proposition} If $\mathcal{Z}_{c}^{LP}$ is the optimal objective value of the linear programming relaxation of the disaggregated cycle formulation, then $\sigma^{LD} = \mathcal{Z}_{c}^{LP}$. \end{proposition} \proof{Proof.} Let $\mathscr{\hat{C}}^{\fm}_{\Kcycle}$ and $\mathscr{\bar{C}}^{\fm}_{\Lchain}$ be the set of feasible cycles (including vertex $v^{*}_{i}$) and chains on the $i$-th graph copy, $i \in \hat{I}, i \in \bar{I}$, respectively. For a cycle $c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle}$ and chain $p \in \mathscr{\bar{C}}^{\fm}_{\Lchain}$, let $w_{\cycle} = \sum_{(\uix, \vix) \in \mathscr{A}(c)} w_{\uix \vix}$ and $w_{\chain} = \sum_{(\uix, \vix) \in \mathscr{A}(p)} w_{\uix \vix}$ be the total weight of the cycle and chain, respectively. Then, \eqref{objLag2} can be reformulated as follows: \begin{subequations} \label{sub:LR2opt} \begin{align} \min \quad & \sum_{i \in \hat{I}} \mathcal{\hat{Z}}^{i} + \sum_{i \in \bar{I}} \mathcal{\bar{Z}}^{i} + \sum_{v \in \mathscr{V}} \lambda_{v} \label{objLag2opt}\tag{LR3}\\ &\mathcal{\hat{Z}}^{i} \ge w_{\cycle} - \sum_{v \in \mathscr{V}(c)}\lambda_{v} &&i \in \hat{I}, c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle} & (z_{\cycle}^{\fm})& \label{eq:Lagoptcy}\\ &\mathcal{\bar{Z}}^{i} \ge w_{\chain} - \sum_{v \in \mathscr{V}(p)} \lambda_{v} &&i \in \bar{I}, p \in \mathscr{\bar{C}}^{\fm}_{\Lchain} & (z_{\chain}^{\fm})& \label{eq:Lagoptch}\\ & \mathcal{\hat{Z}}^{i} \ge 0 &&i \in \hat{I} \label{eq:bound1}\\ & \mathcal{\bar{Z}}^{i} \ge 0 &&i \in \bar{I} \label{eq:bound1.1}\\ & \lambda_{v} \ge 0 && v \in \mathscr{V} \label{eq:bound2} \end{align} \end{subequations} \eqref{objLag2opt} finds the optimal value of the decision variables $\mathcal{\hat{Z}}^{i}$, $\mathcal{\bar{Z}}^{i}$ and $\lambda$. The validity of (\ref{objLag2opt}) relies on the fact that the maximum-weight cycle and chain are selected for every graph copy through constraints \eqref{eq:Lagoptcy} and \eqref{eq:Lagoptch}, which are met with equality under the minimization objective.
Moreover, since the objective value of \eqref{sprbm:cycles} and \eqref{sprbm:chains} is at least zero (by selecting $\hat{x} = 0$ and $\bar{x} = 0$), constraints \eqref{eq:bound1} and \eqref{eq:bound1.1} represent valid lower bounds on the objective value of each subproblem. To finalize the proof, let $z_{\cycle}^{\fm}$ and $z_{\chain}^{\fm}$ be the dual variables of constraints \eqref{eq:Lagoptcy} and \eqref{eq:Lagoptch}, respectively. Then the dual problem of \eqref{objLag2opt} is shown below: \begin{subequations} \label{sub:discyclef} \begin{align} \max \quad& \sum_{i \in \hat{I}} \sum_{c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle}} w_{\cycle} z_{\cycle}^{\fm} + \sum_{i \in \bar{I}} \sum_{p \in \mathscr{\bar{C}}^{\fm}_{\Lchain}} w_{\chain} z_{\chain}^{\fm} \label{objDis}\tag{IDCF}\\ &\sum_{i \in \hat{I}} \sum_{c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle}: v \in \mathscr{V}(c)} z_{\cycle}^{\fm} + \sum_{i \in \bar{I}} \sum_{p \in \mathscr{\bar{C}}^{\fm}_{\Lchain}: v \in \mathscr{V}(p)} z_{\chain}^{\fm} \le 1 && v \in \mathscr{V} \label{eq:onepervertex}\\ & z_{\cycle}^{\fm} \ge 0 && i \in \hat{I}, c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle}\\ & z_{\chain}^{\fm} \ge 0 && i \in \bar{I}, p \in \mathscr{\bar{C}}^{\fm}_{\Lchain} \end{align} \end{subequations} Note that constraints $ \sum_{c \in \mathscr{\hat{C}}^{\fm}_{\Kcycle}} z_{\cycle}^{\fm} \le 1 \text{\ \ } \forall i \in \hat{I} $ are omitted from \eqref{objDis}, along with their chain counterpart, since they are implied by constraints \eqref{eq:onepervertex}. Formulation \eqref{objDis} corresponds to the linear programming relaxation of the \textit{disaggregated cycle formulation} presented by \cite{Klimentova2014}, inspired by \citep{Constantino2013}. Hence, by strong duality it follows that $\sigma^{LD} = \mathcal{Z}_{c}^{LP}$. \hfill$\square$ Notice that, by enforcing integrality of the decision variables, \eqref{objDis} is very similar to the so-called cycle formulation \citep{Abraham2007, Roth2007}, which is defined like \eqref{objDis} except that cycles and chains do not correspond to specific graph copies, and thus the index $i$ is dropped. It had not previously been shown whether there is an advantage to using one formulation over the other, particularly in B\&P. Notice that the dual variables of constraints \eqref{eq:onepervertex} correspond to the Lagrange multipliers $\lambda$ in \eqref{objLag2}. Therefore, this result shows that when cycles and chains are disaggregated into graph copies, it is possible to obtain a valid upper bound on the objective value by solving \eqref{sprbm:cycles} and \eqref{sprbm:chains} and then simply plugging their results into \eqref{objLag2}. Notice that even if the set of Lagrange multipliers is not optimal, \eqref{objLag2} provides a valid upper bound, which can be as good as that of the disaggregated cycle formulation or the cycle formulation itself. Thus, even without proving optimality of \eqref{objDis}, its dual variables can still be used to obtain a valid upper bound. To the best of our knowledge, the only previous method in the literature obtaining a valid upper bound consists of solving a relaxed problem with $K = L = \infty$ \citep{Abraham2007}, which, as mentioned in Section \ref{sec:PblmD}, can be solved in polynomial time. It is easy to see that the bound provided by this special case is weaker than that of the presented Lagrangian dual problem.
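For concreteness, once the subproblems \eqref{sprbm:cycles} and \eqref{sprbm:chains} have been solved for some nonnegative multipliers, e.g., the duals of constraints \eqref{eq:onepervertex} at the current restricted master problem, assembling the bound \eqref{objLag2} amounts to a single summation. The following Python fragment is a minimal illustrative sketch; the function name and data layout are ours.

\begin{verbatim}
# Illustrative sketch: assemble the Lagrangian upper bound (LR2).
# z_hat[i] = optimal value of (CC) on the i-th cycle copy,
# z_bar[i] = optimal value of (CH) on the i-th chain copy,
# lam[v]   = nonnegative Lagrange multiplier of vertex v.
# The multipliers need not be optimal for the bound to be valid.

def lagrangian_upper_bound(z_hat, z_bar, lam):
    assert all(val >= 0 for val in lam.values())
    # Each subproblem value is nonnegative: the empty solution is feasible.
    return sum(z_hat) + sum(z_bar) + sum(lam.values())
\end{verbatim}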
Since \eqref{objDis} can be used as a master problem in column generation, the goal is to use the bound provided by \eqref{objLag2} when it is not possible to prove optimality of the master problem. In Section \ref{sub:branching}, we indicate how this new upper bound can be used at every node of the branch-and-bound tree. \section{Literature Review} \label{sec:LitReview} A well-studied variant of the KEP is the cycle-only version, i.e., a problem instance in which either there are no NDDs\ or, if present, chains are ``turned'' into cycles by adding an arc from every PDP\ to the NDDs. Different methods, mostly from mixed integer programming (MIP), have been used to model this variant of the KEP in the literature. \cite{Abraham2007} and \cite{Roth2007} proposed two widely known formulations: the \textit{cycle formulation} and the \textit{edge formulation}. The former has an exponential number of decision variables, whereas the latter has an exponential number of constraints. \cite{Constantino2013} showed that the edge formulation scales poorly compared to the cycle formulation, reaching more than three million path-violating constraints in instances with only 50 PDPs. However, in the same study it was shown that the number of cycles also grows sharply for $K \ge 4$ in medium- and high-density instances with 100 PDPs\ or more. \cite{Constantino2013} proposed the first two MIP formulations in which the number of constraints and variables is polynomial in the size of the input, referred to as \textit{compact formulations}. It was shown that their \textit{extended edge formulation} outperforms their \textit{assignment edge formulation}. Although the cycle formulation is theoretically stronger than both of them, the extended edge formulation is able to scale to instances where the cycle formulation requires more than three million variables. To overcome the exponential number of variables of the cycle formulation, it is commonly used within B\&P\ \citep{Barnhart1998}, yielding the most successful solution methods for the KEP to date \citep{Abraham2007, Klimentova2014, Dickerson2016}. On the other hand, \cite{Lam2020} introduced the first B\&P-and-cut algorithm where the cycle formulation is used as a master problem. This approach is the state of the art for the cycle-only version. In their algorithm, every time a candidate solution is obtained, three types of inequalities are separated. In the empirical results, instances from the PrefLib library \citep{Mattei2013} were tested. Most instances with 2048 PDPs\ and $K = 3$ were solved in a total run-time of two seconds, and the majority of instances with up to $1024$ PDPs\ took less than a second. For $K = 4$, only instances up to the latter size could be solved, within eight to twenty-two minutes. In the general version of the KEP, chains are allowed, and it is usually the case that $L \ge K$, or $L$ is unbounded. For the general case, \cite{MakHau2017} introduced a compact formulation integrating chains and cycles through a variation of the well-known Miller-Tucker-Zemlin constraints and ideas similar to the extended edge formulation. An exponential-sized variant of the same formulation was also proposed. The largest instance presented includes 250 PDPs\ and 6 NDDs. \cite{Anderson2015} proposed two formulations for unbounded chains: a recursive one and one based on the prize-collecting traveling salesman problem (PC-TSP). Instances with up to 1179 PDPs\ and 62 NDDs\ were tested with $K = 3$.
The recursive formulation outperformed the PC-TSP formulation on a large historical dataset, although on what were denominated ``difficult'' instances, the PC-TSP formulation was more successful. This formulation can be modified to include bounds on the length of chains. However, \cite{Plaut2016b} showed that the PC-TSP formulation is effective when unbounded chains and $K = 3$ are considered; otherwise, B\&P-based algorithms outperform it. \cite{Dickerson2016} proposed three formulations: a compact formulation for the cycle-only version, PIEF; a compact variation of PIEF allowing long chains, HPIEF; and an exponential-sized formulation also allowing long chains, PICEF. PIEF is a variation of the extended edge formulation in which a fourth variable index is included to indicate the position of an arc in a cycle. In HPIEF, cycles are handled as in PIEF, while chains are represented through three-indexed arc variables, with the last index indicating the position of an arc in a chain. When $L = 0$, PICEF reduces to the cycle formulation; otherwise, PICEF handles long chains as HPIEF does. In that study, instances from real match runs and from a realistic simulator were used to test seven algorithms \citep{Abraham2007,Klimentova2014, Anderson2015, Glorie2014, Plaut2016b, Dickerson2016}. (H)PIEF and PICEF turned out to be the most effective among all of them. It is worth noticing that amendments to a study by \cite{Glorie2014} were taken into account: \cite{Glorie2014} discussed ``polynomial-time'' algorithms for the pricing problems, but some of their arguments were later shown to be incorrect \citep{Plaut2016}. More recently, \cite{Duncan2019} proposed the position-indexed traveling-salesman problem formulation (PI-TSP) as part of a robust optimization model in which cycles are handled as in the cycle formulation, and chains are expressed by combining the indexing scheme presented in PICEF and ideas from the PC-TSP formulation. A significant reduction in the number of variables needed to model long chains is achieved. Experimentally, robust solutions were compared to deterministic solutions obtained by PICEF. Other variations of the KEP include finding an optimal matching when information about future arrivals of PDPs\ and NDDs\ is included, having hierarchical objectives, and considering transplant failures \citep{Klimentova2016, Glorie2014, Dickerson2019, Duncan2019}. In several of these variations, a deterministic KEP formulation is solved multiple times or used as a base to build other models; hence the relevance of efficient algorithms for the deterministic problem. \section{Problem Description} \label{sec:PblmD} Given a set of PDPs\ $\mathscr{P}$, a set of NDDs\ $\mathscr{N}$, and positive integers $K$ and $L$, the KEP can be defined on a digraph $\digraph= (\Vertex, \Arcs)$. A vertex $v \in \mathscr{V}$ is defined for each PDP\ and NDD, i.e., $\mathscr{V} = \mathscr{P} \cup \mathscr{N}$. For $v_i,v_j \in \mathscr{V}$, there exists an arc $(v_i,v_j) \in \mathscr{A}$ if the donor in vertex $v_i$ is compatible with the patient in vertex $v_j$, e.g., see Figure \ref{fig:Exchanges}. Note that $\mathscr{A} \subseteq \{(v_i,v_j) \mid v_i \in \mathscr{V} , v_j \in \mathscr{P} \}$; that is, there are no incoming arcs to NDDs, neither from PDPs\ nor from other NDDs. Each arc $(v_{i}, v_{j})$ is assigned a score $w_{ij}$ representing the suitability/priority of that transplant.
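As a small illustration of this setup, the following Python sketch stores such a compatibility digraph; the class and its methods are purely illustrative and not part of our implementation.

\begin{verbatim}
# Illustrative sketch of the compatibility digraph D = (V, A): V is the
# union of PDPs and NDDs, every arc points at a PDP (NDDs never receive
# a kidney), and each arc carries a transplant score w.

class CompatibilityDigraph:
    def __init__(self, pdps, ndds):
        self.P = set(pdps)            # patient-donor pairs
        self.N = set(ndds)            # non-directed donors
        self.V = self.P | self.N
        self.w = {}                   # (u, v) -> transplant score

    def add_arc(self, u, v, score):
        # Donor at u is compatible with the patient at v; v must be a PDP.
        assert u in self.V and v in self.P
        self.w[(u, v)] = score

    def out_neighbors(self, u):
        return [v for (x, v) in self.w if x == u]
\end{verbatim}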
Chains and cycles correspond to simple paths and cycles of $\mathscr{D}$, respectively, formally defined as follows: \begin{definition} A chain $p = (v_1, ...,v_\ell)$ is feasible if and only if: (1) $(v_i, v_{i+1}) \in \mathscr{A}$ for all $1 \le i \le \ell -1$, (2) $v_1 \in \mathscr{N}$ and $v_i \in \mathscr{P}$ for all $2 \le i \le \ell$, and (3) $\ell \le L$. \end{definition} \begin{definition} A cycle $c= (v_1, ..., v_k, v_1)$ is feasible if and only if: (1) $(v_i, v_{i+1}) \in \mathscr{A}$ for all $1 \le i \le k -1$ and $(v_{k}, v_{1}) \in \mathscr{A}$, (2) $v_i \in \mathscr{P}$ for all $1 \le i \le k$, and (3) $k \le K$. \end{definition} A feasible solution to the KEP is a matching of donors to patients. To formally introduce this definition, consider $\mathscr{C}_{\Kcycle}$ and $\mathscr{C}_{\Lchain}$ as the set of all feasible cycles and chains in $\mathscr{D}$, respectively. Throughout the paper, the notation $\mathscr{V}(\cdot)$ and $\mathscr{A}(\cdot)$ will denote the set of vertices and arcs present in the argument $(\cdot)$, respectively. For instance, $\mathscr{V}(c)$ corresponds to the set of vertices in cycle $c$. \begin{definition} $M(K, L) \subseteq \mathscr{C}_{\Kcycle} \cup \mathscr{C}_{\Lchain}$ is a feasible matching of donors to patients if $\mathscr{V}(m_1) \cap \mathscr{V}(m_2) = \emptyset$ for all $m_1, m_2 \in M(K, L)$ such that $m_1 \ne m_2$. \end{definition} That is, a matching in the KEP corresponds to a collection of feasible chains and cycles in which every patient/donor participates in at most one transplant and one type of exchange. Thus, the objective of the KEP is to find a matching whose set of arcs $\mathscr{A}(M(K, L))$ maximizes the total transplant score, i.e., $\sum_{(i,j) \in \mathscr{A}(M(K, L))} w_{ij}$. It is known that there are two cases in which the KEP can be solved efficiently, although the general setting is NP-hard \citep{Abraham2007, Biro2009}. In the first case, $\mathscr{V} = \mathscr{P}$ and $K = 2$, i.e., only 2-way cycles are allowed, and the problem can be reduced to a maximum weight matching problem in an undirected graph. The second case occurs when $K$ and $L$ are arbitrarily large, where the problem can be solved as a maximum weight matching problem on a bipartite graph. \section{Computational Experiments} \label{sec:Results} In this section we present computational experiments comparing our solution framework to the state of the art. \subsection{Implementation Details and Data} The MDDs as well as our B\&P\ algorithm (BP\_MDD) are implemented in C++, and experiments are conducted on a machine with Debian GNU/Linux as operating system and a 3.60GHz Intel(R) Core(TM) processor. CPLEX 12.10 is used as the LP/MIP solver. BP\_MDD is compared against the state-of-the-art PICEF and HPIEF solution methods \citep{Dickerson2016}. The PICEF and HPIEF solvers are retrieved from the original authors, where HPIEF is the model with full reduction; they call Gurobi 7.5.2 to solve the MIP models. Although different LP/MIP solvers may add noise to the analysis, Gurobi leads recent solver benchmarks \citep{Mittelman2020}. It is worth noticing that \cite{Lam2020} holds the state-of-the-art solver for the cycle-only version. They tested their algorithm on the same library as ours, PrefLib \citep{Mattei2013}, achieving total run-times of up to 33 seconds when $K = 3$ for instances with 2048 PDPs\ and mostly less than a second for instances with 1024 PDPs. Our algorithm, although fast and effective, takes longer.
As will be shown, our algorithm does have advantages besides the fact that it can also tackle long chains. When $L = 0$, the formulation by \cite{Duncan2019} reduces to the cycle formulation, and so does PICEF. Therefore, we compare BP\_MDD with HPIEF and PICEF, and we will refer to them as ``solvers''. The three models are evaluated on the set of public instances in the PrefLib repository \citep{Mattei2013} generated by \cite{Dickerson2012} based on data from KPDPs in the United States. The first group, referred to as KEP\_N0, has $80$ instances, split into $8$ subsets of $10$ instances each, with no NDDs\ present; hence, only cycles are considered. Instances in KEP\_N0 have $\vert \mathscr{P} \vert \in \{16, 32, 64, 128, 256, 512, 1024, 2048\}$ and their graph density varies from 10\% to 32\%. Each instance is solved for $K \in \{3,4,5,6\}$, totaling 320 runs per solver. The second group, referred to as KEP\_N, has $23$ subsets of instances with $10$ instances in each. NDDs\ are present; thus, both cycles and chains are considered in the solution. Let the tuple $(\vert \mathscr{P} \vert, \vert \mathscr{N} \vert)$ characterize each subset. Then, each subset in KEP\_N has a one-to-one correspondence with the set of tuples $\{(17,1), (18,2), (33,1), (35,3), (36,4), (67,3), (70,6), (73,9), (134,6), (140,12), (147,19), \\ (268,12), (281,25), (294,38), (537,25), (563,51), (588,76), (1075,51), (1126,102), (1177,153), \\ (2150,102), (2252,204), (2355,307)\}$. Let $g_{d_N} = \frac{\vert \mathscr{A} \vert}{\vert \mathscr{P} \vert (\vert \mathscr{P} \vert - 1) + \vert \mathscr{P} \vert \vert \mathscr{N} \vert}$ be the graph density for instances in KEP\_N. In this second group of instances, $g_{d_N}$ varies from $10 \%$ to $42\%$. We used $K \in \{3,4\}$ and $L \in \{3,4,5,6\}$. For every instance we set a time limit of 30 minutes and a RAM-usage limit of 60 GB. Since the total number of runs would reach the non-negligible amount of $1840$ per solver, we proceeded as follows: for every solver, subset, and $K$-$L$ combination, if the RAM usage exceeds the limit for a particular instance, the solver stops and aborts the rest of the instances in that subset. Only one thread was used for all the experiments, and the rule used to find the FVS is maximum in-degree. \subsection{Results and analysis} Figure \ref{fig:ProfileCycles} plots the number of instances in KEP\_N0 solved to optimality up to discrete points in time before reaching the time limit of $30$ minutes, showing that our algorithm outperforms the state of the art. The time reported includes both the total MDD construction time and the B\&P\ algorithm time. The x-axis is extended up to $40$ minutes to account for the MDD construction time. When $K = 3$, BP\_MDD and PICEF solve all 80 instances in under 20 minutes, followed by HPIEF in under 26 minutes. When $K \in \{4,5,6\}$, BP\_MDD solves all 80 instances in under 25 minutes, as opposed to \cite{Lam2020}, whose solution method was able to solve instances up to $K = 4$ and 1024 PDPs. Both PICEF and HPIEF perform poorly as $K$ increases. A remarkable fact is that the RAM usage of BP\_MDD did not exceed 4 GB in these experiments, as opposed to PICEF and HPIEF, which surpassed the ample threshold of $60$ GB. Instances for which our machine ran out of memory accumulated more than $37$ million variables. By definition, PICEF is exponential in the number of variables; thus, this result is not surprising.
However, HPIEF is polynomial in the number of variables and constraints, even more so the full-reduction HPIEF we compare our algorithm against, and yet dimensionality is clearly a challenge. \begin{figure}[ht!] \centering \caption{Performance profiles for the set KEP\_N0.} \label{fig:ProfileCycles} \includegraphics[width=\linewidth]{Figures/ProfPerfCycles.pdf} \end{figure} In 94\% of the runs, BP\_MDD solves instances to optimality at the root node, and at most 8 nodes are explored in the branch-and-bound tree. On average, for instances with $\vert \mathscr{P} \vert \ge 1024$, 55\% of the total run-time accounts for solving the pricing problems, with a minimum and maximum percentage of 19\% and 84\%, respectively. Phases 2 and 3 are responsible on average for 27.3\% of the pricing time when $K \ge 4$, varying from 5.4\% to 72.2\%. In the same group, the average and maximum times for building the MDDs are 109.8 seconds and 257.5 seconds, respectively. The remaining time is split between solving the RMP and solving the cycle formulation with the columns present in the RMP. For only 10 instances do PICEF and HPIEF report a feasible solution upon reaching the time limit, meaning that for the others the RAM threshold is exceeded. The optimality gap reported by these solvers varies between 0\% and 30\% with respect to the best solution found among all three solvers. Overall, PICEF is slightly better than HPIEF in terms of optimality gap, but not in terms of the number of instances that could fit into memory. Even though the cumulative result among solvers at the end of the time limit is the same when $K = 3$, Figure \ref{fig:BP_RunTime.pdf} shows, for every instance and each solver, the solution time (without preprocessing or MDD construction) when $K \in \{3,4\}$ for all chain lengths. Markers located at the y-axis value of 35 on the top part of the plot indicate instances that were not solved to optimality within the time limit. As observed, BP\_MDD is faster in most cases. \begin{figure}[ht!] \centering \caption{Performance comparison when $L \in \{0,3,4,5,6\}$. Triangle- and square-shaped markers correspond to instances with $K = 3$ and $K = 4$, respectively. The size of the markers is correlated with the number of PDPs\ in the instances.} \includegraphics[scale=0.4]{Figures/BP_RunTime.pdf} \label{fig:BP_RunTime.pdf} \end{figure} Figure \ref{fig:ProfileChains} shows the performance profiles for the instances in KEP\_N solved to optimality by the three solvers, which demonstrates that our algorithm dominates the others, especially when long chains are allowed. For $K = 3$ and $L = 3$, the performance at the end of the time limit is similar for the three algorithms, although BP\_MDD does not solve one instance to optimality, namely the one with 2355 PDPs\ and 307 NDDs, for which the optimality gap of the best solution found is 0.2\%. The same instance remains suboptimal across the other $K$-$L$ combinations. Clearly, there is an inverse relationship between the growth of $K$-$L$ and the number of instances optimally solved by PICEF and HPIEF. There are 105 runs, mostly with $K = 3$ and different chain lengths, for which an optimality gap could be obtained at the time limit. HPIEF provides an optimality gap for 71 of these runs and PICEF for the remaining ones. The maximum gap in these runs is 11\%. \begin{figure}[ht!]
\centering \caption{Performance profiles for the set KEP\_N.} \label{fig:ProfileChains} \includegraphics[width=\linewidth]{Figures/ProfPerfCyclesChains.pdf} \end{figure} On average, 94.3\% of runs were solved to optimality at the root node, with 2 being the maximum number of branch-and-bound nodes explored. For instances with $\vert \mathscr{P} \vert \ge 1075$, solving the pricing problems accounts for 66\% of the total run-time, and on average 46\% and 21\% of the pricing time is spent in Phase 1 and in Phases 2 and 3, respectively. For the same instances, the time spent on building the MDDs accounts on average for 25\% of the total run-time, with a maximum of 35\%. In more than 90\% of the cases, all NDDs\ were used in the final solution. Lastly, we note that the majority of columns across all runs are found in Phase 1 (see Appendix \ref{sec:OtherResults} for more details). \section{General Solution Framework} \label{sec:SolAp} In our final implementation, a combination of exact and restricted MDDs is used so that, once built, they are stored in computer memory and retrieved every time the pricing problems need to be solved. As a result, we cannot rely solely on MDDs to prove optimality of the RMP. In this section, we introduce a three-phase solution framework consisting of a search through MDDs for both cycles and chains (Phase 1), a cutting plane algorithm used to search for positive-price chains and cycles, whose final goal is to prove that no more positive-price chains exist (Phase 2), and a two-step search to find a positive-price cycle, if any (Phase 3). \subsection{Phase 1: Solving the pricing problems via MDDs} Building the MDDs is the first step. We parameterize some aspects to make reasonable use of computer memory. Particularly, if $K \ge 4$ and $\vert \mathscr{P} \vert > 500$, we build restricted MDDs by considering a maximum cycle length of 3 in 90\% of the decision diagrams, while in the remaining 10\% we keep the true value of $K$. If $K \le 4$ and $\vert \mathscr{P} \vert \le 500$, we build exact MDDs. When constructing a transition state graph for chains, we explore at most $20\%$ of the vertices receiving an arc from $v \in \mathscr{V}$, unless $\vert \mathscr{N} \vert > 250$, in which case we explore only $10\%$. In all cases, the maximum length of chains (in terms of arcs) considered in the construction of MDDs is also 3, regardless of the true value of $L$. After the MDDs are built, we store them in memory and use them to solve the pricing problems as depicted in Figure \ref{fig:LogicDiagram}, in integration with Phases 2 and 3. \subsection{Phase 2: A longest path formulation for chains and cycles} We use a longest path problem as a relaxation of \eqref{sprbm:chains} in which the goal is to find an $\mathbf{s}\mbox{-}\mathbf{t}$ path or a cycle, both corresponding to feasible positive-price columns. To this end, let us define $\mathscr{D}^{'} = (\lonVertex, \lonArcs)$ as a digraph whose vertex set $\mathscr{V}^{'} = \mathscr{V} \cup \{\mathbf{s},\mathbf{t}\} $ has two dummy vertices $\mathbf{s}$ and $\mathbf{t}$, and whose arc set $\mathscr{A}^{'} = \mathscr{A} \cup \{(\mathbf{s}, u) \mid u \in \mathscr{N}\} \cup \{(v, \mathbf{t}) \mid v \in \mathscr{P}\}$ connects the dummy vertex $\mathbf{s}$ to the NDDs, and the PDPs\ to the vertex $\mathbf{t}$. Arcs incident to a dummy vertex have weight zero. Moreover, let $\mathscr{C}^{'}$ be the set of all simple cycles and $\mathscr{C}^{'}_{\Kcycle} \subseteq \mathscr{C}^{'}$ be the set of feasible cycles in $\mathscr{D}^{'}$.
As defined before, $\Arcs^{*}$ is the set of selected arcs. A decision variable $y_{\uix \vix}$ takes the value 1 if arc $(\uix, \vix) \in \mathscr{A}^{'}$ is selected, and 0 otherwise. Thus, a longest path formulation at some node in the branching tree is given below: \begin{subequations} \label{sub:LongPath} \begin{align} \mathcal{Z}^{\mbox{\footnotesize \eqref{objLP}}} := \ \max& \sum_{(\uix, \vix) \in \mathscr{A}^{'}: u \ne \mathbf{s}} (w_{\uix \vix} - \lambda_{u}) y_{\uix \vix} - \sum_{(\uix, \vix) \in \Arcs^{*}} \mu_{(\uix, \vix)} y_{\uix \vix} & \label{objLP}\tag{LPH}\\ \text{s.t.} &\sum_{v:(\uix, \vix) \in \mathscr{A}^{'}} y_{\uix \vix} - \sum_{v: (\vix, \uix) \in \mathscr{A}^{'}} y_{\vix \uix} = \left\{ \begin{array}{@{}ll@{}ll@{}} 1, && u= \mathbf{s}\\ 0, && u \in \mathscr{V}\\ -1, && u = \mathbf{t} \end{array}\right. \label{sub:LongBalance}\\ &\sum_{v:(\uix, \vix) \in \mathscr{A}^{'}} y_{\uix \vix} \le 1 \qquad\qquad\qquad\qquad\quad u \in \mathscr{V} \label{sub:MaxFlow}\\ &y_{\uix \vix} \in [0,1] \qquad \qquad\qquad\qquad\quad\hspace{2mm} (\uix, \vix)\in\mathscr{A}^{'} \end{align} \end{subequations} Although a solution of \eqref{objLP} may yield a path using more than $L$ arcs, a solution with subtours, or a non-integer solution, we see these downsides as an opportunity to either efficiently find a positive-price chain (cycle) missed in the first phase or prove that none exists. Because \eqref{objLP} is a relaxation of \eqref{sprbm:chains}, whenever the objective value of \eqref{objLP} is zero, so is the objective value of \eqref{sprbm:chains}. Notice that, even without the subtour-elimination constraints \eqref{eq:subcons}, a solution of \eqref{objLP} is guaranteed to contain a (possibly fractional) path from vertex $\mathbf{s}$ to $\mathbf{t}$ due to the flow-balance constraints \eqref{sub:LongBalance}. The solution may also have subtours representing positive-price cycles useful for the RMP. Therefore, the goal is to first solve a linear program without constraints \eqref{sub:OntheFly}, check the solution for positive-price and \textit{feasible} columns, and only then add \eqref{sub:OntheFly} if need be. Experimentally, we observed that the ``warmed-up'' Lagrange multipliers resulting from Phase 1 allow us to relax the integrality constraints and yet obtain an integer solution in many cases. Particularly, if $y^{*}$ is an optimal solution of \eqref{objLP}, we check arcs $(\uix, \vix) \in \mathscr{A}^{'}$ for which $y_{\uix \vix}^{*} \ge 0.9$ when searching for positive-price columns. Lastly, enforcing two dummy arcs, one going out of $\mathbf{s}$ and one coming into $\mathbf{t}$, requires us to adjust the right-hand side of \eqref{eq:LongSizeChain}. \begin{subequations} \label{sub:OntheFly} \begin{align} \sum_{(\uix, \vix) \in c} y_{\uix \vix} \le&\ \vert c \vert - 1 &c \in \mathscr{C}^{'} \setminus \mathscr{C}^{'}_{\Kcycle} && \label{eq:subcons}\\ \sum_{(\uix, \vix) \in \mathscr{A}^{'}} y_{\uix \vix} \le&\ L + 2&& \label{eq:LongSizeChain}\\ y_{\uix \vix} \in&\ \{0,1\}& (\uix, \vix) \in \mathscr{A}^{'}&& \label{eq:integChain} \end{align} \end{subequations} Figure \ref{fig:LogicDiagram} shows how \eqref{objLP}$+$\eqref{sub:OntheFly} is solved via a cutting plane algorithm integrated with the other two phases during column generation.
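The interplay just described can be outlined in the following illustrative Python sketch; \texttt{model}, \texttt{separate\_infeasible\_cycle}, \texttt{extract\_columns}, and \texttt{add\_length\_and\_integrality} are hypothetical stand-ins for the LP model of \eqref{objLP}, the separation of constraints \eqref{eq:subcons}, the harvesting of feasible positive-price columns over arcs with $y^{*}_{\uix \vix} \ge 0.9$, and the addition of \eqref{eq:LongSizeChain} and \eqref{eq:integChain}, respectively.

\begin{verbatim}
# Illustrative sketch of the Phase 2 cutting-plane loop; all helpers are
# hypothetical stand-ins, not part of our implementation.

EPS, THRESHOLD = 1e-9, 0.9

def phase2(model, separate_infeasible_cycle, extract_columns,
           add_length_and_integrality):
    tightened = False
    while True:
        obj, y = model.solve()          # solve (LPH), possibly with cuts
        if obj <= EPS:
            return []                   # proves no positive-price chain exists
        selected = {a for a, val in y.items() if val >= THRESHOLD}
        cols = extract_columns(selected)  # feasible positive-price columns?
        if cols:
            return cols                 # send them to the RMP
        cut = separate_infeasible_cycle(y)
        if cut is not None:
            model.add_cut(cut)          # add violated infeasible-cycle cut
        elif not tightened:
            add_length_and_integrality(model)  # add on-the-fly constraints
            tightened = True
        else:
            return []                   # integral and no usable column left
\end{verbatim}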
\input{./Figures/FlowChart} \subsection{Phase 3: A two-step procedure for cycles} \label{sec:2steps} If a positive-price cycle still exists in $\mathscr{D}$ but was not found in Phases 1 and 2, we perform an exhaustive enumeration of cycles for as long as a predetermined time limit is not exceeded; otherwise, the exhaustive search is called off and a MIP is solved instead. Notice that Phase 2 guarantees finding a positive-price chain if one exists. Therefore, in Phase 3, we only have to search for positive-price cycles. \subsubsection{Exhaustive search} \label{sec:exsearch} We perform a depth-first search on every cycle copy $\hat{\mathscr{D}}^{i}$, where a feasible cycle is rooted at $v^{*}_{i} \in \fvs$. First, we sort and search over the graph copies in increasing order of their $\lambda_{v}$ values. Next, we traverse $\hat{\mathscr{D}}^{i}$, and when $v^{*}_{i}$ is the leaf node of a path from the root and is found at a position between $3$ and $K + 1$, that path constitutes a feasible cycle $c$. If $\hat{r}_{c}^{i}> 0$, the cycle has a positive reduced cost and is sent to the RMP. \cite{Abraham2007} and \cite{Lam2020} implemented a similar search, although, unlike them, our paths are rooted only at vertices in the FVS. Despite searching for cycles on trees rooted at only a subset of vertices, exhaustive enumeration becomes a bottleneck for large instances. Therefore, whenever a time threshold, $t^e$, is surpassed, we shift to solving the MIP provided next. \subsubsection{A MIP for cycles} \eqref{objCyclesMIP} aims at finding a feasible cycle in $\mathscr{D}$ with maximum reduced cost at some branch-and-bound node. If found, the cycle is sent to the RMP. Notice that constraint \eqref{eq:CycleSizePh3} guarantees that at most $K$ arcs are selected, whether or not they are distributed across multiple (smaller) cycles. Thus, there is no need for subtour-elimination constraints. \begin{subequations} \label{sub:CyclesMIP} \begin{align} \mathcal{Z}^{\mbox{\footnotesize \eqref{objCyclesMIP}}}= \ \max &\sum_{(\uix, \vix) \in \mathscr{A}} (w_{\uix \vix} - \lambda_{u}) y_{\uix \vix} - \sum_{(\uix, \vix) \in \Arcs^{*}} \mu_{(\uix, \vix)} y_{\uix \vix} \label{objCyclesMIP}\tag{MCC}\\ \text{s.t.} &\sum_{v:(\uix, \vix) \in \mathscr{A}} y_{\uix \vix} - \sum_{v: (\vix, \uix) \in \mathscr{A}} y_{\vix \uix} = 0 && u \in \mathscr{V}\\ &\sum_{v: (\uix, \vix)\in \mathscr{A}} y_{\uix \vix} \le \left\{ \begin{array}{@{}ll@{}} 1, & \quad \ u \in \mathscr{P}\\ 0, & \quad \ u \in \mathscr{N} \end{array}\right.\\ &\sum_{(\uix, \vix) \in \mathscr{A}} y_{\uix \vix} \le K &\label{eq:CycleSizePh3} \\ &y_{\uix \vix} \in \{0,1\} \qquad\qquad\ (\uix, \vix) \in \mathscr{A}& \end{align} \end{subequations} Figure \ref{fig:LogicDiagram} shows the solution of the RMP via the three phases discussed above. \subsubsection{Algorithmic details} In the three-phase solution framework, after building the MDDs, the pricing problems are solved to optimality as follows. During Phase 1, we compute $\hat{\eta}^{\fm}$ and $\bar{\eta}^{\fm}$ for all $i \in \hat{I}$ and $i \in \bar{I}$, respectively, and add the positive-price columns found to the RMP after every iteration. We encourage the use of chains by returning up to 15 positive-price chain columns to the RMP, whereas only one column is returned from every cycle MDD. Notice that in the former case, recursions \eqref{eq:1b}-\eqref{eq:2b} only need to be computed once. In Phase 2, we delay the inclusion of constraints \eqref{sub:OntheFly} until their addition is mandatory to find a positive-price chain column.
Every time a solution is checked during Phase 2, a positive-price cycle column is searched for when no chain column is found. As for Phase 3, we set $t^{e} = 20$ seconds to exhaustively find a positive-price cycle or reach the end of the search with none. If the time threshold is exceeded, we proceed to solve \eqref{objCyclesMIP} and re-solve the RMP, if needed. When there are no NDDs\ present in the input graph $\mathscr{D}$, we simply skip Phase 2. Likewise, because MDDs are exact when $K \le 4$, $\vert \mathscr{P} \vert \le 500$, and there are no NDDs, Phases 2 and 3 are dropped in that setting, and a valid upper bound \eqref{objLag2} is provided at the end in case the pricing problems are not solved to optimality within the time limit. In other cases, the pricing problems could be solved, e.g., via a MIP. Lastly, notice that the procedure given in Figure \ref{fig:LogicDiagram} can be easily adapted to the case $L = \infty$, since it suffices to remove constraint \eqref{eq:LongSizeChain}.
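Finally, the exhaustive search of Section~\ref{sec:exsearch} can be sketched in Python as follows; \texttt{out\_arcs} maps each vertex of a cycle copy to its weighted out-neighbors, \texttt{lam} holds the current Lagrange multipliers, all names are ours, and the branching duals $\mu$ are omitted for simplicity.

\begin{verbatim}
# Illustrative sketch of the Phase 3 depth-first enumeration of
# positive-reduced-cost cycles rooted at a feedback vertex v_star.
# Branching duals (the mu terms) are omitted for simplicity.

import time

def positive_price_cycles(v_star, out_arcs, lam, K, t_e=20.0):
    start, found = time.monotonic(), []

    def dfs(path, rcost, visited):
        if time.monotonic() - start > t_e:
            raise TimeoutError       # caller falls back to the MIP (MCC)
        u = path[-1]
        for v, w in out_arcs.get(u, []):
            rc = rcost + w - lam[u]  # arc (u, v) contributes w_uv - lam_u
            if v == v_star and len(path) >= 2:
                if rc > 1e-9:        # feasible positive-price cycle found
                    found.append(list(path))
            elif v not in visited and len(path) < K:
                visited.add(v)
                path.append(v)
                dfs(path, rc, visited)
                path.pop()
                visited.remove(v)

    dfs([v_star], 0.0, {v_star})
    return found
\end{verbatim}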
\section{Background} \label{sec:bg} \subsection{Maps of $2$-monads} \label{subsec:mm} The construction which we introduce here takes for its input a map $\Mmap:\Smon\rightarrow \Cmon$ of $2$-monads on $\cat\cK$. For clarity we stress that the usual diagrams commute on the nose. We rehearse some folklore related to this situation. First, it is elementary categorical algebra that the monad map $\Mmap:\Smon\rightarrow \Cmon$ induces a $2$-functor \(\lift \Mmap:\Algs\Cmon\rightarrow \Algs\Smon\). On objects, $\lift\Mmap$ takes an $\Cmon$-algebra $\Cmon X\rightarrow X$ to an $\Smon$-algebra $\Smon X\xrightarrow\Mmap \Cmon X \to X$. It is equally evident that $\Mmap:\Smon\rightarrow \Cmon$ induces a $2$-functor \(\shriek \Mmap:\kleisli\Smon\rightarrow \kleisli\Cmon\) between the corresponding Kleisli $2$-categories. These $2$-functors are essentially folklore. Given the evident relation between algebras for a (perhaps enriched!) monad and modules for a ring, $\Mmap^*$ can be called {\em restriction of scalars} and $\Mmap_!$ (or its extension, see below) {\em extension of scalars}: just as a ring homomorphism $R\to S$ induces restriction of scalars from $S$-modules to $R$-modules, with extension of scalars $S\otimes_R(-)$ as its left adjoint. These connections are the driving force behind Durov's PhD thesis~\cite{durov2007}, which gives details of these phenomena. We have the standard locally full and faithful comparisons: \( \kleisli\Smon \rightarrow \Algs\Smon\) and \(\kleisli\Cmon \rightarrow \Algs\Cmon\). Suppose we interpret $\shriek\Mmap$ as acting on the free algebras, so that $\shriek\Mmap$ takes the free $\Smon$-algebra $\Smon^2A\xrightarrow\Smult \Smon A$ to the free $\Cmon$-algebra $\Cmon^2A\xrightarrow\Cmult \Cmon A$. Then we can see $\shriek\Mmap$ as a restricted left adjoint to $\lift\Mmap$ in the following sense. Given the free $\Smon$-algebra $\Smon^2A\xrightarrow\Smult \Smon A$ on $A$ and an arbitrary $\Cmon$-algebra $\Cmon B\xrightarrow b B$, we have \(\Algs\Smon(\Smon A,\lift \Mmap B) \simeq \Algs\Cmon(\shriek\Mmap\Smon A, B). \) Indeed, $\shriek\Mmap(\Smon^2A\xrightarrow\Smult\Smon A)=\Cmon^2A\xrightarrow\Cmult\Cmon A$, and so both sides are isomorphic to $\cat\cK(A,B)$. Any $\Smon$-algebra $\Smon A\xrightarrow a A$ lies in a coequalizer diagram in $\Algs\Smon$: \( \begin{tikzcd} \Smon^2 A\ar[r,shift left=.75ex,"\Smult"] \ar[r,shift right=.75ex,swap,"\Smon a"] & \Smon A \ar[r,"a"] & a. \end{tikzcd} \) So to extend $\shriek\Mmap$ to a full left adjoint $\shriek\Mmap:\Algs\Smon\rightarrow \Algs\Cmon$ one has only to take the coequalizer of the corresponding pair in $\Algs\Cmon$: \(\begin{tikzcd} \Cmon\Smon A\ar[r,shift left=.75ex,"\Cmult\Cmon \Mmap"] \ar[r,shift right=.75ex,swap,"\Cmon a"] & \Cmon A. \end{tikzcd} \) As it happens, we do not need the full left adjoint, but we shall need the unit of the adjunction, given by the $\Smon$-algebra map $\Mmap_A$ from $\Smon^2 A \xrightarrow\Smult\Smon A$ to \(\lift\Mmap\shriek\Mmap(\Smon^2 A\xrightarrow\Smult\Smon A)=\Smon\Cmon A\xrightarrow{\Mmap\Cmon} \Cmon^2 A\xrightarrow\Cmult \Cmon A\). If $\tz{mm0}$ is an $\Smon$-algebra $2$-cell then the corresponding $2$-cell $\lift\Mmap\shriek\Mmap g\Rightarrow \lift\Mmap\shriek\Mmap g'$ is given by the composite $\tz{mm11}$ so that \begin{equation}\label{eq:mm1} \tz{mm111} \end{equation} \subsection{Left-semi Algebras} \label{subsec:lsa} In this section we present some of the theory of a generalization of the notion of $\mon$-algebra for a $2$-monad $\mon$. In effect, it is a mere glimpse of an extensive theory of semi-algebra structure, in the sense of structure ``up to a retraction'', a terminology well-established in computer science.
We do not need to have this background in place for the results which we give in this paper: we give only what is required to make the paper comprehensible. However, some impression of what is involved can be obtained by looking at~\cite{garner2019vietoris}, which gives some of the theory in the $1$-dimensional context. \begin{definition} Let $\mon$ be a $2$-monad on a $2$-category $\cat\cK$. A left-semi $\mon$-algebra structure on an object $Z$ of $\cat\cK$ consists of a $1$-cell $\mon Z\xrightarrow z Z$ and a $2$-cell $\epsilon:z.\eta\Rightarrow \id_Z$ satisfying the following $1$-cell and $2$-cell equalities: \begin{center} \begin{minipage}{0.25\linewidth} \begin{equation}\label{semialg1com} \tz{lsa1} \end{equation} \end{minipage}\quad\vrule\quad \begin{minipage}{0.65\linewidth} \begin{equation}\label{semialg2com} \tz{lsa2} \end{equation} \end{minipage} \end{center} \end{definition} \begin{remark} \begin{enumerate} \item The diagrams \begin{equation*} \tz{lsa3}\qtand \tz{lsa4} \end{equation*} demonstrate that Condition~\eqref{semialg1com} implies that the boundaries of the $2$-cells in Condition~\eqref{semialg2com} do match. \item Condition~\eqref{semialg1com} is the standard composition law for a strict $\mon$-algebra, while Condition~\eqref{semialg2com} is the unit condition for a colax $\mon$-algebra. \end{enumerate} \end{remark} \begin{definition} Suppose that $\mon Z\xrightarrow z Z, \epsilon: z.\unit \Rightarrow \id_Z$ and $\mon W\xrightarrow w W, \epsilon: w.\unit \Rightarrow \id_W$ are left-semi $\mon$-algebras. A strict map from the first to the second consists of $p:Z\to W$ satisfying the following $1$-cell and $2$-cell equalities: \begin{center} \begin{minipage}{0.25\linewidth} \begin{equation}\label{semialg3com} \tz{lsa5} \end{equation} \end{minipage}\quad\vrule\quad \begin{minipage}{0.65\linewidth} \begin{equation}\label{semialg4com} \tz{lsa6} \end{equation} \end{minipage} \end{center} \end{definition} \begin{remark} \begin{enumerate} \item Condition~\eqref{semialg3com}, together with the naturality of $\unit$, implies that the boundaries of the $2$-cells in~\eqref{semialg4com} do match. \item The definition is the restriction to left-semi algebras of the evident notion of strict map of colax $\mon$-algebras. \item If $\mon Z\xrightarrow z Z, \epsilon: z.\unit\Rightarrow \id_Z$ is a left-semi algebra, then $\mon Z\xrightarrow z Z$ is a strict map to it from the free algebra $\mon^2 Z\xrightarrow \mult \mon Z$. \end{enumerate} \end{remark} \begin{proposition} Suppose that $\mon Z\xrightarrow z Z, \epsilon: z.\unit\Rightarrow \id_Z$ is a left-semi algebra. Then the composite $f:Z\xrightarrow\unit\mon Z\xrightarrow z Z$ is a strict endomap of the left-semi algebra. \end{proposition} Finally, we consider $2$-cells between maps of left-semi algebras. \begin{definition} Suppose that $p,q:Z\to W$ are strict maps of left-semi algebras from $\mon Z\xrightarrow z Z, \epsilon: z.\unit\Rightarrow \id_Z$ to $\mon W\xrightarrow w W, \epsilon: w.\unit\Rightarrow \id_W$. A $2$-cell from $p$ to $q$ consists of a $2$-cell $\gamma:p\Rightarrow q$ such that the equality $\tz{lsa7}$ holds. \end{definition} \begin{remark} Again, this is simply the restriction to the world of left-semi algebras of the definition of $2$-cells for colax $\mon$-algebras. \end{remark} \begin{proposition} Suppose that $\mon Z\xrightarrow z Z, \epsilon: z\unit\Rightarrow \id_Z$ is a left-semi $\mon$-algebra, so that both $z.\unit$ and $\id_Z$ are strict endomaps.
Then $\epsilon:z.\unit\Rightarrow\id_Z$ is a left-semi $\mon$-algebra $2$-cell. \end{proposition} At this point, it is straightforward to check that left-semi $\mon$-algebras, strict maps and $2$-cells form a $2$-category, which we denote by $\lsalg\mon$. Looking more closely at what we showed above, we see that if we set $f=z\unit$, then we have $f=f^2$ and $\epsilon.f=\idm f=f.\epsilon$. So, in fact, we have the following. \begin{proposition} Suppose that $\mon Z\xrightarrow z Z, \epsilon: z.\unit\Rightarrow \id_Z$ is a left-semi $\mon$-algebra. Then, in the $2$-category $\lsalg\mon$, the $1$-cell $f$ and the $2$-cell $\epsilon:f\Rightarrow \id_Z$ equip the left-semi $\mon$-algebra with the structure of a strictly idempotent comonad. \end{proposition} Applying the evident forgetful $2$-functor, we get that $f=f^2$ and $\epsilon: f\Rightarrow \id_Z$ equip $Z$ with the structure of a strictly idempotent comonad in the underlying $2$-category $\cat\cK$. \begin{proposition}\label{prop:comonlsa} Suppose that $\mon X\xrightarrow x X$ is a $\mon$-algebra and $f=f^2:X\to X$ and $\epsilon: f\Rightarrow \id_X$ equip $X$ with the structure of a strictly idempotent comonad in $\Algs\mon$. Then $\mon X\xrightarrow x X\xrightarrow f X,\epsilon: fx\unit\Rightarrow \id_X$ is a left-semi $\mon$-algebra. \end{proposition} \begin{proof}[Proof sketch] The $1$-cell part is routine, and the $2$-cell part uses that $\epsilon$ is a $2$-cell in $\Algs \mon$. \end{proof} \begin{definition} Suppose that $\monS$ and $\mon$ are $2$-monads. A left-semi monad map from the first to the second consists of $\lambda:\monS\to\mon$ together with a $2$-cell $\gamma:\lambda.\unit\Rightarrow\unit$ satisfying the following equalities \begin{center} \begin{minipage}{0.4\linewidth} \begin{equation}\label{semialg5com} \tz{lsa9} \end{equation} \end{minipage}\quad\vrule\quad \begin{minipage}{0.5\linewidth} \begin{equation}\label{semialg6com} \tz{lsa13} \end{equation} \end{minipage} \begin{equation}\label{semialg7com} \tz{lsa10} \end{equation} \end{center} \end{definition} \begin{proposition}\label{prop:mapmonlsa} Suppose that $\mon Z\xrightarrow z Z, \epsilon: z.\unit\Rightarrow \id_Z$ is a left-semi $\mon$-algebra and $\monS\xrightarrow \lambda\mon,\gamma:\lambda.\unit\Rightarrow\unit$ is a left-semi monad map. Then $\monS Z \xrightarrow{\lambda_Z}\mon Z \xrightarrow z Z,\epsilon.\gamma:z.\lambda.\unit\Rightarrow\id_Z$ is a left-semi $\monS$-algebra. \end{proposition} \begin{proof}[Proof sketch.] The $1$-cell part is routine, and the $2$-cell parts use the naturality of $\lambda$ to separate the two $2$-cells $\gamma$ and $\epsilon$. \end{proof} \subsection{Colax colimits induced by a map in a $2$-category} \label{subsec:col} In this section we review the notion of colax colimits in a cocomplete $2$-category, specialised to our context~\cite{BIRD19891,Lack2010}. In the $2$-category $\cat\cK$, suppose that $\Dcell$ is a colax cocone $(\Scol,\Ccol,\Dcell)$ under the arrow $\Mmap$ (see Figure below, left). Then, for every $D$, composition with $\Dcell$ induces an isomorphism of categories between $\cat\cK(C,D)$ and the category of colax cocones under the arrow $\Mmap$, with objects $(f,g,\phi)$ (see Figure below, center) and $1$-cells $(f,g,\phi)\to (f',g',\phi')$ given by $2$-cells $f\xRightarrow \rho f'$ and $g\xRightarrow \sigma g'$ such that $\compC\rho\phi=\compC{\phi'}{\sigma.\lambda}$ (see Figure below, right).
\begin{figure}[h] \begin{subfigure}[t]{0.18\textwidth} \centering \[\tz{col1}\] \end{subfigure}\hfill \begin{subfigure}[t]{0.18\textwidth} \centering \[\tz{col2}\] \end{subfigure} \begin{subfigure}[t]{0.60\textwidth} \centering \[\tz{col3}\] \end{subfigure} \end{figure} This isomorphism of categories has two universal aspects: the first is $1$-dimensional and the second is $2$-dimensional: \begin{itemize} \item for any $\tz{col2}$ there is a unique $r$ such that $\tz{col4}=\phi$; \item for any $\tz{col3}$ there is a unique $r\xRightarrow \tau r'$ such that \end{itemize} \begin{equation}\label{eq:2cellcol} \tz{col5}\qtand\tz{col6} \end{equation} Although we will require colax colimits in the $2$-category $\Algs\Smon$, where the situation is more subtle, we illustrate the definition by computing colax colimits in the $2$-category $\Cat$. \begin{example} In $\Cat$, $A\xrightarrow\Mmap B$ is a functor between categories. The colax colimit under $\Mmap$ is a category $C$ which consists of separate copies of $A$ and $B$ together with, for every object $a\in A$, new maps $\Mmap(a)\xrightarrow{\Dcell_a} a$, composites of these, and the evident identifications. More precisely, maps from $b\in B$ to $a\in A$ are given by composites $b\xrightarrow v\Mmap(a) \xrightarrow{\Dcell_a} a$, so that $C(b,a)\simeq B(b,\Mmap(a))$. \end{example} \section{The colimit $2$-monad induced by a map of $2$-monads} \label{sec:cons} From now on, we assume that $\Smon$ is a finitary $2$-monad, so that $\Algs\Smon$ is cocomplete~\cite{KELLY1993163}. \subsection{Definition of the colimit and its $2$-naturality} \label{subsec:def} \begin{definition}\label{prop:def} Suppose that $\Mmap:\Smon\to\Cmon$ is a map of $2$-monads. Then the colax colimit $(\Dmon X,\SDmap)$ under the induced $\Mmap_X:(\Smon X,\Smult)\to(\Cmon X,\Cmult)$ in $\Algs\Smon$ satisfies \begin{equation}\label{eq:cons1}\tz{cons0} \end{equation} \end{definition} \begin{proposition}\label{prop1:def} The colax colimit $(\Dmon X,\SDmap)$ is natural in $(\Smon X,\Smult)$. \end{proposition} \begin{proof}[Proof sketch] Assume $\tz{cons00}$ is an $\Smon$-algebra $2$-cell. For each $1$-cell, composition yields a cocone, and so by $1$-cell universality we get a unique map $\widehat g:\Dmon A\to\Dmon B$. We then have \[ \tz{cons1}=\tz{cons2} \] and similarly for $g'$ and $\widehat{g'}$. By $2$-cell universality~\eqref{eq:2cellcol}, we then get: \[ \tz{cons1a} \] \end{proof} \subsection{A left-semi algebra} \label{subsec:Qlsa} We explore the properties of $\Dmon X$ by considering the $1$- and $2$-dimensional aspects of trivial cocones under $\Mmap$. From the identity cocone under $\Mmap$, a unique $\Smon$-algebra map $\Ret$ arises by $1$-dimensional universality. \begin{equation} \label{eq:cons3} \tz{cons3} \qqtand \left\{ \begin{array}{l} \Ret\,\Scol=\Mmap_X \\ \Ret\,\Ccol=\id_{\Cmon X}\\ \Ret . \Dcell =\idm\Mmap \end{array}\right. \end{equation} If $\tz{cons00}$ is an $\Smon$-algebra $2$-cell, then by $2$-dimensional universality we get: \[ \tz{cons30} \] From the $2$-cells $\idm\Ccol:\Ccol=\Ccol$ and $\Dcell:\Ccol\,\Mmap \Rightarrow\Scol$, there arises a unique $\Algs\Smon$ $2$-cell $\Icell:\Ccol\,\Ret\Rightarrow \id_{\Dmon X}$ \sut \begin{equation*} \tz{cons4}\qtand \tz{cons5} \end{equation*} Denote $\comon=\Ccol\,\Ret$. 
Then $\Dmon X$ is an $\Smon$-algebra, and $\comon=\comon^2:\Dmon X\to\Dmon X$ and $\Icell:\comon\Rightarrow\id_{\Dmon X}$ equip $\Dmon X$ with the structure of a strictly idempotent comonad in $\Algs\Smon$, as $\Icell .\Ccol =\idm\Ccol$, $\Icell .\Scol =\Dcell$, and thus $\Ret .\Icell = \idm\Ret$. We apply Proposition~\ref{prop:comonlsa} and get \begin{proposition}\label{prop:Dstruct1} $\Smon\Dmon X\xrightarrow \SDmap \Dmon X \xrightarrow \Ret \Cmon X\xrightarrow \Ccol \Dmon X$ with $\Icell:\Ccol\Ret\SDmap\Sunit=\Ccol\Ret\Rightarrow \id_{\Dmon X}$ is a left-semi $\Smon$-algebra. \end{proposition} \begin{proposition}\label{prop:Dstruct2} Let $\CDmap$ denote the map $\Cmon\Dmon X\xrightarrow{\Cmon \Ret}\Cmon^2 X\xrightarrow\Cmult\Cmon X\xrightarrow\Ccol\Dmon X$. Then $\Dmon X$ together with $\CDmap$ and $\CDmap\Cunit=\Ccol\Ret\xRightarrow\Icell \id_{\Dmon X}$ is a left-semi $\Cmon$-algebra. \end{proposition} \begin{proof}[Proof sketch] The $2$-cell property relies on $\Icell.\Ccol=\idm\Ccol$ and $\Ret.\Icell=\idm\Ret$. \end{proof} As $\Mmap$ is a map of $2$-monads, it is in particular a left-semi monad map (with identity unit $2$-cell). We apply Proposition~\ref{prop:mapmonlsa} and get \begin{proposition}\label{prop:Dstruct3} $\Smon\Dmon X\xrightarrow{\Mmap\Dmon}\Cmon \Dmon X \xrightarrow{\Cmon\Ret} \Cmon^2 X\xrightarrow \Cmult \Cmon X\xrightarrow\Ccol\Dmon X$ together with the $2$-cell $\Icell:\CDmap\,(\Mmap\Dmon)\Sunit=\Ccol\Ret\Rightarrow \id_{\Dmon X}$ is a left-semi $\Smon$-algebra. \end{proposition} The following is an immediate consequence of the definitions. \begin{proposition} The left-semi $\Smon$-algebras of Propositions~\ref{prop:Dstruct1} and~\ref{prop:Dstruct3} are equal. \end{proposition} Let us recap the properties of $\Dmon X$. It is equipped with an $\Smon$-algebra structure $\SDmap$ and a left-semi $\Cmon$-algebra structure $\CDmap$ whose $2$-cell $\Icell$ lies in $\Algs\Smon$, and such that the two resulting left-semi $\Smon$-algebra structures coincide. In order to prove that $\Dmon$ is a $2$-monad (Theorem~\ref{th:Qmonad}) and that these properties characterise $\Dmon$-algebras (Theorem~\ref{th:characterisation}), we encapsulate the structure in a $2$-category. Given this structure on a general object $X$, we can build a map $\Dmon X\to X$ in a sufficiently functorial way that both theorems follow. What we need is the $1$-cell and $2$-cell aspects associated to these properties. \subsection{The Structure category} \label{subsec:struc} Let us define the Structure $2$-category $\struc$: \begin{itemize} \item an object of $\struc$ consists of an object $X$ of $\cat\cK$ equipped with \begin{itemize} \item the structure $\Smon X\xrightarrow w X$ of an $\Smon$-algebra \item the structure $\Cmon X\xrightarrow z X$, $\epsilon:z\,\Cunit= \comon\Rightarrow\id_X$ of a left-semi $\Cmon$-algebra \end{itemize} such that \begin{itemize} \item $\comon$ is an endomap of the $\Smon$-algebra $\Smon X\xrightarrow w X$ and $\epsilon$ is an $\Smon$-algebra $2$-cell \item the two induced left-semi $\Smon$-algebra structures, with structure maps $\Smon X\xrightarrow w X \xrightarrow \comon X$ and $\Smon X\xrightarrow \Mmap \Cmon X\xrightarrow z X$, are equal \end{itemize} \item a map in $\struc$ between objects $X$ and $X'$ equipped as above is a map $p:X\to X'$ in $\cat\cK$ which is both an $\Smon$-algebra map and a left-semi $\Cmon$-algebra map \item a $2$-cell between two such maps $p$ and $q$ is a $2$-cell $p\Rightarrow q$ which is both an $\Smon$-algebra and a left-semi $\Cmon$-algebra $2$-cell. 
\end{itemize} \begin{remark} \begin{enumerate} \item In the definition, the condition regarding the left-semi $\Smon$-algebra structures amounts to the claim that $\comon\,w=z\,\Mmap$. The equality of the $2$-cells is then automatic. \item It is a consequence of the definition that $z:\Cmon X\to X$ is a map of $\Smon$-algebras. Indeed, of the three following conditions, any two imply the third: \begin{itemize} \item $\comon$ is an endomap of $\Smon$-algebras, \item $\comon\,w=z\,\Mmap$, \item $z$ is a map of $\Smon$-algebras. \end{itemize} \end{enumerate} \end{remark} \begin{proposition}\label{prop:Dstruct} $\Dmon X$ together with $\SDmap$, $\CDmap$ and $\Dcell$ is an object in $\struc$. \end{proposition} Assume $X$ together with $w$, $z$, and $\epsilon$ is an object in $\struc$. Then we define $\Dmon X\xrightarrow x X$ to be the unique $\Algs\Smon$ map arising from the colax cocone \begin{equation}\label{eq:cocone} \tz{struct1}=\tz{struct2} \end{equation} \begin{proposition}\label{prop:key} Assume $X$ together with $w$, $z$, and $\epsilon$ is an object in $\struc$ and $x$ denotes the associated map. Then $x:\Dmon X\to X$ is a map in $\struc$ which is natural in $X$. \end{proposition} \begin{proof}[Proof sketch] Assume $X'$ together with $w'$, $z'$, and $\epsilon'$ is an object in $\struc$ with associated map $x'$, and that $p\xRightarrow \rho q$ is a $2$-cell in $\struc$. Then $\tz{struct4}$ by $2$-cell universality. \end{proof} \subsection{The colimit is a monad} \label{subsec:Qmon} As $\Dmon X$ is an object in $\struc$ (Proposition~\ref{prop:Dstruct}), the induced map $\Dmon^2X\xrightarrow\Dmult \Dmon X$ is a map in $\struc$ (Proposition~\ref{prop:key}). Assume $(X,w,z,\epsilon)$ is an object in $\struc$. Then the induced map $\Dmon X\xrightarrow x X$ is a map in $\struc$. We apply the $1$-cell part of the naturality (Proposition~\ref{prop:key}) with $p=x$ and $x'=\Dmult$ and get \[\tz{struct5}\qquad \text{in particular, setting $x=\Dmult$}\qquad\tz{struct6}\] \begin{theorem} \label{th:Qmonad} $\Dmon$ is a $2$-monad with multiplication $\Dmult$ and unit $X\xrightarrow \Sunit \Smon X\xrightarrow \Scol \Dmon X$. \end{theorem} \begin{proposition}\label{prop:monadmap} $\Smon \xrightarrow\Scol\Dmon$ is a map of monads. \end{proposition} \begin{proof}[Proof sketch] The unit aspect holds by definition of $\Dunit$. As $\Scol$ is a map of $\Smon$-algebras and $\Dmult\,\Scol=\SDmap$ by the cocone equality~\eqref{eq:cocone}, we get the multiplication diagram. \end{proof} \begin{proposition}\label{prop:lsmonadmap} $\Cmon \xrightarrow\Ccol\Dmon$ is a left-semi map of monads. \end{proposition} \begin{proof}[Proof sketch] Recall that $\Ret\Ccol=\id$ and that $\Dmult\,(\Ccol\Dmon)=\CDmap$ by the cocone equality~\eqref{eq:cocone}. Then the multiplication diagram~\eqref{semialg7com} follows since $\Dmult\,(\Ccol\Dmon)\,(\Cmon\Ccol)=z\,(\Cmon\Ccol)=\Ccol\,\Cmult\,(\Cmon\Ret)\,(\Cmon\Ccol)=\Ccol\,\Cmult$. We define the unit $2$-cell $\gamma:\Ccol\,\Cunit\Rightarrow \Dunit$ in~\eqref{semialg5com} as \[\tz{lsunit1}\] We prove Equalities~\eqref{semialg6com}. Recall that $\Dcell=\Icell .\Scol$ and $\Icell.\Ccol=\idm\Ccol$. As $\Dmult\,(\Ccol\Dmon)=z=\Ccol\,\Cmult\,(\Cmon\Ret)$ and $h.\Dcell=h .\Icell .\Scol=\idm\Ccol .\Scol$ \[\tz{lsunit2}=\idm\Ccol.\] As $\Dmult.\Dcell=\Icell.\SDmap$ (see Equality~\eqref{eq:cocone} with $x=\Dmult$), and as $\SDmap$ is an $\Smon$-algebra structure map, $\SDmap\,(\Sunit\Dmon)=\id_{\Dmon X}$, so the second $2$-cell equality follows: $\Dmult.\Dcell.(\Sunit\Dmon)\,\Ccol=\Icell.\SDmap\,(\Sunit\Dmon)\,\Ccol=\Icell.\Ccol=\idm\Ccol$. 
\end{proof} \begin{theorem} \label{th:characterisation} The $2$-category $\Algs\Dmon$ of algebras of the $2$-monad $\Dmon$ is isomorphic to the Structure $2$-category. \end{theorem} \begin{proof}[Proof sketch] It remains to prove the direct implication. Assume $\Dmon X\xrightarrow x X$ is a $\Dmon$-algebra. \begin{itemize} \item Since $\Scol:\Smon\to\Dmon$ is a monad map, $w:\Smon X\xrightarrow \Scol \Dmon X \xrightarrow x X$ is an $\Smon$-algebra. \item By Proposition~\ref{prop:mapmonlsa}, since $\Ccol:\Cmon\to\Dmon$ is a left-semi monad map, $z:\Cmon X\xrightarrow \Ccol \Dmon X \xrightarrow x X$ is a left-semi $\Cmon$-algebra, with $2$-cell $\Rcell$ induced by $\Dcell$ as in the following diagram, where we denote $\comon_x= z\,\Cunit$: \begin{equation}\label{eq:carac} \tz{carac1} \end{equation} \item We know that $\Ret\,\Scol=\Mmap$ and $\Ret\,\Ccol=\id_{\Cmon X}$, and that $z=x\,\Ccol$ is a left-semi $\Cmon$-algebra. We deduce $\Smon X\xrightarrow w X\xrightarrow \comon_x X= \Smon X\xrightarrow \Mmap \Cmon X\xrightarrow z X$ using the following. \[\tz{carac2}\] \item We prove that $\Rcell$ is in $\Algs \Smon$. We first remark that $x.\Icell=\Rcell.x$. Indeed, by naturality of $\Sunit$ and of $\Dcell$, we have $\Dcell.\Sunit\, x=(\Dmon x).\Dcell.\Sunit$. Because $x$ is a $\Dmon$-algebra, $x.\Dcell.\Sunit\, x=x\,(\Dmon x).\Dcell.\Sunit=x\,\Dmult.\Dcell.\Sunit$, and we conclude as $\Dmult.\Dcell.\Sunit=\Icell$. Then, as $\Icell$ is an $\Smon$-algebra $2$-cell by construction and $x$ is an $\Smon$-algebra map, $\Rcell.x$ is an $\Smon$-algebra $2$-cell. This can be represented by the left-hand $2$-cell equality below, which yields the right-hand equality by precomposition with $\Smon\Dunit$. This proves that $\Rcell$ is an $\Smon$-algebra $2$-cell. \[\tz{carac3} \qquad \qquad\tz{carac4}\] \end{itemize} \end{proof} Our analysis of the $2$-monad $\Dmon$ involved consideration of left-semi $\Cmon$-algebras. We can immediately say something about them. Suppose that $\Cmon^+$ is the result of applying our construction to the map $\unit:\monI\to\Cmon$ of monads given by the unit. By Theorem~\ref{th:characterisation}, we deduce the following. \begin{proposition}\label{prop:id} $\Algs{\Cmon^+}$ is isomorphic to $\lsalg\Cmon$. \end{proposition} So the $2$-category of left-semi $\Cmon$-algebras is in fact monadic over the base $\cat\cK$. \section{The linear-non-linear $2$-monad} \label{sec:lnl} In this section, we show how our theory applies in the case of most immediate interest to us. We take for $\Smon$ the $2$-monad for symmetric strict monoidal categories: we give a concrete presentation in Subsection~\ref{subsec:smon}. We take for $\Cmon$ the $2$-monad for categories with strict finite products: we give a concrete presentation in Subsection~\ref{subsec:cmon}. There is an evident map of monads $\Smon\to\Cmon$, and in Subsection~\ref{subsec:dmon} we describe the $2$-monad $\Dmon$ obtained by our construction. In further work we shall develop general theory to show that this $\Dmon$ in particular extends from $\CAT$ to profunctors. This gives a notion of algebraic theory in the sense of Hyland~\cite{Hyland14}, and we shall use that to handle the linear and non-linear substitutions appearing in the differential lambda-calculus~\cite{er:tdlc}. \subsection{The $2$-monad for symmetric strict monoidal categories}\label{subsec:smon} For a category $A$, let $\Smon A$ be the following category. The objects are finite sequences $\seq{a_i}{i\in \nset n}$ with $n\in\N$ and $a_i\in A$. 
The morphisms \[\seq{a_i}{i\in \nset n}\to \seq{a'_j}{j\in \nset m}\] consist of a bijection $\sigma:\nset m\to\nset n$ (so $n$ and $m$ are equal) and, for each $j\in\nset m$, a map $a_{\sigma(j)}\to a'_j$ in $A$. The identity and composition are evident. $\Smon$ extends readily to a $2$-functor on $\CAT$, and it has the structure of a $2$-monad where $\Sunit:A\to\Smon A$ takes $a$ to the singleton $\seq a{}$ and $\Smult:\Smon^2 A\to\Smon A$ acts on objects by concatenation of sequences. Each $\Smon A$ has the structure of a symmetric strict monoidal category: the unit is the empty sequence and the tensor product is given by concatenation. One can check directly that $\Sunit:A\rightarrow \Smon A$ makes $\Smon A$ the free symmetric strict monoidal category on $A$. Moreover, to equip $A$ with the structure of a symmetric strict monoidal category is to give $A$ an $\Smon$-algebra structure. Maps and $2$-cells are as expected, so we identify $\Algs\Smon$ as the $2$-category of symmetric strict monoidal categories, strict symmetric monoidal functors and monoidal $2$-cells. \subsection{The $2$-monad for categories with products}\label{subsec:cmon} For a category $A$, let $\Cmon A$ be the following category. The objects are finite sequences $\seq{a_i}{i\in \nset n}$ with $n\in\N$ and $a_i\in A$. The morphisms \[\seq{a_i}{i\in \nset n}\to \seq{a'_j}{j\in \nset m}\] consist of a map $\phi:\nset m\to\nset n$ and, for each $j\in\nset m$, a map $a_{\phi(j)}\to a'_j$ in $A$. The identity and composition are evident. $\Cmon$ extends readily to a $2$-functor on $\CAT$, and it has the structure of a $2$-monad where $\Cunit:A\to\Cmon A$ takes $a$ to the singleton $\seq a{}$ and $\Cmult:\Cmon^2 A\to\Cmon A$ acts on objects by concatenation of sequences. Each $\Cmon A$ has the structure of a category with strict products: the terminal object is the empty sequence and the product is given by concatenation. Again, one can check directly that $\Cunit:A\rightarrow \Cmon A$ makes $\Cmon A$ the free category with strict products on $A$. Again, to equip $A$ with the structure of a category with strict products is to give $A$ a $\Cmon$-algebra structure. Maps and $2$-cells are as expected, so we identify $\Algs\Cmon$ as the $2$-category of categories with strict products, functors preserving these strictly, and appropriate $2$-cells. \subsection{The $2$-monad for linear-non-linear substitution}\label{subsec:dmon} There is a map $\Mmap:\Smon\to\Cmon$ which on objects takes $\seq{a_i}{i\in \nset n}\in\Smon A$ to $\seq{a_i}{i\in \nset n}\in\Cmon A$ and includes the maps of $\Smon A$ into those of $\Cmon A$ in the obvious way. It accounts for the evident fact that every category with strict products is a symmetric strict monoidal category. We describe the $2$-monad $\Dmon$ obtained from $\Mmap$ by our colimit construction. For a category $A$, $\Dmon A$ is the following category. The objects are $\seq{a_i^{\epsilon_i}}{i\in \nset n}$ with $n\in\N$, $a_i\in A$ and the indices $\epsilon_i$ chosen from the set $\{\Svar,\Cvar\}$ ($\Svar$ indicates linear and $\Cvar$ non-linear). For $a=\seq{a_i^{\epsilon_i}}{i\in \nset n}$, write $\Svar_a$ for $\{i\,|\, \epsilon_i=\Svar\}$. Then a morphism \[\seq{a_i^{\epsilon_i}}{i\in \nset n}\to \seq{{a'_j}^{\epsilon'_j}}{j\in \nset m}\] is given first by a map $\phi:\nset m\to\nset n$ satisfying the condition \[\phi^{-1}(\Svar_a) \subseteq \Svar_{a'}\qtand \phi_{|\phi^{-1}(\Svar_a)}:\phi^{-1}(\Svar_a)\to\Svar_a\text{ is a bijection;} \] and secondly, for each $j\in\nset m$, by a map $a_{\phi(j)}\to a'_j$ in $A$. 
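Though nothing in what follows depends on it, the index bookkeeping in this condition can be made concrete. The following short Python sketch is entirely ours, with \texttt{'S'} and \texttt{'C'} standing for the markers $\Svar$ and $\Cvar$ and all function names hypothetical; it checks whether a map $\phi$ satisfies the condition above.
\begin{verbatim}
def linear_positions(obj):
    # Positions carrying the linear marker 'S'.
    return {i for i, eps in enumerate(obj) if eps == 'S'}

def is_linear_nonlinear_map(src, tgt, phi):
    # src, tgt: lists of markers 'S'/'C'; phi: list with
    # phi[j] in range(len(src)) for each target position j.
    s_src, s_tgt = linear_positions(src), linear_positions(tgt)
    preimage = {j for j, i in enumerate(phi) if i in s_src}
    # phi^{-1}(S_src) must consist of linear target positions,
    # and phi must restrict to a bijection onto S_src.
    return preimage <= s_tgt and \
        sorted(phi[j] for j in preimage) == sorted(s_src)

# A linear input must be used exactly once; a non-linear
# input may be duplicated or discarded.
print(is_linear_nonlinear_map(['S', 'C'], ['S', 'C', 'C'], [0, 1, 1]))  # True
print(is_linear_nonlinear_map(['S', 'C'], ['S', 'S'], [0, 0]))          # False
\end{verbatim}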
$\Dmon$ extends readily to a $2$-functor on $\CAT$, and it has the structure of a $2$-monad as follows. The unit $\Dunit:A\to\Dmon A$ takes $a\in A$ to $\seq{a^\Svar}{}$. The multiplication $\Dmult: \Dmon^2 A\to\Dmon A$ acts by concatenating the objects, with the following behaviour on indices: objects of $\Dmon^2 A$ have shape \[\seqs{\seqs{\dots}\dots \seqs{\dots a^\epsilon\dots}^\eta\dots\seqs{\dots}}\] so that each $a\in A$ has two indices; in the concatenated string in $\Dmon A$, $a$ has index $\Svar$ just when both $\epsilon$ and $\eta$ are $\Svar$. One can now readily see the structure on $\Dmon A$ involved in its definition. \begin{itemize} \item $\Dmon A$ is clearly an $\Smon$-algebra, and $\Scol:\Smon A\to\Dmon A$ sends $\seqs{a_1,\dots,a_n}$ to $\seqs{a_1^\Svar,\dots,a_n^\Svar}$ \item $\Ccol:\Cmon A\to\Dmon A$ sends $\seqs{a_1,\dots,a_n}$ to $\seqs{a_1^\Cvar,\dots, a_n^\Cvar}$ and is evidently an $\Smon$-algebra map \item $\Dcell:\Ccol\Mmap\Rightarrow\Scol$ is given, for each $\seqs{a_1,\dots,a_n}\in\Smon A$, by the map $\seqs{a_1^\Cvar,\dots,a_n^\Cvar}\to\seqs{a_1^\Svar,\dots,a_n^\Svar}$ given by the identity on $\nset n$ and identities $a_i\to a_i$ for each $i$. \\ It is also easy to see $\Ret:\Dmon A\to \Cmon A$: it sends $\seqs{a_1^{\epsilon_1},\dots,a_n^{\epsilon_n}}$ to $\seqs{a_1,\dots,a_n}$. It should now be straightforward for the reader to identify the $2$-cell $\Icell$ and to deduce that $\Dmult$ is just as described. \end{itemize} Now, we can use our Theorem~\ref{th:characterisation} to give a description of what a $\Dmon$-algebra is in this case. It is an object of our Structure $2$-category described in Subsection~\ref{subsec:struc}. That means it is a symmetric strict monoidal category $X$ equipped with a strictly idempotent comonad $f:X\to X$ with $\epsilon:f\Rightarrow \id_X$, with that structure living in the $2$-category of symmetric strict monoidal categories and strict maps; it is such that the full subcategory of fixpoints of $f$ is equipped with the structure of a category with products; moreover, the effect of tensoring objects of $X$ and then applying $f$ is equal to that of first applying $f$ and then taking the product. \subsection{Next steps}\label{nextstep} Starting from the observation that the $2$-monad $\Smon$ for symmetric strict monoidal categories and the $2$-monad $\Cmon$ for categories with strict products can be combined into a $2$-monad $\Dmon$ mixing the two related structures, we have introduced a new way of combining $2$-monads, as the colax colimit of a map of monads. We have proved that our construction gives rise to a $2$-monad in Theorem~\ref{th:Qmonad} and characterised its algebras in Theorem~\ref{th:characterisation}. Our next step will be to give conditions under which $\Dmon$ admits an extension to a pseudomonad on $\Prof$~\cite{fiore2018relative}. We draw attention to the following issue which we need to address. It is clear from~\cite{fiore2018relative} that the $2$-monad $\Smon$ for symmetric strict monoidal categories and $\Cmon$ for categories with strict products admit extensions to pseudomonads on $\Prof$. However, we cannot use our colimit construction at this level, as we only have access to bicolimits. All the same, the characterisation of Theorem~\ref{th:characterisation} can be reworked so as to describe pseudo $\Dmon$-algebras. Then one can show that the presheaf construction has a lifting to pseudo $\Dmon$-algebras, and so deduce by~\cite{fiore2018relative} the wanted extension of $\Dmon$ to $\Prof$. 
The extension of $\Dmon$ to $\Prof$ will give a notion of linear-non-linear multicategory which will serve as a basis for describing the substitution structure at play in the differential $\lambda$-calculus~\cite{fiore05}. In parallel, we shall compare our approach to existing approaches to the combination of linearity and non-linearity which arise from Linear Logic~\cite{Benton94}. We hope to show that, starting from the models of Benton~\cite{Benton94} or Blute-Cockett-Seely~\cite{blute2006differential}, we can obtain a $\Dmon$-algebra (or at least a $\Dmon$-multicategory) which accounts for the usual practice of modelling linear-non-linear calculi. \subsection*{Related work} \subsection*{Content} In Section~\ref{sec:bg}, we first describe the background on maps of $2$-monads (Subsection~\ref{subsec:mm}), left-semi algebras (Subsection~\ref{subsec:lsa}) and colimits (Subsection~\ref{subsec:col}) needed in our main Section~\ref{sec:cons}. There we define the colimits obtained from a map of monads (Subsection~\ref{subsec:def}) and exhibit their properties (Subsection~\ref{subsec:Qlsa}). Inspired by these properties, we define what we simply call the Structure $2$-category (Subsection~\ref{subsec:struc}). We then use (Subsection~\ref{subsec:Qmon}) the properties of the Structure $2$-category to prove, in Theorem~\ref{th:Qmonad}, that the colimit is a $2$-monad; and finally we prove our main Theorem~\ref{th:characterisation}, which states that the Structure $2$-category is isomorphic to the $2$-category of strict algebras over the colimit monad. We end by spelling out the construction for two examples: the first generates the left-semi algebra $2$-category (Proposition~\ref{prop:id}), and the second what we call the linear-non-linear monad (Section~\ref{sec:lnl}), which was the original motivation for developing this theory.
\section{Scientific motivation} \label{sec:fundlimits} Direct imaging of exoplanets and circumstellar dust with space and ground telescopes is challenging due to the high flux ratio between the central star and the object of interest (planet or disk), combined with the small angular separation between the sources. High contrast imaging (HCI) systems are designed to overcome these challenges by combining optical starlight suppression with wavefront control. At optical and near-IR wavelengths, detection with HCI systems is almost always limited by speckle noise: residual uncontrolled and uncalibrated starlight behind the coronagraph mask imposes a contrast floor below which detections become too unreliable. Differential detection techniques can be employed to mitigate speckle noise. One approach is to use either high spectral resolution signatures in the exoplanet light \cite{Wang_2017} or the polarization of starlight scattered by circumstellar dust or exoplanet atmospheres \cite{2020AA...634A..69H}. The fraction of exoplanet light carrying polarization or spectral signatures is small, typically up to a few percent, so these differential imaging techniques, although generally quite reliable, are inefficient and poorly suited to detecting the faintest sources. Another approach is to separate the speckles from real sources by angular differential imaging (ADI) \cite{2006ApJ...641..556M}, which detects the planet image thanks to the known field rotation, or spectral differential imaging (SDI) \cite{1999PASP..111..587R}, which identifies speckles through their geometrical scaling with wavelength. The differential geometrical effects (rotation or scaling) that ADI and SDI rely on are proportional to angular separation, so they do not provide significant contrast gains at the smallest angular separations where some of the most valuable exoplanets lie. The underlying assumptions of PSF stationarity or wavelength-scaling are approximations, so ADI and SDI also do not perfectly remove speckle noise. A third line of research is to use the mutual coherence between starlight and speckles to identify them, by spatial \cite{2006dies.conf..553B} or temporal \cite{2004ApJ...615..562G} modulation. These promising approaches have recently seen significant progress, but remain challenging to implement on-sky, as speckles can be mis-identified as incoherent due to chromaticity and finite temporal resolution. In this paper, we explore a complementary approach, where wavefront sensing telemetry could be used to reconstruct the speckle cloud with sufficient accuracy and precision so that it can be numerically removed down to the photon noise residual. If successful, this would allow reliable detection of Earth-like habitable exoplanets orbiting Sun-like stars with a 6-m space telescope without relying on extreme long-term stability of the telescope optics, or orbiting M-type stars with a ground-based 30-m aperture equipped with extreme adaptive optics. 
To illustrate the scientific potential of high contrast imaging at the photon noise limit, we consider examples representative of space and ground telescopes imaging habitable exoplanets: \begin{itemize} \item An Earth-like planet orbiting a Sun-like star at 10pc distance, observed by a 6-m space telescope \item An Earth-size planet orbiting in the habitable zone of an M4-type star at 4pc distance, observed by a 30-m ground-based telescope \end{itemize} \begin{table}[ht] \caption{Observation examples} \label{tab:HCIobs} \begin{center} \begin{tabular}{|l|c|c|} \hline & Space-6m-Earth-G2 & Ground-30m-Earth-M4\\ \hline \hline Star & G2 at 10pc & M4 at 4pc \\ \hline Bolometric luminosity [$L_{Sun}$] & 1.000 & 0.0072 \\ \hline Planet orbital radius [au] & 1.0 & 0.085 \\ \hline Maximum angular separation [arcsec] & 0.1 & 0.021 \\ \hline Reflected light planet/star contrast & 1.5e-10 & 2.1e-8 \\ \hline \hline Telescope diameter [m] & 6 & 30 \\ \hline Science spectral bandwidth & 20\% & 20\% \\ \hline Central Wavelength & 797 nm (I band) & 1630 nm (H band) \\ \hline Maximum angular separation [$\lambda$/D] & 4.5 & 1.9 \\ \hline Efficiency & 20 \% & 20 \% \\ \hline Total Exposure time & 10 ksec & 10 ksec \\ \hline \hline Star brightness & $m_I = 4.04$ & $m_H = 5.65$ \\ \hline Photon flux in science band (star) & 1.06e9 ph/s & 5.62e9 ph/s \\ \hline Photon flux in science band (planet) & 0.16 ph/s & 118 ph/s \\ \hline Background zodi+exozodi [contrast] & 3.1e-10 & \\ Background starlight [contrast] & 1e-10 & 1e-5\\ Total background & 4.1e-10 & 1e-5\\ \hline Background flux in science band & 0.43 ph/s & 56200 ph/s\\ \hline \hline {\bf Photon-noise limited SNR (10 ksec)} & 20.8 & 49.7 \\ \hline {\bf Exposure time for photon-noise limited SNR=10} & 38 mn & 7 mn\\ \hline {\bf Required PSF calibration accuracy (SNR=10)} & 0.15 & 5e-3\\ \hline \end{tabular} \end{center} \end{table} Photon-noise limited sensitivity derivations are shown in Table \ref{tab:HCIobs}; the quoted values follow from $\mathrm{SNR} = F_p\,t/\sqrt{(F_p+F_b)\,t}$, where $F_p$ and $F_b$ are the detected planet and background photon rates and $t$ is the integration time. For the space-based observation, the background flux is dominated by the combined zodiacal and exozodiacal light components, which contribute 2.7 $\times$ more light than the planet. An SNR $=$ 10 detection requires a 38 mn integration. The ground-based telescope benefits from a larger collecting area, so the planet photon rate is much larger ($>$100 ph/s), but the background is also significantly higher due to residual atmospheric turbulence, assumed here to be at the 1e-5 contrast level. A 7 mn integration allows for an SNR=10 detection of the planet. In order to calibrate speckle noise to 10 $\times$ below the planet flux, the residual PSF light must be calibrated to 15\% accuracy for the space observation, and to 5e-3 accuracy for the ground observation. We discuss in this paper steps towards achieving this accuracy, which is especially challenging for the ground observation example discussed in this section. \section{Wavefront Sensor to High Contrast Image Mapping} \subsection{Self-Calibration Approach} High contrast imaging (HCI) systems rely on wavefront sensor(s) (WFS) to measure optical aberrations. The measurements serve as the input to a wavefront control loop maintaining a near-flat wavefront. WFS measurements usually serve no other purpose. Our calibration approach is to use the WFS telemetry to reconstruct the point spread function (PSF) with sufficient accuracy and precision to subtract residual starlight. These additional steps are shown in Figure \ref{fig:calibprinciple}, colored in red. 
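To make the data flow of these additional steps concrete, the following minimal Python sketch is our own illustration, not an existing pipeline: \texttt{wfs\_to\_psf} stands for a learned or modeled WFS-to-PSF mapping, and all names are hypothetical. One intensity PSF is reconstructed per WFS frame, the reconstructions are accumulated to match the science integration, and the model is subtracted from the science image.
\begin{verbatim}
import numpy as np

def calibrate_science_image(science_image, wfs_frames, wfs_to_psf):
    # The WFS-to-PSF relationship is non-linear, so an intensity PSF
    # is reconstructed for each (fast) WFS frame, and the intensities
    # -- not the wavefronts -- are accumulated.
    psf_model = np.zeros_like(science_image, dtype=float)
    for frame in wfs_frames:
        psf_model += wfs_to_psf(frame)
    # The accumulated reconstruction temporally matches the science
    # integration; subtracting it should ideally leave photon noise only.
    return science_image - psf_model
\end{verbatim}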
\begin{figure}[ht] \centering \includegraphics[width=17cm]{figures/WFS-PSF-calib-principle.png} \vspace*{0.3cm} \caption{WFS-based PSF calibration principle. In a conventional high contrast imaging system, wavefront sensor (WFS) measurements are used to drive a wavefront control loop. We discuss in this paper how the WFS telemetry can also be used to reconstruct the point spread function (PSF) so that speckle noise can be subtracted in high contrast images (red boxes).} \label{fig:calibprinciple} \end{figure} The main challenge to this approach is that it requires accurate knowledge of the relationship between WFS telemetry and PSF. The relationship must be inferred either by modeling, or learned by analysis of telemetry. We explore in this paper the latter option. The WFS-to-PSF relationship is non-linear, so it is not possible to reconstruct the average PSF from an average of the WFS telemetry. For example, a rapid zero-mean oscillation of a pupil plane sine wave mode will generate a pair of focal plane speckles, even though the average wavefront state is free of aberration. This non-linearity requires the WFS-to-PSF reconstruction to be performed sufficiently fast to capture wavefront temporal variations. For a ground-based system, the WFS-to-PSF reconstruction should be performed at $\approx$ kHz speed, while it can be considerably slower on a space-based system. Reconstructed intensity PSFs may then be summed to temporally match the science integration and yield a reconstructed PSF that can be subtracted from the science data. The resulting calibrated science image should then be free of speckle noise. In this scheme, the detection contrast is limited by the photon noise of the speckle field, as opposed to the speckle field itself. We note that previous PSF reconstruction efforts have followed a different approach, where the long exposure PSF is estimated from the temporal variance of each wavefront mode \cite{2006AA...457..359G}. This approach does not require each WFS frame to be processed, but requires an accurate model of the WFS-to-PSF relationship, and makes several approximations that do not allow for high fidelity PSF reconstruction for high contrast imaging, namely: WFS linearity, a quadratic relationship between WF and PSF intensity, and statistical independence between modes. This is not the approach explored in this paper. \subsection{Post-processing vs. closed loop control} Since, as shown in Figure \ref{fig:calibprinciple}, the WFS telemetry serves as the input to the wavefront control (WFC) loop, we discuss here the value of also using WFS telemetry for PSF reconstruction. We must consider what information is contained in WFS telemetry that is valuable for PSF reconstruction but cannot be used for WFC. There are two fundamental advantages in using WFS telemetry for PSF reconstruction over WFC: \begin{itemize} \item {{\bf Correction Null Space.} WFS telemetry contains information about the WF state that is not correctable by the WFC system. For example, WFS frames may extend to higher spatial frequencies than correctable by the deformable mirror(s). WFS frames may also measure amplitude errors. Such errors are in the correction null space, but affect the PSF.} \item {{\bf Immunity to Latency and Arrow of Time for WF Estimation.} In a WFC loop, each DM command is based on past WFS measurements. Due to hardware latencies such as camera exposure time, the current WFS measurement is not accessible (it will only be available at a later time). 
Post-processing PSF reconstruction is immune from such latency.} \end{itemize} \begin{figure}[ht] \centering \includegraphics[width=17cm]{figures/WFS-PSF-calib-timetelemetry.png} \vspace*{0.3cm} \caption{Use of WFS telemetry for control vs. PSF calibration. In wavefront control (top), only past measurements are available to estimate the current WF state. Due to latency, the current and most recent measurements are not yet available. Post-processing (bottom) does not suffer from this limitation: past, current and future measurements are available for PSF estimation.} \label{fig:arrowoftime} \end{figure} The arrow of time concept is illustrated in Figure \ref{fig:arrowoftime}. In a WFC correction loop (top), the only WFS measurements available to the control algorithm are in the past, and the WFS telemetry corresponding to the current (and recent) time is not yet available. The control command relies on old and noisy estimates, yielding relatively large WF estimation errors. While predictive control algorithms may help mitigate this limitation, the correction is fundamentally blind to recent and current WF changes. In post-processing (bottom), past, current and future measurements are all available, allowing for a more accurate WF estimation free of latency. By interpolating/averaging between all measurements, WF estimates can also average down measurement noise. In addition to the null space and arrow of time advantages, post-processing has access to a large set of WFS-to-PSF samples, so an accurate model of the WFS-to-PSF relationship can be inferred from the telemetry. \section{On-Sky Experimental Validation of WFS-to-PSF mapping} \subsection{Solution Uniqueness} \label{ssec:mappinguniqueness} The WFS-to-PSF calibration approach requires a unique relationship from WFS measurement to PSF intensity. If such a relationship exists, and is stable over time, it can be learned from synchronized samples of WFS and PSF realizations. The first step in developing a PSF calibration process is therefore to test if the relationship exists: does WFS telemetry impose a unique PSF solution? \begin{figure}[ht] \centering \includegraphics[width=17cm]{figures/WFS-PSF-calib-uniqueness.png} \vspace*{0.3cm} \caption{Solution uniqueness. In case (A) there is a unique mapping from WFS to PSF, while in case (B) the mapping is not unique.} \label{fig:uniqueness} \end{figure} Figure \ref{fig:uniqueness} illustrates two possible cases, and indicates how statistical tests can be engineered to test solution uniqueness. WFS measurements are shown on the left, and the corresponding PSF intensity on the right. In case (A), there is a unique mapping between input and output spaces and WFS-to-PSF calibration is possible. In case (B) there is no such mapping, and WFS telemetry is insufficient to constrain the PSF. If a unique mapping exists, clusters of nearby points in the input space map to clusters of nearby points in the output space. If no such mapping exists, nearby points in the input space can map to distant points in the output space. A statistical test to validate the existence of a WFS-to-PSF calibration algorithm is to identify nearby WFS realizations and check that the corresponding output PSFs are similar. We note that this empirical test does not explicitly rely on a WFS-to-PSF reconstruction algorithm. Yet, a multidimensional mapping could be constructed by grouping input points in small clusters and measuring the corresponding output state. 
Each new measurement would then be matched to one of the input clusters and the corresponding solution would be read from a lookup table. In a high-dimensional space, this algorithm is not practically feasible and some interpolation is required. \subsection{Experimental Validation of Unique WFS-to-PSF Mapping} The first step toward an on-sky WFS-to-PSF calibration algorithm is to perform the statistical mapping test described in \S \ref{ssec:mappinguniqueness}. We used the Subaru Coronagraphic Extreme Adaptive Optics (SCExAO) system \cite{2018SPIE10703E..59L} on the Subaru Telescope for this test. The input is a pyramid WFS operating in visible light, and the output is a focal plane image at 750nm wavelength acquired with the VAMPIRES instrument \cite{2020SPIE11203E..0SN}. The test was performed in support of the PSF-sharpening iterative DrWHO algorithm, where the AO control loop rewards WFS frames corresponding to sharp PSF realizations \cite{2022AA...659A.170S, 10.1117/12.2595008}. \begin{figure}[ht] \centering \includegraphics[width=17cm]{figures/WFS-PSF-calib-DrWHO.png} \vspace*{0.3cm} \caption{Solution uniqueness between WFS and PSF: on-sky validation using SCExAO's pyramid WFS (top left) and VAMPIRES's visible light imaging (top right). Adapted from the DrWHO algorithm on-sky validation \cite{2022AA...659A.170S}.} \label{fig:DrWHO} \end{figure} Results are shown in Figure \ref{fig:DrWHO}. Example WFS and PSF frames are shown at the top. We first constructed synchronized (WFS, PSF) realizations, and then compared pairs of such samples. The bottom left plot shows how the Euclidean distance between WFS frames compares to the Euclidean distance between the corresponding PSF frames. The left side of the diagram corresponds to pairs with similar WFS realizations, while the bottom part of the diagram shows similar PSFs. The distribution of points reveals that WFS-similarity implies PSF-similarity: all points on the left side of the cloud exhibit small PSF distance. The converse does not hold: PSF-similarity (bottom of the cloud) does not correspond to WFS-similarity, and there are numerous points in the lower right part of the cloud. This experimental result confirms that there is a unique mapping from WFS to PSF, but also reveals that there is no unique mapping from PSF to WFS. This result is consistent with expectations: the WFS provides an unambiguous measurement of the WF state for the modes that are sensed. The focal plane image does not map unambiguously to the WF state: for example, the signature of the focus WF mode on an otherwise perfect PSF is sign-ambiguous, as opposite focus values yield the same image. In this experiment, the DrWHO algorithm selected high quality PSF realizations for which the focus WF mode was close to zero. In this second sample (bottom right diagram), WFS similarity is implied by PSF similarity. \subsection{PSF Reconstruction Validation in Standard Imaging Mode} \label{sec:WFStoPSF} \begin{figure}[ht] \centering \includegraphics[width=10cm]{figures/WFSPSF_Figure.png} \vspace*{0.3cm} \caption{Prediction of the PSF from the pyramid WFS data for an on-sky observation, using a neural network. The predicted PSF image (centre) is determined entirely from the current WFS image (left), and is seen to closely match the true PSF measured at that instant (right). 
This example shows a PSF with a large amount of wavefront error (including strong coma) to provide a clear illustration.} \label{fig:wfspsf} \end{figure} Having verified that a unique mapping exists from WFS telemetry to PSF, we can attempt to reconstruct the PSF from WFS telemetry. Results of a PSF reconstruction for on-sky data are shown in Figure \ref{fig:wfspsf}. Here, a fully-connected neural network consisting of two 2000-unit layers was trained on 5 minutes of on-sky data, consisting of synchronised images from the pyramid wavefront sensor camera and the VAMPIRES visible camera\cite{2015MNRAS.447.2894N} (wavelength 750~nm), running at approximately 500 Hz frame rate. The network used ReLU activation functions and dropout between each layer as a regularizer, the latter proving to be crucial for successful reconstruction. While the fully-connected network shown here provides good results, certain advantages (such as a reduced parameter count and resistance to pupil alignment drift) could be expected from a convolutional neural network, which is the focus of a current study. The main features of the PSF speckle cloud are recovered by the reconstruction. The PSF core shape and bright diffraction features are accurately reproduced, but fainter speckles are less accurately reconstructed. \section{High Contrast Validation} \label{sec:specklecalib} \subsection{Self-Calibration in a Coronagraphic System} We extend here the WFS-to-PSF calibration to coronagraph systems optimized for high contrast imaging. When considering HCI systems, we refer to the input WFS signal as the bright field (BF) and the output speckle cloud as the dark hole (DH). The self-calibration's goal is to derive the DH from the BF. An ideal system for self-calibration would have a stable BF-to-DH relationship. This can be challenging with a configuration such as the one described in \S \ref{sec:WFStoPSF}, where the WFS and PSF use different optical trains and cameras. We therefore used a configuration where the BF and DH are co-located on the same detector and share the same optical train. The SCExAO near-IR Lyot coronagraph was configured with speckle control to produce a deep contrast area (DH) on one side of the focal plane mask, with the opposite side of the focal plane image remaining relatively bright and serving as the input BF, as shown in the top left image of Figure \ref{fig:DHcalibLDFC}. The experiment is closely related to linear dark field control (LDFC) \cite{2021AA...646A.145M}, where a linear control loop uses the BF as input for wavefront control. LDFC has been demonstrated to stabilize the DH both in the laboratory \cite{2020PASP..132j4502C} and on-sky \cite{2021AA...653A..42B}. Here, we explore extending LDFC into a DH calibration algorithm, without assuming linearity. \subsection{Laboratory Validation with a Lyot Coronagraph} Results are compiled in Figure \ref{fig:DHcalibLDFC}. The average of all 60000 frames (top left) shows the BF in the lower half of the image and the DH in the top half. The intensity variance across the 60000 images is shown for each pixel of the image at the bottom left, with both readout noise and photon noise variance terms subtracted to reveal the actual speckle intensity variance. A clustering algorithm is used to derive the mapping between BF and DH, as detailed in a previous publication \cite{2021SPIE11823E..18G}. Results are shown in Figure \ref{fig:DHcalibLDFC}, comparing the full dataset (left column) with one of the clusters serving as an entry in the BF-to-DH mapping table (center column). 
The cluster consists of 128 samples with similar BF realizations: the BF variance is 35.7 $\times$ smaller within the cluster than across the full dataset. The corresponding measured DH variance is 30.7 $\times$ smaller within the cluster set than across the full input dataset. This last result demonstrates that images with similar BFs also have similar DHs. {\bf Image selection using BF intensity successfully constrains DH intensity, demonstrating that a BF-to-DH calibration algorithm can be derived to calibrate residual speckles in high contrast images.} \begin{figure}[ht] \centering \includegraphics[width=16cm]{figures/DHcalib-SCExAO.png} \vspace*{0.3cm} \caption{Self-calibration of high contrast images in a half dark hole configuration on the SCExAO bench. A near-IR Lyot type coronagraph is used to block starlight. Focal plane wavefront control creates a deep contrast area above the coronagraph mask, and a relatively bright area below it. The DH is calibrated from the input sensing area (BF) below the optical axis. A clustering algorithm is employed to derive the BF-to-DH mapping.} \label{fig:DHcalibLDFC} \end{figure} \subsection{Null Calibration with Photonic Nulling Interferometer: On-Sky Validation} \label{sec:GLINT} A photonic nuller is an alternative solution to the high contrast imaging challenge. Unlike a coronagraph constructed from bulk optics between which light freely propagates, the photonic nuller couples starlight into a small number of coherent single-mode waveguides. The waveguides are coherently combined to produce destructive interference of the starlight in null output(s). Bright starlight is directed to bright outputs, which measure the intensity in the input waveguides (photometry output(s)) and the phase offsets between input waveguides (WFS output(s)). The photonic nuller concept and its implementation are discussed in publications from the GLINT instrument team\cite{2020MNRAS.491.4180N, Martinod2021NatCo}. The photonic nuller is well-suited for self-calibration: \begin{itemize} \item Starlight is coupled into a small number of coherent waveguides. At each wavelength, the light at the photonic device input is fully described by phase and amplitude (and possibly polarization), so the number of dimensions in the input is a few times the number of waveguides. \item The relationship between the input variables (phase and amplitude of each waveguide) and the output intensities is entirely established within the photonic chip, so it is significantly more stable than a train of bulk optical components subject to relative misalignments. \end{itemize} \begin{figure}[ht] \centering \includegraphics[width=8cm]{figures/GLINT-lab.png} \includegraphics[width=8cm]{figures/GLINT-sky.png} \vspace*{0.3cm} \caption{Null calibration with the GLINT photonic nuller: laboratory (left) and on-sky (right) demonstration. Each of the 8 vertical panels shows the GLINT detector output, consisting of 16 horizontal spectra (one per output of the photonic chip) ranging from 1340 nm (right edge) to 1690 nm (left edge). Images are grouped in pairs, with the average image on the left and the standard deviation on the right. The leftmost pair is the average (a) and standard deviation (b) of the full set of 12,796 consecutive images. The average (c) and standard deviation (d) of one of the clusters that define the BF-to-DH mapping are also shown. Blue boxes indicate the BF used for selection, and red boxes show the DH signals. 
The right side of the figure shows the same data for the on-sky observations.} \label{fig:GLINTcalib} \end{figure} We validated the BF-to-DH calibration approach on the GLINT instrument\cite{Martinod2021NatCo} installed on the Subaru Telescope. Figure \ref{fig:GLINTcalib} shows laboratory (left) and on-sky (right) results. In the laboratory experiment, wavefront errors were added with the system's deformable mirror. All datasets were acquired at a 1.4 kHz frame rate. Both experiments demonstrate that the DH signal is well constrained by the BF state. In the on-sky result, the standard deviation of the DH signal (d) after calibration is below the readout and photon noise of the DH signal, indicating that the {\bf self-calibration is able to estimate the residual starlight to an accuracy below photon and readout noise}. Details of the experiments are provided in a previous publication \cite{2021SPIE11823E..18G}. The experiment validates the uniqueness of the null solution for a given BF measurement, with no evidence of a measurement null space which would induce a variation in the null outputs without a corresponding signature in the BF. \section{Conclusion and Perspectives} Self-calibration of high contrast imaging data using WFS telemetry is a promising solution to the current speckle noise limitation, potentially providing photon-noise limited detection limits for space and ground high contrast imaging systems. The preliminary on-sky experiments described in this paper are encouraging, and demonstrate that speckle noise is well constrained by WFS telemetry. The experiments were however limited in duration, and did not validate that the WFS-to-PSF relationship is stable over long periods of time, which is critical to subtract speckle noise without removing planet signal. If the calibration holds, then the WFS-to-PSF mapping can be recorded on a calibration star (without a planet) and used to remove speckle noise on the science target. This demanding stability requirement will most likely be achieved by minimizing the time-variable non-common path aberrations (NCPA) between the input WFS and the output science image. Our findings suggest that future space-based high contrast imaging systems could be designed with less demanding telescope wavefront stability, as speckle noise can be accurately calibrated and removed from the science images. In this regime, speckle noise at or below the natural zodi+exozodi background at $\approx$ 3e-10 contrast will not affect detection limits, and stronger residual starlight can be compensated for by longer integration times to average down photon noise. The coronagraph throughput and bandpass would then be optimized for sensitivity in the photon-noise regime instead of reaching contrast levels below the exoplanet level. For both space and ground systems, accurate self-calibration will require a stable or well-calibrated relationship between WFS and science data. The photonic nulling chip approach appears particularly promising, as (1) the WFS and nulling functions are imprinted in the same physically small device, leaving little room for the WFS-to-DH relationship to change, (2) the WF state is discretized in a small number of coherent waveguides and (3) the output channels can easily be dispersed in wavelength. We have relied on an explicit BF-to-DH mapping in this study. Our experiments built the mapping by clustering input samples and averaging the corresponding output samples. 
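The following minimal Python sketch illustrates this clustering-and-averaging construction. It is our own illustration, assuming scikit-learn's k-means for the clustering step; the actual algorithm and parameters are those detailed in \cite{2021SPIE11823E..18G}, and all array names and shapes are hypothetical.
\begin{verbatim}
import numpy as np
from sklearn.cluster import KMeans

def build_bf_to_dh_mapping(bf_frames, dh_frames, n_clusters=500):
    # bf_frames, dh_frames: synchronized arrays of shape
    # (n_samples, n_pixels). Cluster the BF realizations, then
    # average the DH realizations within each BF cluster.
    km = KMeans(n_clusters=n_clusters).fit(bf_frames)
    dh_table = np.stack([dh_frames[km.labels_ == k].mean(axis=0)
                         for k in range(n_clusters)])
    return km, dh_table

def estimate_dh(km, dh_table, bf_frame):
    # Match a new BF frame to its nearest cluster and read the
    # corresponding DH estimate from the lookup table.
    k = km.predict(bf_frame[None, :])[0]
    return dh_table[k]
\end{verbatim}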
This explicit construction allows for inspection of the output samples corresponding to the samples of an input cluster, so that the mapping uniqueness can be verified and quantified. This ``lookup table'' approach does not scale efficiently to high dimensions, for which more efficient reconstruction algorithms are needed. Neural network approaches have recently been shown to address this challenge \cite{Norris2022PL,Norris2022GLINT}, and perform well on the same photonic nulling data as presented in \S \ref{sec:GLINT}. Validation over larger datasets and longer time spans will be required to confirm that these algorithms are suitable for the high contrast imaging application discussed in this paper. \acknowledgments This work was supported by NASA grants \#80NSSC19K0336 and \#80NSSC19K0121. This work is based on data collected at the Subaru Telescope, which is operated by the National Astronomical Observatory of Japan. The authors wish to recognize and acknowledge the very significant cultural role and reverence that the summit of Maunakea has always had within the Hawaiian community. We are most fortunate to have the opportunity to conduct observations from this mountain. The authors also wish to acknowledge the critical importance of the current and recent Subaru Observatory daycrew, technicians, telescope operators, computer support, and office staff employees. Their expertise, ingenuity, and dedication are indispensable to the continued successful operation of these observatories. The development of SCExAO was supported by the Japan Society for the Promotion of Science (Grant-in-Aid for Research \#23340051, \#26220704, \#23103002, \#19H00703 \& \#19H00695), the Astrobiology Center of the National Institutes of Natural Sciences, Japan, the Mt Cuba Foundation and the director's contingency fund at Subaru Telescope. KA acknowledges support from the Heising-Simons foundation.
\section{Introduction} \label{intro} Although not a new branch of research, the physics of superconductors appears more and more to provide us with a source of experimental and theoretical challenges, calling for a better understanding of quantum phenomena. In particular, this is true for high temperature superconductors. In a recent work, we proposed the existence of a relation between the critical temperature of superconductivity and the energy distribution of the geometric structure, i.e. the geometry of the superconductor understood in the sense of General Relativity~\cite{spc-gregori}. The theoretical ground of this idea is that, at a more fundamental level than the one of quantum electrodynamics, quantization of the geometry of space cannot be neglected. Its effects, which can generally be referred to as ``quantum gravity'', do not simply involve quantization of the propagating gravitational field (i.e., roughly speaking, treating the graviton in a way similar to the photon), but imply quantization of the geometry of space itself. As a result, one obtains an effective dependence of the quantum delocalization, or, if one prefers, of the Planck constant as it enters the Uncertainty Principle, on the geometry of space. Taking this into account, we were able to compute several critical temperatures of high temperature superconductors, as a purely theoretical prediction from the analysis of their lattice structure. In this note, we want to consider some other important aspects of the physics of high temperature superconductors, which are the object of (controversial) experimental investigation, namely the detection of some kind of gravity screening produced by rotating superconductors, or of a gravity-like impulse produced by a superconductor undergoing an electric discharge \cite{Podkletnov1997}, \cite{Podkletnov:2001gr}, \cite{PodkletnovModanese2002}. Attempts to explain these effects in the light of a quantum gravity theoretical framework have been proposed in~\cite{Modanese:1995tx}, \cite{Modanese:1996zm}, \cite{ModaneseJunker2009}. Here we want to discuss how all these effects are a natural prediction of the theoretical scenario described in~\cite{assiom}, updated in~\cite{assiom-2011}, namely the same theoretical framework in which critical temperatures have been investigated in \cite{spc-gregori}. This discussion will also allow us to point out some important aspects of the quantum gravity scenario implied in this framework, in particular about how all forces, and the equivalence principle at the ground of General Relativity itself, arise from an entropic principle. \section{A short summary of the theoretical set up} \label{setup} In Ref.~\cite{assiom-2011} we introduced a physical scenario describing a universe ruled by a dynamics which embeds quantum mechanics and general relativity, to which it reduces under appropriate conditions. The basic idea is that the universe is not a particular configuration of whatever kind, among those predicted within the phase space of a certain theory, but the whole of all possible codes of information, time-ordered according to the inclusion of sets, that we interpret as configurations of energy distributed along space. In some sense, this can be viewed as a generalization of the idea of the Feynman path integral, according to which the time evolution of a quantum system is given by the weighted sum over all trajectories~\footnote{For a discussion of the relation to the Feynman path integral, see~\cite{assiom-2011}, section~6.4.}. 
Let us here briefly summarize the main lines of the set up. Consider the set $\Phi = \{ \psi (N) \}$, $N \in {\cal N}$, of all the distributions of an amount $N$ of energy units along a target vector space, of any possible dimension, for any $N$. The subsets $\Phi (N) \equiv \{ \psi (N) \}$ have a natural ordering through $N$, because $\forall \psi(N) \in \Phi (N)$ $ \exists \psi (M) \in \Phi (M)$, $M > N$, such that $\psi (N) \subset \psi (M)$. $N$ can therefore be identified with the ``time''. The partition function of the universe at time ${\cal T} = N$ is: \be {\cal Z}_{N} = \sum_{\psi(N)} {\rm e}^{S(\psi)} \, , \label{zsum} \ee where $W(\psi) \equiv \exp S(\psi)$ is the weight of a configuration $\psi$ in the phase space, $S$ being as usual the entropy. $N$ not only works as ``time'', but is also the total energy of the universe at time $N$, implying the identification $E = {\cal T}$. The distributions of energy units in a discrete vector space of cells can be viewed as assignments of ``geometry'' given by occupation codes (binary codes, of the type full/empty), and can therefore be seen as corresponding to codes of information. Through this correspondence, one can see that different geometries correspond to different discrete groups of symmetry. The classification of these spaces amounts therefore to a classification of all possible discrete (and in general not simple) groups. This correspondence allows us to make the key observation that different codes of information, i.e. different distributions of energy, correspond to different groups of symmetry, and therefore have a different weight in the phase space of all the configurations, the latter being related to the volume of the corresponding group. Expression~\ref{zsum} says that the universe looks the most like the geometry which is realized in the highest number of equivalent ways, i.e. the one which has the highest weight, or the highest entropy. At any energy, and time, $E \sim {\cal T}$, the dominant configuration implied by \ref{zsum} is a three-sphere of radius $R \sim {\cal T}$, i.e. a universe predominantly consisting of a three-dimensional space with the curvature of a three-sphere, $\sim 1 / {\cal T}^2$. Its weight $W$ in the phase space $\Phi$ is proportional to $\exp N^2$. The entropy is therefore $S \sim N^2 \sim {\cal T}^2 \sim R^2$. The dominant configuration can be viewed as the one describing the ``classical'' part of the geometry of the universe. The contribution to any mean value due to the configurations different from the dominant one amounts to a ``smearing'' of the classical value, of the order of the Heisenberg Uncertainty. Quantum mechanics arises in this framework as a way of implementing this undefinedness, and the uncertainty principle expresses the fact that any observable in the three-dimensional world is indeed just the average configuration of something which receives contributions from every configuration, in any dimension. The physical world is only on average three-dimensional, and beyond a certain degree of accuracy physical quantities, and any degree of freedom as characterized in three dimensions, not only cannot be measured, but cannot even be defined. The probabilistic interpretation introduced in quantum mechanics is a way to deal with the undefinability of any measurable quantity beyond a certain accuracy, by embedding it in a consistent mathematical-theoretical framework. This enables making computations and predictions up to a certain degree of accuracy, while keeping the ``unknown'' under control. 
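To make the dominance of the most entropic configuration quantitative, we spell out a step implicit in \ref{zsum}: a configuration $\psi$ whose entropy falls short of the maximal value $S_0 \sim N^2$ by an amount $\Delta S$ contributes to the partition function with relative weight \be {W(\psi) \over W(\psi_0)} \; = \; {{\rm e}^{S(\psi)} \over {\rm e}^{S_0}} \; = \; {\rm e}^{- \Delta S} \, , \ee so that configurations even moderately remote in the phase space are exponentially suppressed in any mean value computed from \ref{zsum}. 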
\section{Forces from entropy} \label{forcentropic} In this framework, the universe is a superposition of configurations, and what we call dynamics, and usually describe in terms of forces and interactions, is a parametrization of the changes which occur in the geometry produced by this superposition of configurations. Our distinction of interactions and forces into classical and quantum mechanical ones regards the level at which we want to consider the physical world, i.e. the degree of approximation we introduce when giving our description of physical phenomena, and which depends on what kind of configurations we decide to neglect. Roughly speaking, classical physics basically corresponds to considering just the most entropic configurations. Since configurations remote in the phase space have weights which are exponentially suppressed as compared to the most entropic ones, with an abuse of language we often indicate classical values as "mean" values. The most entropic of all the configurations describes a sphere in three dimensions, i.e. a completely homogeneous distribution of energy. Were this the only configuration of the universe, the only force existing would be a gravitational force acting so as to distribute energy homogeneously over this space. However, the contribution of less entropic, and correspondingly less homogeneous, configurations is responsible for the formation of "clusters". It is thanks to this that we can perceive gravity as a force which is locally attractive, i.e. which acts in a way that breaks the complete homogeneity of the energy distribution. Including more and more configurations in our approximation of the universe, up to virtually all the configurations, leads to a quantum mechanical description of physics. This includes also other types of interactions besides gravity, introduced in order to parametrize a more intricate structure in which certain types of energy clusters, that we call particles, do not simply attract each other, but interact in a more complicated way. In this context, what we call gravitational attraction is a way of theoretically parametrizing in terms of forces the fact that two objects tend to move closer to each other, because in this way the entropy of the space around each one, corresponding to the entropy of its energy distribution, gets increased by an amount corresponding to the entropy of the energy distribution of the other object. Of course, in the sum over all configurations \ref{zsum}, other motions are included as well. Indeed, \emph{all} possible motions are counted. However, they give an exponentially suppressed contribution to the mean value of any observable quantity, so that it makes sense, at least approximately, to speak of "motion tending to increase entropy" in classical terms. Indeed, in this theoretical framework \emph{all} forces, not just the gravitational one, are entropic. \section{Effective $\hbar$ and quantum delocalization} In the traditional approach to quantum mechanics, the Planck constant sets by convention the size of the quantum uncertainty, namely, the "normalization" of the Heisenberg uncertainty relations. On the other hand, in this way it naturally sets also a unit of conversion from momentum to space, or from energy to time. In our framework, the unit of energy/time conversion is a quantity \emph{independent} of the scale of quantum delocalization.
In our case, the "canonical" expression of the uncertainty relation: \be \Delta E \Delta t \geq {\hbar \over 2} \, , \label{DeDtC} \ee encodes the fuzziness in mean values as given by the contribution of all the configurations \emph{other than the most entropic one}, which corresponds to a universe with the ground, classical curvature $\sim 1 / {\cal T}^2$. In this framework, empty space does not exist: the minimal curvature is the average curvature of the three-sphere geometry of the universe at age ${\cal T}$; $1 / {\cal T}^2$ is what corresponds to the curvature of "empty space". One can view this as the condition which corresponds to considering only the cosmological term of Einstein's equations, neglecting all other contributions, which come from the local distribution of masses and energies of particles and fields. \ref{DeDtC} is therefore also the uncertainty relation ideally ruling the behavior of what in usual terms is the "free" electron, or the free electromagnetic field: conditions in some cases well approximated, but rigorously never realized in practice. When the physical system presents a more complicated geometry, this approximation is no longer valid, and the relation~\ref{DeDtC} must be corrected. The reason is that the configuration describing the system is in this case a superposition of configurations with an average entropy lower than the highest one, that of the "vacuum" of the universe, the three-sphere; the relative weight of less entropic configurations compared to that of the system is therefore higher (see sections~2.7 and 3 of \cite{assiom-2011}). As a consequence, geometrically remote systems are more quantum-delocalized. A typical case of a physical system in which the higher amount of quantum delocalization shows up in a clearly detectable way is that of superconductors. As discussed in Ref.~\cite{assiom-2011}, in the approximation of neglecting extremely delocalized configurations, and therefore also those very remote in the phase space, one can factor out from the expression of the weights in the phase space overall space volume factors, which can be considered to be the same for all the configurations, and consider the contribution to the weight due just to the internal symmetry of a configuration. Under this approximation, the entropy of the configuration corresponding to the three-sphere scales as $S_0 \sim {\cal T}^2$. Factoring out common volume factors, and approximating sums with integrals, one can therefore write an effective partition function of the universe in the form: \be {\cal Z}_{\rm eff} \sim \int_{S = S_0}^0 d S \, {\rm e}^{S} \, . \label{Zeff} \ee Isolating the dominant term in the integrand, $\exp S_0$, one obtains a correction of the order $\sim (1 / S_0) \exp S_0$. Remembering that $S_0 \sim {\cal T}^2$, and identifying ${\cal T}$ with the duration $\Delta t$ of the measurement experiment, in this case corresponding to the existence of the universe itself, one obtains that, during the interval $\Delta t$, the correction to the energy of the universe, $E \sim \Delta t$, is of the order of: \be \Delta E ~ \sim ~ E \, \Delta {\cal Z}_{\rm eff} \; \approx \; {\Delta t \over (\Delta t)^2} ~ = ~ {1 \over \Delta t} \, . \label{DeDtG} \ee This equality is the lower bound of the ordinary form of the Heisenberg Uncertainty.
Indeed, the quantum uncertainty comes from an effective linearization operated on the phase space when isolating the configuration of highest entropy: the uncertainty is treated as a perturbation of this configuration. This leads to a dependence of the amount of delocalization on the entropy, rather than on the weight itself of the configuration in the phase space. Let us now consider cases different from the measurement of the energy of the universe itself: let us consider local experiments. A local experiment corresponds to a subset of the universe. In this case, the classical shape around which to expand in order to find the quantum corrections is a superposition of geometries, and one should rather speak in terms of a "mean" entropy $\overline{S}$ and an average geometry (see~\cite{spc-gregori} for a detailed discussion). In any case, in general $\overline{S} < S_0 \sim (\Delta t)^2$, and expression~\ref{DeDtG} becomes: \be \Delta \overline{E} ~ \sim ~ {\Delta t \over \overline{S}} \; > \, {1 \over \Delta t} \, . \ee Indeed, one obtains: \be \Delta \overline{E} ~ \sim ~ \left( {\overline{S} \over S_0} \right)^{-1} \times {1 \over \Delta t} \, . \ee The delocalizations of two systems with different entropy therefore stand in the ratio: \be {\Delta t_i \over \Delta t_j} \, = \, {\Delta X_i \over \Delta X_j} ~ = ~ {\overline{S}_j \over \overline{S}_i} \, . \label{DiDj} \ee On the other hand, since what we are considering are subsets of the whole universe, involving an amount of energy, and entropy, much smaller than that of the universe, $S({\rm experiment}) \ll S({\rm environment})$, we can decompose the weight of the whole configuration of the universe which includes our experiment as: \ba W & = & W({\rm environment}) \times W({\rm experiment}) ~ = ~ {\rm e}^{S({\rm env.}) \, + \, S({\rm exp.})} \nn \\ & \approx & W({\rm env.}) \times \left(1 + {S({\rm exp.}) \over S({\rm env.}) } \right) \, + \, {\cal O}\left[ \left( {S({\rm exp.}) \over S({\rm env.}) } \right)^2 \right] \, . \label{Wee} \ea Isolating the system corresponding to the experiment amounts to factoring out the contribution of the environment, i.e. discarding the first term on the r.h.s. of \ref{Wee}. This allows one to approximate the ratio of the delocalizations of different systems as: \be {\Delta_i \over \Delta_j } ~ = ~ {S_j \over S_i} ~ \approx ~ {W_j ({\rm exp.}) \over W_i ({\rm exp.})} \, . \label{DDWW} \ee On the other hand, as described in~\cite{spc-gregori}, the ratio of the weights of two configurations can be written as the ratio of the volumes of the symmetry groups of the energy distributions representing the two configurations. Thanks to the factorization \ref{Wee}, this property transfers also to the subgroups of the symmetry of the whole configuration which correspond to the experiment, $G_i$ and $G_j$. The ratio \ref{DDWW} can therefore be written as: \be {\Delta_i \over \Delta_j } ~ = ~ { ||G_j|| \over ||G_i||} \, . \label{DDGG} \ee This holds both for the weight of single configurations, and for the mean weight in the case one deals with a superposition of configurations, as is the case in concrete local experiments. While the concept of a mean weight for a superposition of configurations may not sound unnatural, it is less obvious that one can speak of symmetry groups for a superposition. However, as discussed in~\cite{spc-gregori}, any mean weight can be approximated with the weight of a discrete group, which is then uniquely identified.
This can be assumed to be the weight entering in expressions like~\ref{DDGG}. Expression~\ref{DDGG} says that the higher the degree of inhomogeneity of the physical configuration, the deeper the quantum regime its description enters. In this theoretical framework, the Uncertainty Principle parametrizes, in the form of a bound on the fuzziness of observables, the fact that the world is a superposition of configurations. However, the degree of fuzziness (or quantum delocalization) depends on how remote the configurations one wants to consider are. According to this interpretation, traditional quantum mechanics considers just the "first level" of quantum delocalization, the one in which the details of the geometry of the energy distribution of a microscopic system are neglected. It is therefore not adequate for the description of systems in which this approximation is not valid. Taking into account the effects due to the geometry of space leads us to a regime of quantum geometry. We can call this a regime of quantum gravity, provided we understand that this does not simply mean the description of gravitational interactions in terms of the propagation of gravitons, quantized in a way possibly similar to, or reminiscent of, the quantization of the electromagnetic field in terms of photons. Quantization of the geometry means, and implies, much more. In particular, it implies that the degree of quantum delocalization of wavefunctions depends on the geometry. One can view this as the result of the fact that quantization of geometry roughly means the introduction of a metric $g_{\mu \nu}$ which depends on $\hbar$; taking instead, as a renormalization prescription, the geometry of a system to be fixed, and thereby inverting the relation $g_{\mu \nu} = g_{\mu \nu}(\hbar)$, this equivalently means the introduction of an effective, metric-dependent Planck constant: $\hbar = \hbar(g_{\mu \nu})$. \section{High temperature superconductors} \label{HTSC} As discussed in~\cite{spc-gregori}, considering this effect allows one to justify higher critical temperatures for superconductors with a complex lattice structure, and to predict their dependence on the geometry of the lattice for a wide class of superconductors, on the basis of the same mechanism that explains superconductivity at low temperature: the formation of Cooper pairs as in the BCS theory. Simply, the critical condition for the superconductivity regime, i.e. having an appropriate number of electrons with an appropriate amount of delocalization, is attained at a higher energy, because electrons need a lower localization in the space of energies and momenta to attain the same degree of delocalization in space. The ratio of the critical temperatures of two superconductors is therefore expected to be related to the ratio of their quantum delocalizations: \be {T^i_c \over T^j_c} ~ = ~ { \Delta_i \over \Delta_j } \, , \ee in turn depending, from~\ref{DiDj}, \ref{Wee} and \ref{DDWW}, on the entropy, and approximately the weight, of the respective geometries: \be {T^i_c \over T^j_c} ~ \approx ~ {W_j \over W_i} \, . \label{TiTj} \ee Comparing this with expression~\ref{DDGG} we see that~\ref{TiTj} can be written as: \be {T^i_c \over T^j_c} ~ = ~ {|| G_j || \over || G_i ||} \, .
\label{TTGG} \ee In Ref.~\cite{spc-gregori} the ratio of these volumes was approximated by the ratio of the average space gradients of energy: \be {T^i_c \over T^j_c} ~ \approx ~ {\int_{a_i} |\nabla E_i | \over \int_{a_j} |\nabla E_j|} \, , \label{TTEE} \ee where $a_i$ and $a_j$ are the characteristic lengths (in general corresponding to the lattice lengths) of the two superconductors. Roughly speaking, this can be understood by considering that the degree of symmetry measures how smooth the energy distribution is. From expression~\ref{TTEE} it has been possible to predict the critical temperatures of whole families of high temperature superconductors from the analysis of their crystalline structure, finding remarkable agreement with the experimentally measured temperatures. \section{Black holes} A black hole has an entropy equivalent to that of a three-sphere with radius proportional to the Schwarzschild radius. Considered as a standalone object, it is therefore a maximally symmetric, maximally entropic space. However, when considered as embedded in a larger universe, one can see that black holes are the most singular configurations of three-dimensional space. The most entropic configuration of the universe is the one in which the energy units are uniformly distributed to form the geometry of a three-sphere. Configurations which contain clusters of energy are less symmetric, and therefore less entropic: we can in fact think of forming energy clusters out of the most homogeneous energy distribution by moving, one after the other, a certain number of energy units from their initial position toward certain regions of agglomeration. While doing this, we clearly reduce the symmetry of the configuration. In order to form a subspace of the whole universe which behaves already in itself like a small universe, we must move energy units so as to create a bottleneck, until we "throttle" the space at a certain point (see figure~\ref{throttle}). In this way, we obtain a configuration in which the space is factorized into a black hole times the rest of the universe. This configuration has an entropy which is at most that of a small three-sphere (the black hole) times that of a three-sphere formed with the remaining energy of the universe. If the total energy is $N$, and we take out $n$ energy units to form the black hole, the weight $\exp S^{\prime}$ of the new configuration satisfies the following condition: \be {\rm e}^{S^{\prime}} ~ \lesssim ~ {\rm e}^{n^2} \times {\rm e}^{(N-n)^2} ~ \lesssim ~ {\rm e}^{-N} \times {\rm e}^{N^2}\, . \ee It is therefore exponentially suppressed (by at least a factor $\exp (-N)$) as compared to the configuration of highest entropy of the universe at energy, and time, $N$. As discussed in~\cite{assiom-2011}, configurations with this weight belong to the bunch of those so remote as to describe a deeply quantum regime. Indeed, from~\ref{Zeff} one can see that the highest contribution to the mean value of the energy density in the region of space of the black hole comes from configurations with higher overall entropy, which however do not describe a black hole. The exponential suppression of the weights implies that the average energy density in the region of the black hole is lower than the critical one. This in practice means that the black hole does not exist. According to this theoretical scenario, the only black hole which can exist is the universe itself.
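The exponential suppression can be made explicit with a small algebraic step (which we add for clarity): for any $1 \leq n \leq N-1$, \be n^2 + (N-n)^2 ~ = ~ N^2 - 2\, n (N-n) ~ \leq ~ N^2 - 2(N-1) \, , \ee so that the relative weight of the throttled configuration is bounded by ${\rm e}^{S^{\prime}} / {\rm e}^{N^2} \lesssim {\rm e}^{-2(N-1)}$, indeed at least of order ${\rm e}^{-N}$ for $N \geq 2$, as stated above.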
For the sake of our present discussion what is important is however to remark that, considered as a configuration describing a \emph{subset} of the whole universe, a black hole is the least entropic one among all those possible in three dimensions, and describes a limit configuration of deep quantum regime. \section{Gedankenexperiment} For our discussion it is not so important whether black holes, apart from the universe itself, can really exist or not. We are going to treat them as exemplary cases, useful for the purpose of understanding real physical cases. What we want to see here is what happens when a region of space in the universe undergoes a transition to a deep quantum regime, which we ideally assume to be a black hole. As discussed in \cite{assiom-2011}, our theoretical scenario implies, in its classical limit, general relativity. From a macroscopic point of view, in order to investigate some large scale properties of gravity and of the geometry of space, not too close to the Schwarzschild horizon, we can therefore use much of what is known about the metric around a classical black hole. Let us consider observing from a point (O) an object (A) gravitationally attracted by another one, (B), which we suppose to be of much larger mass, so that we can neglect its motion, consider it at rest, and work with masses instead of reduced masses. What we see is that object (A) moves with an accelerated motion toward (B). Let us now suppose we shrink the radius of (B) beyond the Schwarzschild value, i.e. to the point of turning it into a black hole (for the sake of our discussion, it is not important here to inquire whether this transformation is physically possible, and, in that case, in which way and as a consequence of what it can occur). Owing to the Schwarzschild singularity created in the new metric, the motion of (A) will now appear to an external observer to undergo a deceleration, because the time it takes for an object to reach the surface of the black hole diverges. Saying that (A) needs an infinite time to get to a point placed at a (seemingly) well defined, and finite, distance means saying that (A) gets progressively stopped, until it is at rest, at the Schwarzschild surface. As seen from (O), the situation is therefore that some kind of force has been created which opposes gravity. This new force has all the properties of gravity, in the sense that it appears to act with twice the strength on objects with twice the mass of (A). It has therefore all the characteristics of a "gravity screening". The surface at the horizon of a black hole is a limit example of extremely non-classical, "quantum" geometry. However, although concerning a rather unphysical situation, this thought experiment helps us understand how the presence of a very low-entropy configuration of space acts on the overall geometry so as to "create" a kind of gravity screening force or, in dynamical cases, an anti-gravity impulse. In our scenario, the dynamics is ruled by the "law" of entropy, in the sense that at any time the universe is predominantly given by the configuration of highest entropy in the phase space. Instead of speaking of "forces", "anti-forces" and similar concepts, the best way to analyze the behavior of a system in response to a certain event is to compare entropies of configurations.
In order to see whether, after the creation of the black hole, (A) is further attracted by (B) or starts to be repelled, we have to see whether a motion toward (B) leads to an increase of the entropy of the configuration, or to a decrease. In section~\ref{forcentropic} we justified the gravitational attraction with the fact that, by moving toward each other, two objects reciprocally increase the entropy of their configuration, because to the phase space of each object, besides the entropy due to its own energy distribution, one adds also the entropy due to the energy of the other object: the "space density of entropy" increases. In falling toward a black hole, an object enters a region of space which corresponds to configurations more and more remote in the phase space. The object itself becomes less and less classical. Its entropy decreases. When moving away from the black hole, the entropy of the configuration it corresponds to instead increases. This is, in our framework, the origin of the "repulsion" the object appears to experience. Indeed, the true effective motion depends also on all the rest of the universe, starting with the closer environment. In traditional words, this translates into "initial conditions", "continuity of the motion", acceleration and deceleration, gravitational attraction of other bodies, etc. In practice, the balance of entropies does not lead in this case to a net repulsion, but to a decelerated fall of (A) toward (B), to the point that, before reaching the horizon, i.e. even before being completely stopped in its motion, (A) loses any property of a classical object, and cannot be described anymore in the same terms as before. The reason for a deceleration instead of a net escape from the black hole is that, before becoming a non-classical object, the phase space of (A) still feels an increase of entropy by falling toward the black hole, due to the classical part of the energy distribution of (B) (the "gravitational field" of (B), in classical words). \section{Gravity screening and gravity impulse in high temperature superconductors} In our scenario, there is no net separation between the classical and the quantum mechanical regime: these two levels of the physical description are sewed together in a seamless way. This in particular means that the gravitational screening occurring in the case of the black hole is not a property specific to black holes but, to a certain amount, is expected to occur in every physical system. In general, the gravitational attraction does not depend only on the mass of the object, but is affected also by a certain amount of screening, depending on the configuration of the object: more singular (= less entropic) configurations should present a higher degree of screening. What distinguishes the situation of a so-called classical object from a quantum one is that the entropy of the classical object is very large, to the point that, in comparison, entropy variations depending on the shape are negligible. As a consequence, the size of the gravity screening is also negligible as compared to the gravitational force. Quantum systems correspond by definition to less entropic configurations, and the gravity screening is therefore comparatively higher. They are therefore good candidates for the detection of this effect. If the case of a black hole is just an ideal one, a very concrete physical case of a region of space which is very remote in the phase space (i.e.
much less entropic than its environment), although not so extremely remote as the surface of a black hole, is a superconductor with a very complicated lattice structure, like those considered in section~\ref{HTSC}. Experiments have been carried out by Podkletnov with a rotating YBCO disc: under appropriate conditions, a loss of weight is observed in objects hanging above the superconductor. In particular, the gravity-screening effect turns out to be enhanced during transitions of the configuration of the superconductor. This agrees with our expectation that what matters for the production of the effect is the creation of a region with a very singular geometry, i.e. of very low entropy in the phase space. Such a condition can be attained by considering a superconductor with a high gradient of the energy distribution, like the high temperature superconductors, in a deeply quantum regime, i.e. below the critical temperature and with accelerated currents. Why does the acceleration correspond to a configuration of even lower entropy? In section~\ref{forcentropic} we said that in our framework the dynamics underlying the gravitational attraction is ruled by the "law" of highest entropy. Since the dominant configuration, the one which contributes the most to the mean value of the observables, is the one of highest entropy, any time one produces somewhere a change leading to a decrease of the entropy, from an effective point of view what happens is that the system responds by tending to counterbalance this action, changing its configuration toward an increase of entropy wherever possible. For instance, if we create a black hole, an object falling toward it decelerates because this tends to reduce the decrease of entropy produced by its falling into a region of very low entropy, and so on. The equivalence principle, i.e. the equivalence of gravitational attraction and acceleration, is in our framework nothing else than the statement that producing an acceleration means forcing a system to change its entropy. In practice, this means that by inducing on it an accelerated motion, we force the system into a regime of lower entropy, to which it responds with a tendency to counterbalance this effect by increasing its entropy, as in the case of the gravitational attraction. In the case of the superconductor, forcing it into a configuration in which the currents and its whole lattice structure (its whole geometry) are accelerated means that we produce on it a configuration of even lower entropy, therefore even more remote in the phase space. From~\ref{TiTj}--\ref{TTEE} we see that to all effects it is like having increased the average energy gradient. Creating a region of very low entropy therefore produces a "gravitational response" similar to that of the black hole. Indeed, such a kind of response is to be expected for any similar situation, and is not in principle related to the fact of having a superconductor: what is important is the creation of a region with a deeply quantum behavior. The response can occur everywhere in the environment: the only requirement is that it most efficiently compensates the decrease of entropy. The direction in which it is detected therefore depends on conditions such as the symmetries of the problem and of the environment, which determine where a change of configuration is possible, and which is the best (= most entropic) way.
In the case of a superconductor rotating around the $z$-axis, since on the plane of rotation the superconductor is constrained (meaning that forces are balanced and forbid any deformation), the only change of the system can occur along the direction of the rotation axis, i.e. the vertical axis, orthogonal to the superconducting layers. The result is a screening of the gravitational attraction acting on objects hanging over the superconductor. As long as we neglect border effects, on the plane of the disc the layers can approximately be considered of infinite extension, and the problem reduces to a one-dimensional one. The region of space expected to be affected by the screening is therefore a cylinder, a column with the same diameter as the superconductor. The range of the effect, namely how far it should be detectable, depends once again on the geometry of the physical system and its environment, i.e. on how heavy the changes in the geometry close to the superconductor can be, counterbalancing the reduction of entropy on the superconductor. We expect that in the ideal case of an absolutely "rigid" system, the screening can be effective even very far from the superconductor. Indeed, as in the case of the black hole, this effect is of gravitational type. As one can also see in figure~\ref{gravimpulse}, the strength of the screening depends on the amount of reduction of entropy the repulsion is meant to compensate. When this quantity is fixed, the amount of the response \emph{at the level of change in the geometry} is fixed. This means: what is fixed is the acceleration, not its translation in terms of force. Let us see this in more detail. Accelerating the superconducting disc means making its configuration weigh less in the phase space, by an amount approximately given by the ratio of the symmetry groups after and before the acceleration: \be {W^{\prime}_{\rm SC} \over W_{\rm SC}} ~ \approx ~ {|| G^{\prime}_{\rm SC}|| \over || G_{\rm SC} ||} \, . \label{wpwSC} \ee The system superconductor+environment+probe will react in a way that compensates this loss of weight, by a move intended to locally increase the weight somewhere else. The only action the system is free to take is to change the weight of the probe, i.e. push it away from the superconductor (see figure~\ref{gravimpulse}), in order to increase its entropy by producing a smoother (more symmetric) configuration, as the result of a decrease of the mean value of the energy gradient. Owing to the factorization of the phase space, this compensation will be obtained through changes leading to a ratio of local weights inverse to~\ref{wpwSC}: \be { W^{\prime}_{\rm probe} \over W_{\rm probe}} ~ = ~ { W_{\rm SC} \over W^{\prime}_{\rm SC}} ~ \approx ~ {|| G^{\prime}_{\rm env.+probe}|| \over || G_{\rm env.+probe} ||} ~ \approx ~ {\int | \nabla E_{\rm env. + probe}| \over \int | \nabla E^{\prime}_{\rm env.+probe} |}\, . \label{Wprobe} \ee The probe being a rigid body, this implies a fixed change of the \emph{relative} gravitational weight of the probe, the same for any probe, as fixed by the ratio~\ref{wpwSC}. Saying that we have a fixed amount of relative change means precisely that the amount of change is fixed once it is divided by the mass of the probe. That is, what is fixed is not the force but the field strength, precisely what happens in the case of the screening produced by the sudden creation of a black hole: a gravitational screening.
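As a numerical illustration (ours): if the acceleration reduces the phase-space weight of the superconductor by one percent, $W^{\prime}_{\rm SC} / W_{\rm SC} = 0.99$, then \ref{Wprobe} fixes \be {W^{\prime}_{\rm probe} \over W_{\rm probe}} ~ = ~ {W_{\rm SC} \over W^{\prime}_{\rm SC}} ~ \approx ~ 1.01 \ee for \emph{any} probe, i.e. the same relative change of gravitational weight of about one percent, whether the probe weighs a gram or a kilogram.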
Of course, this is a very simplified representation of the physical situation, which is composed of many other pieces. For instance, the probe hangs from a balance, so we should include the balance in our considerations as well, etc. However, a little thought about the fact that all forces are at equilibrium should convince the reader that this simplification captures the main point of what is happening. Giving a quantitative prediction of the screening effect is quite hard, but we can at least attempt to estimate a \emph{relative} effect. What we expect is that it should be relatively simple to predict the change in the strength of the gravity screening effect when one superconductor is substituted with another. The difficulty in the theoretical analysis lies in the fact that, differently from the situations considered in \cite{spc-gregori}, here we are going to consider superconductors in an accelerated state. Namely, we are going to modify the entropy of their configuration, which now no longer depends roughly on just their lattice structure. Nevertheless, with a certain degree of approximation we can assume that, precisely owing to the implementation of the equivalence principle, the acceleration imposed on the superconductor is theoretically equivalent to a modification of the close environment of the superconductor, as produced by the presence of an object of large mass. This equivalence should be regarded only as a way of parametrizing the changes in the entropy of the configuration of the superconductor plus its close environment, i.e. not as a way to treat the whole experiment as being subjected to a modification equivalent to introducing the presence of an object of large mass, something that would gravitationally attract also the probe. We expect it to be reasonable to write the contribution of the rotation in terms of an equivalent energy gradient term $\Delta_{\rm acc.}$, to be added to the energy gradient of the superconductor: \be \int_a | \nabla E | ~ \to ~ \int_a | \nabla E | \, + \, \Delta_{\rm acc.} \, , \ee \be \Delta_{\rm acc.} ~ \equiv ~ "\left( \int | \nabla E |_{\equiv \, {\rm acc.}} \right)" \, . \ee The weight in the phase space of the superconductor subjected to a rotational acceleration should then be: \be W^{\prime}_{\rm SC} ~ \propto ~ {1 \over \int_a | \nabla E | + \Delta_{\rm acc.}} \, . \ee The ratio of the weights of two different materials subjected to the same acceleration should then be: \be {W^{\prime}_{\rm SC} \over W^{\prime}_{\rm SC^{\prime}} } ~ \approx ~ { \int_{a^{\prime}} | \nabla E |^{\prime} + \Delta_{\rm acc.} \over \int_a | \nabla E | + \Delta_{\rm acc.}} ~ = ~ {W_{\rm SC^{\prime}} \left( 1 \, + \, {\Delta_{\rm acc.} \over W_{\rm SC^{\prime}}} \right) \over W_{\rm SC} \left( 1 \, + \, {\Delta_{\rm acc.} \over W_{\rm SC}} \right) } \, . \label{WDE} \ee From \ref{Wprobe} we obtain that the weight of the probe should be: \be {W_{\rm probe} \over W^{\prime}_{\rm probe} } ~ \approx ~ \, {W^{\prime}_{\rm SC^{\prime}} \over W^{\prime}_{\rm SC} } \, . \label{WWprime} \ee $W_{\rm probe} / W^{\prime}_{\rm probe} $ is a ratio of weights in the phase space; however, due to the particular conditions of the problem, this should effectively translate into the ratio of the two relative losses of gravitational weight: \be {W_{\rm probe} \over W^{\prime}_{\rm probe} } ~ \approx ~ {\Delta {\rm W}_g \over \Delta {\rm W}^{\prime}_g} \, .
\label{WDW} \ee From~\ref{WDW}, \ref{WWprime}, \ref{WDE} and~\ref{TiTj} we therefore obtain: \be {\Delta {\rm W}^{\prime}_g \over \Delta {\rm W}_g} ~ \approx ~ r_T \, { \left[ 1 \, + \, {\Delta_{\rm acc.} \over W_{\rm SC}} \right] \over \left[ 1 \, + \, r^{-1}_T {\Delta_{\rm acc.} \over W_{\rm SC}} \right] } \label{WgT} \, , \ee where we have set $r_T \equiv T^{\prime}_c / T_c$, the ratio of the critical temperatures of the two superconductors. Since $\Delta_{\rm acc.} > 0$, superconductors with higher critical temperature are expected to produce a larger gravitational screening. The ratio $\Delta_{\rm acc.} / W_{\rm SC}$ is not known, but it can be obtained by plugging experimental data from two materials into~\ref{WgT}; in this way, this expression then allows one to obtain quantitative predictions, to be tested on any other material. \vspace{.5cm} \noindent An effect analogous to the gravity screening is the impulse that appears to be radiated by a superconductor of the same type when it undergoes an electric discharge, as described in~\cite{Podkletnov:2001gr}, \cite{PodkletnovModanese2002}. Here the singularity of the configuration, which replaces the accelerated motion of the superconductor, is produced by the high electric gradient created in a short time. The basic idea is however the same. To be noticed is the instantaneous character of the modification of the geometry of space. It is sometimes a misleading idea to think that in quantum gravity modifications of the geometry are propagated/mediated by gravitons, and that they should therefore obey the laws of any kind of radiation \footnote{From a classical point of view, one may say that gravitons can be considered a good approximation of the description of quantum gravity only for very weak gravitational fields. In the case of electrodynamics things work better, because the electric field is not charged, and the free photon remains a reasonable approximation in a wider range of situations.}. As pointed out in \cite{assiom-2011}, quantum mechanics is basically tachyonic. What is not tachyonic, and is bound by the speed of light, is the transfer of information. In other words, no matter the speed at which the quantum modification of the geometry propagates, we can only get the information about the results of the experiment at a speed no higher than that of light.
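Returning to expression~\ref{WgT}: the calibration-and-prediction procedure described above can be made concrete with a minimal numerical sketch (ours; all input numbers are purely illustrative placeholders, not experimental data):
\begin{verbatim}
# Sketch: calibrate x = Delta_acc / W_SC from one measured pair of
# superconductors via expression (WgT), then predict the relative
# screening for any other material. Values below are illustrative.

def screening_ratio(r_T, x):
    # expression (WgT): Delta W'_g / Delta W_g
    return r_T * (1.0 + x) / (1.0 + x / r_T)

def calibrate_x(r_T, rho):
    # inversion of (WgT): x = r_T (r_T - rho) / (rho - r_T**2)
    return r_T * (r_T - rho) / (rho - r_T**2)

x = calibrate_x(r_T=1.3, rho=1.5)       # hypothetical measured ratio
print(screening_ratio(r_T=1.1, x=x))    # prediction, third material
\end{verbatim}
Note that \ref{WgT} constrains the measured ratio to lie between $r_T$ and $r_T^2$; a value outside this range would give a negative $\Delta_{\rm acc.} / W_{\rm SC}$, providing a simple internal consistency check on the data.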
\section{INTRODUCTION} Model based control approaches are used successfully to control complex dynamic systems \cite{pilco}, \cite{minmax}, \cite{legged}, \cite{derl}. These approaches rely on using the dynamic model of the system to compute the control law for the task at hand. In model based approaches, once the controller is developed it can be utilized to perform different types of control tasks, in contrast to model free approaches where the agent has to learn a new policy for every task. The asymptotic performance of model based approaches is generally worse than that of model free approaches due to inaccuracies in the model \cite{psearch}, \cite{rlsurvey}. To deal with this issue, researchers often use model based controllers in a model predictive control (MPC) \cite{IEEEexample:sysid} setting. Model free approaches require millions of samples to learn good policies \cite{tpo}. Collecting samples on a real robot operating in highly dynamic environments can be extremely dangerous, which renders model free approaches ineffective for these kinds of systems. Classical model based control on real dynamic systems involves careful system identification \cite{IEEEexample:sysid} that requires considerable domain expertise, as well as modeling of the complex dynamics of actuators, tire forces, slip, etc., in the case of wheeled mobile robots. These constraints make model free and classical model based control hard and time consuming for real robotic systems. In this work we use multilayer neural networks to learn the dynamic models of different types of wheeled robots and use ILQR \cite{IEEEexample:gen_syn} as the controller. Neural networks are powerful non-linear function approximators \cite{hornik}, \cite{boris}, \cite{dsurvey} and provide an alternative approach for system identification or dynamic modeling of a system using only the data collected from the system. ILQR uses the dynamic model of the vehicle and provides extra robustness on top of kinematic controllers, which cannot deal with the dynamic constraints of the vehicle. In section-\ref{rwork} we discuss some of the past research on learning the dynamic model of a system and then discuss some of the model based control approaches which use ILQR and are closely related to the work presented in this paper. In section-\ref{approach} we discuss our approach and in section-\ref{results} we present the results of trajectory tracking on both the Warthog and the Polaris GEM e6 for various reference trajectories. \begin{figure}[h] \centering{ \resizebox{75mm}{!}{\input{figures/warthog.eps_tex}} \caption{Warthog} \label{fig:warthog} } \end{figure} \begin{figure}[h] \centering{ \vspace*{1cm} \resizebox{75mm}{!}{\input{figures/golfcart.eps_tex}} \caption{Polaris GEM e6} \label{fig:golfcart} } \end{figure} \section{Related Work}\label{rwork} In this section we discuss some of the past research on learning non-linear and stochastic dynamic systems. Then we discuss previous work on using ILQR to control different kinds of robots. Finally we discuss some of the past research which combines ILQR and a learned model to control a robotic system. \subsection{Model Learning} \cite{IEEEexample:dnn} and \cite{IEEEexample:dnn2} present the first usage of neural networks for the identification and control of non-linear dynamical systems. \cite{IEEEexample:RBFNN} uses a Radial Basis Function Neural Network (RBFNN) to model a non-linear stochastic dynamic system. \cite{IEEEexample:MPC} uses a neural network to model the non-linear dynamics of a neutralization plant and uses this model to control the pH-value.
Gaussian processes (GP) are used to model low dimensional stochastic dynamic systems and are preferred over neural networks when only a few data points are available \cite{IEEEexample:gpmpc,IEEEexample:gprl2, IEEEexample:localgp, IEEEexample:gpquad, IEEEexample:gpdataeff, IEEEexample:gprldataeff}. \cite{IEEEexample:pnn} uses a probabilistic neural network to model high dimensional stochastic dynamic systems using significantly fewer samples. Past research on model learning has focused on learning the dynamic model of manipulators \cite{IEEEexample:gpdataeff}, UAVs \cite{IEEEexample:gpquad}, \cite{IEEEexample:gprl2} or robots in simulation \cite{IEEEexample:gpmpc}, \cite{IEEEexample:gprldataeff}. In this work we use neural networks to learn the dynamic models of off-road and on-road vehicles and validate the learned models by integrating them with a controller for trajectory tracking. \subsection{ILQR based controllers} ILQR is a control analog of the Gauss-Newton method for nonlinear least squares optimization and is a variant of Differential Dynamic Programming (DDP) \cite{IEEEexample:ddprog}. DDP uses second order derivatives of the dynamic model, while ILQR uses only first order derivatives to speed up the computation. ILQR is usually used in an MPC setting, where faster dynamics evaluation is more important than the decrease in performance due to the inaccurate dynamics approximation \cite{IEEEexample:gen_syn}. \cite{IEEEexample:ilqr} shows the first use of ILQR to control non-linear biological movement systems. The authors of \cite{IEEEexample:ilqr} extend their work in \cite{IEEEexample:gen_ilqr} and develop ILQG for constrained nonlinear stochastic systems with gaussian noise. \cite{IEEEexample:gen_syn} uses ILQR in an MPC setting to deal with model imperfections to control a 22-DoF humanoid in simulation. \cite{IEEEexample:ext_ilqr} and \cite{IEEEexample:smooth_ilqr} introduce the concept of ILQR smoothing for non-linear dynamic systems with non-quadratic cost functions. ILQR smoothing converges faster, taking only about a third of the number of iterations required by other existing ILQR approaches. \cite{IEEEexample:control_ddp} introduces control constraints in a DDP \cite{IEEEexample:ddprog} setting. Previous approaches enforced control constraints by clamping on control limits; \cite{IEEEexample:control_ddp} demonstrates that the naive clamping methods are inefficient and proposes an algorithm which solves a quadratic programming problem subject to box constraints at each time step. \cite{IEEEexample:control_ddp} validates the proposed method on three simulated problems including the 36-DoF HRP-2 robot. \cite{IEEEexample:constrained_ilqr} generalizes the control constraints used in \cite{IEEEexample:control_ddp} and presents an approach that can deal with the complex constraints of general on-road autonomous driving. \cite{IEEEexample:constrained_ilqr} validates the approach in simulation for different on-road driving scenarios like obstacle avoidance, lane change, car following, and general driving which combines all of these different scenarios. \section{Model Based Control Using ILQR}\label{approach} In this section we discuss our approach to learning the dynamic models of both the Warthog and the Polaris GEM e6 using multilayer neural networks. The Warthog is an off-road robot capable of climbing hills and moving through dense shrubs, rocky terrain and shallow water bodies, with a maximum speed of up to 4.5 m/s. The Polaris GEM e6 is a six-seater non-holonomic vehicle with a maximum speed of up to 10 m/s.
Both vehicles are equipped with a VectorNav-300 GPS for localization. After discussing the dynamic modeling of these vehicles, we present the ILQR controller for model based control in algorithm-\ref{algo1}. Finally we define the trajectory tracking problem and discuss our approach of using ILQR and the learned model to track a reference trajectory. \subsection{Neural Network based Dynamic Model}\label{nnmodel} Let $\mathbf{x}_t \in \mathbb{R}^n$ denote the state and $\mathbf{u}_t \in \mathbb{R}^m $ denote the control commands of a system at discrete time instant $t$ (henceforth referred to as time $t$). The dynamics of the system can be given as follows: \begin{align}\label{eq:dmodel} \mathbf{x}_{t+1} = f(\mathbf{x}_t, \mathbf{u}_t) \end{align} For the Warthog the state of the system $\mathbf{x}_t \in \mathbb{R}^2$ is given by $(v_t, \omega_t)$ where $v_t$ is the linear velocity and $\omega_t$ is the angular velocity of the Warthog at time $t$. The control command $\mathbf{u}_t \in \mathbb{R}^2 $ is given by $(v_t^c, \omega_t^c)$ where $v_t^c$ and $\omega_t^c$ are the commanded linear and angular velocities respectively at time $t$. The dynamic function $f^w$ for the Warthog can now be given as follows: \begin{align}\label{eq:wmodel} \left[\begin{array}{c} v_{t+1} \\ \omega_{t+1} \end{array}\right] = f^w\left(\left[\begin{array}{c} v_{t} \\ \omega_t\end{array} \right], \left[\begin{array}{c} v_{t}^c \\ \omega_t^c\end{array} \right]\right) \end{align} For the Polaris GEM e6 the state $\mathbf{x}_t \in \mathbb{R}^2$ is given by $(v_t, \dot{\phi}_t)$ where $v_t$ is the linear velocity and $\dot{\phi}_t$ is the steering angle rate at time $t$. The control $\mathbf{u}_t \in \mathbb{R}^3$ is given by $(p_t, b_t, \dot{\phi}_t^c)$ where $p_t$ is the throttle, $b_t$ is the brake and $\dot{\phi}_t^c$ is the commanded steering rate at time $t$. The dynamic function $f^g$ for the Polaris GEM e6 can now be given as: \begin{align}\label{eq:gmodel} \left[\begin{array}{c} v_{t+1} \\ \dot{\phi}_{t+1} \end{array}\right] = f^g\left(\left[\begin{array}{c} v_{t} \\ \dot{\phi}_t\end{array} \right], \left[\begin{array}{c} p_{t} \\ b_t \\ \dot{\phi}_t^c\end{array} \right]\right) \end{align} We collected the data $(\mathbf{x}_{t+1}, \mathbf{x}_t, \mathbf{u}_t)$ for both the Warthog and the Polaris GEM e6 by manually driving them with a joystick for an hour in on-road and off-road environments. The driving time was decided by trial and error, observing the training and validation losses during the training process. The data is sampled at 20 Hz for the Warthog and 30 Hz for the Polaris GEM e6, which are fixed hardware specifications for these platforms. $\mathbf{x}_t, \mathbf{u}_t$ are used as the input and $\mathbf{x}_{t+1}$ is used as the output of a neural network that learns the dynamic function $f$ by minimizing the mean squared error (MSE) between the predicted output state $\overline{\mathbf{x}}_{t+1}$ and the observed output state $\mathbf{x}_{t+1}$. Two different neural networks are used to learn $f^w$ and $f^g$. We whiten the data before feeding it to the input layer of the networks. We experimented with multiple architectures and empirically found that a fully connected neural network with two hidden layers of 64 units each and ReLU activations performs very well with our controller.
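The architecture and loss above translate directly into a short training script. The following is a minimal sketch (ours; the paper does not name a framework, so PyTorch is an assumption, and the data loader yielding whitened $(\mathbf{x}_t, \mathbf{u}_t, \mathbf{x}_{t+1})$ batches is left hypothetical):
\begin{verbatim}
import torch
import torch.nn as nn

class DynamicsModel(nn.Module):
    """f(x_t, u_t) -> x_{t+1}: two hidden layers, 64 units, ReLU."""
    def __init__(self, state_dim, control_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim + control_dim, 64), nn.ReLU(),
            nn.Linear(64, 64), nn.ReLU(),
            nn.Linear(64, state_dim))

    def forward(self, x, u):
        return self.net(torch.cat([x, u], dim=-1))

# Warthog instance: state (v, omega), control (v_c, omega_c).
model = DynamicsModel(state_dim=2, control_dim=2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
mse = nn.MSELoss()

for x_t, u_t, x_next in loader:  # assumed DataLoader of whitened triples
    optimizer.zero_grad()
    loss = mse(model(x_t, u_t), x_next)
    loss.backward()
    optimizer.step()
\end{verbatim}
The same class with \texttt{state\_dim=2}, \texttt{control\_dim=3} would cover the Polaris GEM e6 model of (\ref{eq:gmodel}).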
\subsection{ILQR Controller} \label{controller} Consider a non-linear discrete dynamic system: \begin{align}\label{eq:model} \mathbf{x}_{t+1} = f(\mathbf{x}_t,\mathbf{u}_t) \end{align} where $\mathbf{x}_t \in \mathbb{R}^n$ is the state of the system and $\mathbf{u}_t \in \mathbb{R}^m$ is the control input at time $t$. The cost $J_i(\mathbf{x}, \mathbf{U}_i)$ represents the cost incurred by the system starting from state $\mathbf{x}$ and following the control sequence $\mathbf{U}_i$ thereafter. \begin{align}\label{eq:cost} J_i(\mathbf{x}, \mathbf{U}_i)= \sum_{j=i}^{N-1} l(\mathbf{x}_j, \mathbf{u}_j) + l_f(\mathbf{x}_N) \end{align} $l(\mathbf{x}_j, \mathbf{u}_j)$ is the cost of executing control $\mathbf{u}_j$ in state $\mathbf{x}_j$ and $l_f(\mathbf{x}_N)$ is the final cost of state $\mathbf{x}_N$. We want to find the optimal control $\mathbf{U}_0^{*}(\mathbf{x})$ that minimizes the total cost $J_0(\mathbf{x}, \mathbf{U}_0)$. \begin{algorithm}[h] \caption{ILQR algorithm} \label{algo1} \begin{algorithmic}[1] \Function{backward\_pass}{$l,l_f, f, T$} \State $V_\mathbf{x} \gets l_{f,\mathbf{x}}(\mathbf{x}_N)$ \State $V_{\mathbf{x}\mathbf{x}} \gets l_{f,\mathbf{x} \mathbf{x}}(\mathbf{x}_N)$ \State $k \gets [] , K \gets []$ \For{$i \gets N-1 \textrm{ to } 1$} \State $Q_\mathbf{x} \gets l_\mathbf{x}|_{\mathbf{x}_i} + (f_\mathbf{x}^T V_{\mathbf{x}})|_{\mathbf{x}_i}$ \State $Q_\mathbf{u} \gets l_\mathbf{u}|_{\mathbf{x}_i} + (f_\mathbf{u}^T V_{\mathbf{x}})|_{\mathbf{u}_i,\mathbf{x}_i}$ \State $Q_{\mathbf{x}\mathbf{x}} \gets l_{\mathbf{x}\mathbf{x}}|_{\mathbf{x}_i} +(f_x^T V_{\mathbf{x}\mathbf{x}}f_x) |_{\mathbf{x}_i} $ \State $Q_{\mathbf{u}\mathbf{u}}\gets l_{\mathbf{u}\mathbf{u}}|_{\mathbf{u}_i} +(f_u^T V_{\mathbf{x}\mathbf{x}}f_u) |_{\mathbf{u}_i,\mathbf{x}_i,\mathbf{u}_i} $ \State $Q_{\mathbf{u}\mathbf{x}}\gets l_{\mathbf{u}\mathbf{x}}|_{\mathbf{u}_i,\mathbf{x}_i} +(f_u^T V_{\mathbf{x}\mathbf{x}}f_x) |_{\mathbf{u}_i,\mathbf{x}_i,\mathbf{x}_i} $ \State $\widetilde{Q}_\mathbf{u} \gets l_\mathbf{u}|_{\mathbf{x}_i} + (f_\mathbf{u}^T (V_{\mathbf{x}} + \mu \mathbf{I}_n))|_{\mathbf{u}_i,\mathbf{x}_i}$ \State $\widetilde{Q}_{\mathbf{u}\mathbf{u}}\gets l_{\mathbf{u}\mathbf{u}}|_{\mathbf{u}_i} +(f_u^T (V_{\mathbf{x}\mathbf{x}}+\mu \mathbf{I}_n)f_u) |_{\mathbf{u}_i,\mathbf{x}_i,\mathbf{u}_i} $ \State $\widetilde{Q}_{\mathbf{u}\mathbf{x}}\gets l_{\mathbf{u}\mathbf{x}}|_{\mathbf{u}_i,\mathbf{x}_i} +(f_u^T (V_{\mathbf{x}\mathbf{x}}+\mu \mathbf{I}_n)f_x) |_{\mathbf{u}_i,\mathbf{x}_i,\mathbf{x}_i} $ \State $k[i] \gets -\widetilde{Q}_{\mathbf{u}\mathbf{u}}^{-1}\widetilde{Q}_\mathbf{u}$ \State $K[i] \gets -\widetilde{Q}_{\mathbf{u}\mathbf{u}}^{-1}\widetilde{Q}_{\mathbf{u}\mathbf{x}}$ \State $V_\mathbf{x} \gets Q_\mathbf{x} + K^TQ_{\mathbf{u}\mathbf{u}}k + K^TQ_\mathbf{u} + Q_{\mathbf{u}\mathbf{x}}^Tk$ \State $V_{\mathbf{x}\mathbf{x}} \gets Q_{\mathbf{x}\mathbf{x}}+ K^TQ_{\mathbf{u}\mathbf{u}}K + K^TQ_{\mathbf{u}\mathbf{x}} + Q_{\mathbf{u}\mathbf{x}}^TK$ \EndFor \State \Return{$K, k$} \EndFunction \Function{Forward\_pass}{$k, K, f, T$} \State $\overline{\mathbf{x}}_0 \gets \mathbf{x}_0, U \gets [], X \gets []$ \For{$i \gets 1 \textrm{ to } N-1$} \State $\overline{\mathbf{u}}_i \gets \mathbf{u}_i + \alpha k[i] + K[i](\overline{\mathbf{x}}_i - \mathbf{x}_i)$ \State $\overline{\mathbf{x}}_{i+1} = f(\overline{\mathbf{x}}_i, \overline{\mathbf{u}}_i)$ \State $X[i] \gets \overline{\mathbf{x}}_i $ \State $U[i] \gets \textrm{CLIP}(\overline{\mathbf{u}}_i, \mathbf{u}_{min}, \mathbf{u}_{max})$ \EndFor \State $X[N] \gets
\overline{\mathbf{x}}_N$ \State $T \gets \{X,U\}$ \State \Return $T$ \EndFunction \Function{ILQR}{$l, l_f, f$} \State Sample initial trajectory $T$ using model (\ref{eq:model}) for horizon $N$ \For{$j \gets 0 \textrm{ to } M$} \State $k, K \gets \textrm{ BACKWARD\_PASS}(l, l_f, f, T)$ \State $T \gets \textrm{ FORWARD\_PASS}(k, K, f, T)$ \EndFor \State \Return $T$ \EndFunction \end{algorithmic} \end{algorithm} The pseudo code for ILQR~\cite{IEEEexample:ddprog} is given in algorithm-\ref{algo1}; \cite{IEEEexample:ddprog} gives a detailed description of the algorithm, which is omitted here due to space constraints. The parameter $\mu$ is the Levenberg-Marquardt parameter and $\alpha$ is tuned using backtracking line-search. The reader is referred to \cite{IEEEexample:gen_syn} for further details on how to tune these parameters. \subsection{Trajectory Tracking} In this section we present the development of the trajectory tracking controller for the Polaris GEM e6 and omit the discussion for the Warthog due to space constraints; similar techniques can be used to develop a trajectory tracking controller for the Warthog as well. Let $\mathbf{s}_i = \{x_i, y_i, \theta_i, \phi_i, v_i, \dot{\phi}_i\}$ represent the state of the Polaris GEM e6 with wheel base distance $L$, at discrete instant $i$, where $\{x_i, y_i, \theta_i\}$ is the pose, $\phi_i$ is the steering angle, $v_i$ is the velocity and $\dot{\phi}_i$ is the rate of change of steering angle at discrete instant $i$. The control command is given by $\mathbf{u}_i = \{a_i, b_i, \dot{\phi}^c_i\}$, where $a_i$ is the pedal input, $b_i$ is the brake input and $\dot{\phi}^c_i$ is the commanded rate of change of steering angle at discrete instant $i$. We represent the state transition function by $\pi$: \begin{align}\label{statet} \mathbf{s}_{i+1} = \pi(\mathbf{s}_i, \mathbf{u}_i) \end{align} Using the dynamic functions $\{f_v^g, f_{\dot{\phi}}^g\}$ given in section \ref{nnmodel} and following the bicycle model, $\pi$ can be defined by the following equations: \begin{align}\label{stateteq} x_{i+1} &= x_i + v_i \cos(\theta_i)\Delta t \nonumber \\ y_{i+1} &= y_i + v_i \sin(\theta_i)\Delta t \nonumber \\ \dot{\phi}_{i+1} &= f^g_{\dot{\phi}}\left(\left[\begin{array}{c} v_{i} \\ \dot{\phi}_i\end{array} \right], \left[\begin{array}{c} a_{i} \\ b_i \\ \dot{\phi}_i^c\end{array} \right]\right) \nonumber \\ \phi_{i+1}& = \phi_i + \dot{\phi}_i \Delta t \nonumber \\ \theta_{i+1} & = \theta_i + \frac{v_i \tan(\phi_i)}{L} \Delta t \nonumber \\ v_{i+1} &= f^g_v\left(\left[\begin{array}{c} v_{i} \\ \dot{\phi}_i\end{array} \right], \left[\begin{array}{c} a_{i} \\ b_i \\ \dot{\phi}_i^c\end{array} \right]\right) \end{align} \begin{figure}[h] \centering{ \resizebox{75mm}{!}{\input{figures/path_track.eps_tex}} \caption{Bicycle model of the Polaris GEM e6 and the error state w.r.t. a reference trajectory} \label{fig:topview} } \end{figure} Given a set of $M$ ordered poses with velocities, we fit a cubic spline to them to obtain a reference trajectory. `Fig. \ref{fig:topview}' shows the bicycle model of the Polaris GEM e6 in a typical state $\mathbf{s}_i$; the pink rectangles represent the two wheels, the maroon circles represent the reference points, and the red curve represents the fitted cubic spline. For every state $\mathbf{s}_i$ we define an error state $\bm{\psi}_i$ with respect to this reference trajectory as a 9-tuple $\{d^e_i, \theta^e_i, v^e_i, \dot{d}^e_i, \dot{\theta}^e_i, \dot{v}^e_i, v_i, \dot{\phi}_i, \phi_i \}$. As shown in `Fig.
\ref{fig:topview}', $d^e_i$ is the perpendicular distance of the robot in state $\mathbf{s}_i$ from the reference trajectory, $\theta^e_i$ is the heading error of the robot w.r.t. the reference trajectory, $v^e_i$ is the velocity error corresponding to the closest point on the reference trajectory ($v^e_i = v_i - v_p$, where $v_p$ is the velocity of the closest point on the reference trajectory), and $v_i$, $\dot{\phi}_i$ and $\phi_i$ are copied from the state $\mathbf{s}_i$. We use the error state $\bm{\psi}_i$ as the ILQR state; it encodes all the errors from the reference trajectory. Given the error state $\bm{\psi}_i$ and control $\mathbf{u}_i$ at a discrete instant $i$, the next error state $\bm{\psi}_{i+1}$ is given by: \begin{align}\label{eq:gammadef} \bm{\psi}_{i+1} =\gamma(\bm{\psi}_i, \mathbf{u}_i) \end{align} $\gamma$ can be defined by the following equations (a code sketch of this update is given at the end of section~\ref{results}): \begin{align}\label{eq:gamma} d^e_{i+1}& = d^e_i + \dot{d}^e_i \Delta t \nonumber \\ \theta^e_{i+1} &= \theta^e_i + \dot{\theta}^e_i \Delta t \nonumber \\ v^e_{i+1} &= v^e_i + \dot{v}^e_i \Delta t \nonumber \\ \dot{d}^e_{i+1} &= (v^e_i + \dot{v}^e_i \Delta t + v^p_i) \sin (\theta^e_i + \dot{\theta}^e_i \Delta t) \nonumber \\ \dot{\theta}^e_{i+1} &= \frac{(v^e_i + \dot{v}^e_i \Delta t + v^p_i) \tan (\phi_i + \dot{\phi}_i \Delta t)}{L} \nonumber \\ \dot{v}^e_{i+1} &= (v_{i+1} - v_{i})/\Delta t \nonumber \\ v_{i+1} &= f^g_v\left(\left[\begin{array}{c} v_{i} \\ \dot{\phi}_i\end{array} \right], \left[\begin{array}{c} a_{i} \\ b_i \\ \dot{\phi}_i^c\end{array} \right]\right) \nonumber \\ \dot{\phi}_{i+1} &= f^g_{\dot{\phi}}\left(\left[\begin{array}{c} v_{i} \\ \dot{\phi}_i\end{array} \right], \left[\begin{array}{c} a_{i} \\ b_i \\ \dot{\phi}_i^c\end{array} \right]\right) \nonumber \\ \phi_{i+1} &= \phi_i + \dot{\phi}_i \Delta t \end{align} The cost $l(\bm{\psi}_i, \mathbf{u}_i)$ of executing $\mathbf{u}_i$ in error state $\bm{\psi}_i$ is given as follows: \begin{align}\label{eq:lcost} l(\bm{\psi}_i, \mathbf{u}_i) = \bm{\psi}_i^T A \bm{\psi}_i + \mathbf{u}_i^T B \mathbf{u}_i \end{align} Here $A$ and $B$ are diagonal weight matrices, with the last three diagonal elements of $A$ equal to zero, since we only care about driving the error terms in (\ref{eq:gamma}) to zero. For a state $\bm{\psi}$ the final cost $l_f(\bm{\psi})$ is given as follows: \begin{align}\label{eq:lfcost} l_f(\bm{\psi}) = \bm{\psi}^T A \bm{\psi} \end{align} We can now define the trajectory tracking problem for our vehicle with a given reference trajectory as finding the optimal control sequence $\{\mathbf{u}_0, \mathbf{u}_1, ..., \mathbf{u}_{N-2}\}$ for horizon $N$ that minimizes the following cost: \begin{align}\label{eq:fincost} \sum_{i=0}^{N-2}l(\bm{\psi}_i, \mathbf{u}_i) + \bm{\psi}_{N-1}^T A \bm{\psi}_{N-1} \end{align} Subject to the constraints: \begin{align}\label{eq:gammconst} \bm{\psi}_{i+1} =\gamma(\bm{\psi}_i, \mathbf{u}_i) \quad \forall i \in \{0, 1, ..., N-2\} \end{align} (\ref{eq:fincost}) and (\ref{eq:gammconst}) transform the trajectory tracking problem into a standard ILQR problem as defined in section \ref{controller}, which can hence be solved by algorithm \ref{algo1}. \section{Results and future work}\label{results} We evaluate the performance of the trajectory tracking algorithm using four metrics: average cross track error (ACE), maximum cross track error (MCE), average velocity error (AVE) and maximum velocity error (MVE). For the Polaris GEM e6 we compute these metrics on five types of reference trajectories, namely a circular track `Fig. \ref{f1:traj}', an oval track `Fig. \ref{f2:traj}', a snake track `Fig.
\section{Results and future work}\label{results} We evaluate the performance of the trajectory tracking algorithm using four metrics: average cross-track error (ACE), maximum cross-track error (MCE), average velocity error (AVE) and maximum velocity error (MVE). For the Polaris GEM e6 we calculate these metrics on five types of reference trajectories, namely a circular track `Fig. \ref{f1:traj}', an oval track `Fig. \ref{f2:traj}', a snake track `Fig. \ref{f3:traj}', an `Eight' track `Fig. \ref{f4:traj}', and a combination track `Fig. \ref{f5:traj}'. We collect the reference trajectories by logging the VectorNav-300 GPS data while driving manually. We evaluate the Warthog's performance on the reference trajectory shown in `Fig. \ref{f6:traj}', which involves moving at speeds of 3m/s-4m/s, mimicking the kind of trajectories the Warthog might be required to follow in an off-road environment. The Warthog has a maximum velocity of 4.5m/s, so we are testing the controller at the limits of what the Warthog can perform. `Table \ref{errom}' summarizes the results of these experiments on both the Warthog and the Polaris GEM e6. Both vehicles are equipped with a VectorNav-300 GPS for localization, which is accurate up to 20-30 cm; considering the GPS accuracy, the ACEs and MCEs are acceptable for both the Polaris GEM e6 and the Warthog. The reason for the high MVEs is that at the start the vehicle has zero velocity, while the initial points in the reference trajectories have velocities of 1m/s-2m/s. The high MCE for the `Eight' track is due to the 0.8m/s reference velocity reported by the GPS around the 25th second from the starting time, as shown in `Fig. \ref{f4:vel}'. `Fig. [4-9](b)' compare the commanded velocities and the actual vehicle velocities for the reference trajectories. The control input plots `Fig. [4-8][c-e]' and `Fig. 9[c-d]' show that the control inputs satisfy the predefined constraints of $[0,1]$ for pedal and brake, $[-60,60]$ for steering rate (deg/s), $[0,4.5]$ for Warthog linear velocity (m/s) and $[-180, 180]$ for Warthog angular velocity (deg/s). In this work we demonstrated a model-based control methodology for an off-road vehicle as well as an on-road shuttle with varying dynamics, speeds and environmental conditions. In the future we plan to compare this approach with classical geometric and dynamic controllers for wheeled robots in the trajectory following context. We also plan to implement the controller presented in this paper on an eighteen-wheeler with a trailer attached. A truck with a trailer has complex nonlinear dynamics and poses a challenging problem that will further test the limits of the controller presented in this work.
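For reference, the error metrics reported in `Table \ref{errom}' are simple reductions over the logged run; a minimal sketch, assuming arrays of per-sample cross-track and velocity errors extracted from the GPS logs, is:
\begin{verbatim}
import numpy as np

def tracking_metrics(d_e, v_e):
    # d_e: per-sample cross-track errors (m)
    # v_e: per-sample velocity errors (m/s)
    d_e, v_e = np.abs(np.asarray(d_e)), np.abs(np.asarray(v_e))
    return {"ACE": d_e.mean(), "MCE": d_e.max(),
            "AVE": v_e.mean(), "MVE": v_e.max()}
\end{verbatim}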
\vspace{0.4cm} \newpage \setlength{\tabcolsep}{0.01em} \begin{figure}[H] \onecolumn \begin{tabular}{ccccc} \centering \subcaptionbox{\label{f1:traj}}{\includegraphics[scale=0.175]{figures/circle/vel1/traj.eps}} & \subcaptionbox{\label{f1:vel}}{\includegraphics[scale=0.175]{figures/circle/vel1/vel.eps}} & \subcaptionbox{\label{f1:pedal}}{\includegraphics[scale=0.175]{figures/circle/vel1/pedal.eps}} & \subcaptionbox{\label{f1:steer}}{\includegraphics[scale=0.175]{figures/circle/vel1/steer.eps}} & \subcaptionbox{\label{f1:brake}}{\includegraphics[scale=0.175]{figures/circle/vel1/brake.eps}} \\ \end{tabular} \caption{Polaris GEM e6 Circular trajectory response} \label{circle} \end{figure} \vspace{-0.8cm} \begin{figure}[H] \onecolumn \begin{tabular}{ccccc} \centering \subcaptionbox{\label{f2:traj}}{\includegraphics[scale=0.175]{figures/oval/vel1/traj.eps}} & \subcaptionbox{\label{f2:vel}}{\includegraphics[scale=0.175]{figures/oval/vel1/vel.eps}} & \subcaptionbox{\label{f2:pedal}}{\includegraphics[scale=0.175]{figures/oval/vel1/pedal.eps}} & \subcaptionbox{\label{f2:steer}}{\includegraphics[scale=0.175]{figures/oval/vel1/steer.eps}} & \subcaptionbox{\label{f2:brake}}{\includegraphics[scale=0.175]{figures/oval/vel1/brake.eps}} \\ \end{tabular} \caption{Polaris GEM e6 Oval trajectory response} \label{oval} \end{figure} \vspace{-0.8cm} \begin{figure}[H] \onecolumn \begin{tabular}{ccccc} \centering \subcaptionbox{\label{f3:traj}}{\includegraphics[scale=0.175]{figures/snake/vel1/traj.eps}} & \subcaptionbox{\label{f3:vel}}{\includegraphics[scale=0.175]{figures/snake/vel1/vel.eps}} & \subcaptionbox{\label{f3:pedal}}{\includegraphics[scale=0.175]{figures/snake/vel1/pedal.eps}} & \subcaptionbox{\label{f3:steer}}{\includegraphics[scale=0.175]{figures/snake/vel1/steer.eps}} & \subcaptionbox{\label{f3:brake}}{\includegraphics[scale=0.175]{figures/snake/vel1/brake.eps}} \\ \end{tabular} \caption{Polaris GEM e6 Snake trajectory response} \label{snake} \end{figure} \vspace{-0.8cm} \begin{figure}[H] \onecolumn \begin{tabular}{ccccc} \centering \subcaptionbox{\label{f4:traj}}{\includegraphics[scale=0.175]{figures/eight/vel1/traj.eps}} & \subcaptionbox{\label{f4:vel}}{\includegraphics[scale=0.175]{figures/eight/vel1/vel.eps}} & \subcaptionbox{\label{f4:pedal}}{\includegraphics[scale=0.175]{figures/eight/vel1/pedal.eps}} & \subcaptionbox{\label{f4:steer}}{\includegraphics[scale=0.175]{figures/eight/vel1/steer.eps}} & \subcaptionbox{\label{f4:brake}}{\includegraphics[scale=0.175]{figures/eight/vel1/brake.eps}} \\ \end{tabular} \caption{Polaris GEM e6 Eight trajectory response} \label{eight} \end{figure} \vspace{-0.8cm} \begin{figure}[H] \onecolumn \begin{tabular}{ccccc} \centering \subcaptionbox{\label{f5:traj}}{\includegraphics[scale=0.175]{figures/comb/vel1/traj.eps}} & \subcaptionbox{\label{f5:vel}}{\includegraphics[scale=0.175]{figures/comb/vel1/vel.eps}} & \subcaptionbox{\label{f5:pedal}}{\includegraphics[scale=0.175]{figures/comb/vel1/pedal.eps}} & \subcaptionbox{\label{f5:steer}}{\includegraphics[scale=0.175]{figures/comb/vel1/steer.eps}} & \subcaptionbox{\label{f5:brake}}{\includegraphics[scale=0.175]{figures/comb/vel1/brake.eps}} \end{tabular} \caption{Polaris GEM e6 Combination trajectory response} \label{comb} \end{figure} \vspace{-0.8cm} \begin{figure}[H] \onecolumn \setlength{\tabcolsep}{1em} \begin{tabular}{cccc} \centering \subcaptionbox{\label{f6:traj}}{\includegraphics[scale=0.175]{figures/warthog/vel1/traj.eps}} & 
\subcaptionbox{\label{f6:vel}}{\includegraphics[scale=0.175]{figures/warthog/vel1/vel.eps}} & \subcaptionbox{\label{f6:lin}}{\includegraphics[scale=0.175]{figures/warthog/vel1/linear_vel.eps}} & \subcaptionbox{\label{f6:ang}}{\includegraphics[scale=0.175]{figures/warthog/vel1/angular_vel.eps}} \end{tabular} \caption{Warthog Combination trajectory response} \label{warthog} \end{figure} \twocolumn \addtolength{\textheight}{-12cm} \begin{table} \begin{tabular}{|p{2.9cm}|p{1.4cm}|p{1.4cm}|p{1.4cm}|p{1.4cm}|} \hline \multicolumn{5}{|c|}{Error Metrics}\\ \hline Reference & ACE & MCE & AVE & MVE \\ \hline GEM Circular & 0.24m & 0.61m & 0.44m/s & 1.73m/s \\ \hline GEM Oval & 0.32m & 0.66m & 0.48m/s & 1.61m/s \\ \hline GEM Snake & 0.40m & 0.72m & 0.46m/s & 1.02m/s \\ \hline GEM Eight & 0.44m & 1.32m & 0.58m/s & 3.88m/s \\ \hline GEM Combination & 0.43m & 0.89m & 0.43m/s & 2.10m/s \\ \hline Warthog Combination & 0.25m & 0.56m & 0.28m/s & 2.46m/s \\ \hline \end{tabular} \caption{Error metrics on the various reference trajectories.} \label{errom} \end{table} \bibliographystyle{IEEEtran}
\section{Introduction} Stochastic differential geometry of infinite-dimensional manifolds has been a very active topic of research in recent times. One of the important and intriguing problems discussed concerns the construction of spaces of differential forms over such manifolds and the study of the corresponding Laplace operators and associated (stochastic) cohomologies. A central role in this framework is played by the concept of the Dirichlet operator of a differentiable measure, which is actually an infinite-dimensional generalization of the Laplace--Beltrami operator on functions, respectively the Laplace--Witten--de Rham operator on differential forms. The study of the latter operator and the associated semigroup on finite-dimensional manifolds was the subject of many works, and it has led to deep results on the border of stochastic analysis, differential geometry and topology, and mathematical physics, see, e.g., \cite{E3}, \cite{CFKSi}, \cite{EL}. Dirichlet forms on Clifford algebras were considered in \cite{Gr}. In an infinite-dimensional situation, such questions were discussed in the flat case in \cite{Ar1}, \cite{Ar2}, \cite{ArM}, \cite{AK}. A regularized heat semigroup on differential forms over the infinite-dimensional torus was studied in \cite{LeBe}. A study of such questions on general infinite product manifolds was given in \cite{ADK1}, \cite{ADK2}. The case of loop spaces was considered in \cite{JL}, \cite{LRo}. At the same time, there is a growing interest in geometry and analysis on Poisson spaces, i.e., on spaces of locally finite configurations in non-compact manifolds, equipped with Poisson measures. In \cite{AKR-1}, \cite{AKR0}, \cite{AKR1}, an approach to these spaces as infinite-dimensional manifolds was initiated. This approach was motivated by the connection of such spaces with the theory of representations of diffeomorphism groups, see \cite{GGPS}, \cite{VGG}, \cite{I} (these references and \cite{AKR1}, \cite{AKR3} also contain a discussion of relations with quantum physics). We refer the reader to \cite{AKR2}, \cite{AKR3}, \cite{Ro}, and references therein for further discussion of analysis on Poisson spaces and applications. In the present work, we develop this point of view. We define spaces of differential forms over Poisson spaces. Next, we define and study Laplace operators acting in the spaces of 1-forms. We show, in particular, that the corresponding de Rham Laplacian can be expressed in terms of the Dirichlet operator on functions on the Poisson space and the Witten Laplacian on the initial manifold associated with the intensity of the corresponding Poisson measure. We give a probabilistic interpretation and investigate some properties of the associated semigroups. Let us remark that the study of Laplacians on $n$-forms by our methods is also possible, but it leads to more complicated constructions. It will be given in a forthcoming paper. The main general aim of our approach is to develop a framework which extends to Poisson spaces (as infinite-dimensional manifolds) the finite-dimensional Hodge--de Rham theory. A different approach to the construction of differential forms and related objects over Poisson spaces, based on the ``transfer principle'' from Wiener spaces, is proposed in \cite{Pr2}, see also \cite{PPr} and \cite{Pr}. \section{Differential forms over configuration spaces} The aim of this section is to define differential forms over configuration spaces (as an infinite-dimensional manifold).
First, we recall some known facts and definitions concerning ``manifold-like'' structures and functional calculus on these spaces. \subsection{Functional calculus on configuration spaces} Our presentation in this subsection is based on \cite{AKR1}; however, for later use in the present paper we give a different description of some objects and results occurring in \cite{AKR1}. Let $X$ be a complete, connected, oriented, $C^\infty $ (non-compact) Riemannian manifold of dimension $d$. We denote by $\langle \bullet ,\bullet \rangle _x$ the corresponding inner product in the tangent space $T_xX$ to $X$ at a point $x\in X$. The associated norm will be denoted by $|\bullet |_x$. Let also $\nabla ^X$ stand for the gradient on $X$. The configuration space $\Gamma _X$ over $X$ is defined as the set of all locally finite subsets (configurations) in $X$: \begin{equation}\notag \Gamma _X:=\left\{ \,\gamma \subset X\mid |\gamma \cap \Lambda |<\infty \text{ for each compact }\Lambda \subset X\,\right\} . \end{equation} Here, $|A|$ denotes the cardinality of the set $A$. We can identify any $\gamma \in \Gamma _X$ with the positive integer-valued Radon measure \begin{equation}\notag \sum_{x\in \gamma }\varepsilon _x\in {\cal M}(X), \end{equation} where $\varepsilon _x$ is the Dirac measure with mass at $x$, $\sum_{x\in \varnothing }\varepsilon _x:=$ zero measure, and ${\cal M}(X)$ denotes the set of all positive Radon measures on the Borel $\sigma $-algebra ${\cal B}(X)$. The space $\Gamma _X$ is endowed with the relative topology as a subset of the space ${\cal M}(X)$ with the vague topology, i.e., the weakest topology on $\Gamma _X$ such that all maps \begin{equation}\notag \Gamma _X\ni \gamma \mapsto \langle f,\gamma \rangle :=\int_Xf(x)\,\gamma (dx)\equiv \sum_{x\in \gamma }f(x) \end{equation} are continuous. Here, $f\in C_0(X)$ ($:=$ the set of all continuous functions on $X$ with compact support). Let ${\cal B}(\Gamma _X)$ denote the corresponding Borel $\sigma $-algebra. Following \cite{AKR1}, we define the tangent space to $\Gamma _X$ at a point $\gamma $ as the Hilbert space \begin{equation}\notag T_\gamma \Gamma _X:=L^2(X\to TX;d\gamma ), \end{equation} or equivalently \begin{equation} T_\gamma \Gamma _X=\bigoplus_{x\in \gamma }T_xX. \label{tg-sp1} \end{equation} The scalar product and the norm in $T_\gamma \Gamma _X$ will be denoted by $\langle \bullet ,\bullet \rangle _\gamma $ and $\left\| \bullet \right\| _\gamma $, respectively. Thus, each $V(\gamma )\in T_\gamma \Gamma _X$ has the form $V(\gamma )=(V(\gamma )_x)_{x\in \gamma }$, where $V(\gamma )_x\in T_xX$, and \begin{equation}\notag \| V(\gamma )\| _\gamma ^2=\sum_{x\in \gamma }|V(\gamma )_x|_x^2. \end{equation} Let $\gamma \in \Gamma _X$ and $x\in \gamma $. By ${\cal O}_{\gamma ,x}$ we will denote an arbitrary open neighborhood of $x$ in $X$ such that the intersection of the closure of ${\cal O}_{\gamma ,x}$ in $X$ with $\gamma \setminus \{x\}$ is the empty set. For any fixed finite subconfiguration $\left\{ x_1,\dots ,x_k\right\} \subset \gamma $, we will always consider open neighborhoods ${\cal O}_{\gamma ,x_1},\dots ,{\cal O}_{\gamma ,x_k}$ with disjoint closures.
Now, for a measurable function $F\colon\Gamma _X\to {\Bbb R}$, $\gamma \in \Gamma _X$, and $\left\{ x_1,\dots ,x_k\right\} \subset \gamma $, we define a function $F_{x_1,\dots ,x_k}(\gamma ,\bullet )\colon{\cal O}_{\gamma ,x_1}\times \dots \times {\cal O}_{\gamma ,x_k}\to {\Bbb R}$ by \begin{multline*} {\cal O}_{\gamma ,x_1}\times \dots \times {\cal O}_{\gamma ,x_k} \ni (y_1,\dots ,y_k)\mapsto F_{x_1,\dots ,x_k}(\gamma ,y_1,\dots ,y_k):= \\ =F((\gamma \setminus \{x_1,\dots ,x_k\})\cup \{y_1,\dots ,y_k\})\in {\Bbb R}. \end{multline*} Since we will be interested only in the local behavior of the function $F_{x_1,\dots ,x_k}(\gamma ,\bullet )$ around the point $(x_1,\dots ,x_k)$, we will not write explicitly which neighborhoods ${\cal O}_{\gamma ,x_i}$ we use. \begin{definition} \label{def2.0}\rom{We say that a function $F:\Gamma _X\to {\Bbb R}^1$ is differentiable at $\gamma \in \Gamma _X$ if for each $x\in \gamma $ the function $F_x(\gamma ,\cdot )$ is differentiable at $x$ and \[ \nabla ^\Gamma F(\gamma )=(\nabla ^\Gamma F(\gamma )_x)_{x\in \gamma }\in T_\gamma \Gamma _X, \] where \begin{equation}\notag \nabla ^\Gamma F(\gamma )_x:=\nabla ^XF_x(\gamma ,x). \end{equation} } \end{definition} We will call $\nabla ^\Gamma F(\gamma )$ the gradient of $F$ at $\gamma $. For a function $F$ differentiable at $\gamma $ and a vector $V(\gamma )\in T_\gamma \Gamma _X$, the directional derivative of $F$ at the point $\gamma $ along $V(\gamma )$ is defined by \begin{equation}\notag \nabla _V^\Gamma F(\gamma ):=\langle \nabla ^\Gamma F(\gamma ),V(\gamma )\rangle _\gamma . \end{equation} In what follows, we will also use the shorthand notation \begin{equation} \nabla _x^XF(\gamma ):=\nabla ^XF_x(\gamma ,x), \label{flick} \end{equation} so that $\nabla ^\Gamma F(\gamma )=(\nabla _x^XF(\gamma ))_{x\in \gamma }$. It is easy to see that the operation $\nabla ^\Gamma $ satisfies the usual properties of differentiation, including the Leibniz rule. We define a class ${\cal FC}_{\mathrm b}^\infty (\Gamma _X)$ of smooth cylinder functions on $\Gamma _X$ as follows: \begin{definition} \label{def2.1} \rom{A measurable bounded function $F:\Gamma _X\to {\Bbb R}^1$ belongs to ${\cal FC}_{\mathrm b}^\infty (\Gamma _X)$ iff: (i) there exists a compact $\Lambda \subset X$ such that $F(\gamma )=F(\gamma _\Lambda )$ for all $\gamma \in \Gamma _X$, where $\gamma _\Lambda :=\gamma \cap \Lambda $; (ii) for any $\gamma \in \Gamma _X$ and $\left\{ x_1,\dots ,x_k\right\} \subset \gamma $, $k\in {\Bbb N}$, the function $F_{x_1,\dots ,x_k}(\gamma ,\bullet )$ is infinitely differentiable with derivatives uniformly bounded in $\gamma $ and $x_1,\dots ,x_k$ (i.e., the majorizing constant depends only on the order of differentiation but not on the specific choice of $\gamma \in \Gamma _X$, $k\in {\Bbb N}$, and $\{x_1,\dots ,x_k\}\subset \gamma $).} \end{definition} Let us note that, for $F\in {\cal FC}_{\mathrm b}^\infty (\Gamma _X)$, only a finite number of coordinates of $\nabla ^\Gamma F(\gamma )$ are not equal to zero, and so $\nabla ^\Gamma F(\gamma )\in T_\gamma \Gamma _X$. Thus, each $F\in {\cal FC}_{\mathrm b}^\infty (\Gamma _X)$ is differentiable at any point $\gamma \in \Gamma _X$ in the sense of Definition~\ref{def2.0}.
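As a simple illustration of Definition~\ref{def2.0}, take $F(\gamma )=\langle \varphi ,\gamma \rangle $ with $\varphi \in C_0^\infty (X)$. For $x\in \gamma $ and $y\in {\cal O}_{\gamma ,x}$ we have $F_x(\gamma ,y)=\langle \varphi ,\gamma \rangle -\varphi (x)+\varphi (y)$, whence \begin{equation}\notag \nabla ^\Gamma F(\gamma )_x=\nabla ^X\varphi (x),\qquad x\in \gamma , \end{equation} so that the gradient on $\Gamma _X$ is in this case just the lifting of $\nabla ^X\varphi $ (cf.\ Remark~\ref{rem2.1} below).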
\begin{remark} \label{rem2.1}\rom{In \cite{AKR1}, the authors introduced the class ${\cal FC} _{\mathrm b}^\infty ({\cal D},\Gamma _X)$ of functions on $\Gamma _X$ of the form \begin{equation} F(\gamma )=g_F(\left\langle \varphi _1,\gamma \right\rangle ,\dots ,\left\langle \varphi _N,\gamma \right\rangle ), \label{2.1} \end{equation} where $g_F\in C_{\mathrm b}^\infty ({\Bbb R}^N)$ and $\varphi _1,\dots ,\varphi _N\in {\cal D}:=C_0^\infty (X)$($:=$ the set of all $C^\infty $-functions on $X$ with compact support). Evidently, we have the inclusion $${\cal FC}_{\mathrm b}^\infty ( {\cal D},\Gamma _X)\subset {\cal FC}_{\mathrm b}^\infty (\Gamma _X),$$ and moreover, the gradient of $F$ of the form (\ref{2.1}) in the sense of Definition~\ref {def2.0}, \begin{equation}\notag \nabla ^\Gamma F(\gamma )_x=\sum_{i=1}^N\frac{\partial g_F}{\partial s_i}% (\langle \varphi _1,\gamma \rangle ,\dots ,\langle \varphi _N,\gamma \rangle )\nabla ^X\varphi _i(x), \end{equation} coincides with the gradient of this function in the sense of \cite{AKR1}. } \end{remark} \subsection{Tensor bundles and cylinder forms over configuration \\ spaces} Our next aim is to introduce differential forms on $\Gamma _X$. Vector fields and first order differential forms on $\Gamma _X$ will be identified with sections of the bundle $T\Gamma _X.$ Higher order differential forms will be identified with sections of tensor bundles $\wedge ^n(T\Gamma _X)$ with fibers \begin{equation}\notag \wedge ^n(T_\gamma \Gamma _X)% =% \wedge ^n(L^2(X\rightarrow TX;\gamma )), \end{equation} where $\wedge ^n({\cal H})$ (or ${\cal H}^{\wedge n}$) stands for the $n$-th antisymmetric tensor power of a Hilbert space ${\cal H}$. In what follows, we will use different representations of this space. Because of (\ref{tg-sp1}% ), we have \begin{equation} \wedge ^n(T_\gamma \Gamma _X)=\wedge ^n\bigg( \bigoplus_{x\in \gamma }T_xX\bigg) . \label{tang-n} \end{equation} Let us introduce the factor space $X^n/S_n$, where $S_n$ is the permutation group of $\{1,\dots ,n\}$ which naturally acts on $X^n$: \begin{equation}\notag \sigma (x_1,\dots ,x_n)=(x_{\sigma (1)},\dots ,x_{\sigma (n)}),\qquad\sigma \in S_n. \end{equation} The space $X^n/S_n$ consists of equivalence classes $[x_1,\dots ,x_n]$ and we will denote by $[x_1,\dots ,x_n]_d$ an equivalence class $[x_1,\dots ,x_n] $ such that the equality $x_{i_1}=x_{i_2}=\dots =x_{i_k}$ can hold only for $k\le d$ points. (In other words, any equivalence class $[x_1,\dots ,x_n]$ is a multiple configuration in $X$, while $[x_1,\dots ,x_n]_d$ is a multiple configuration with multiplicity of points $\le d$.) We will omit the lower index $d$ in the case where $n\le d$. In what follows, instead of writing $[x_1,\dots ,x_n]_d:\{x_1,\dots ,x_n\}\subset \gamma $, we will use the shortened notation $[x_1,\dots ,x_n]_d\subset \gamma $, though $% [x_1,\dots ,x_n]_d$ is not, of course, a set. We then have from (\ref{tang-n}% ): \begin{equation} \wedge ^n(T_\gamma \Gamma _X)=\bigoplus_{[x_1,\dots ,x_n]_d\,\subset \gamma }T_{x_1}X\wedge T_{x_2}X\wedge \dots \wedge T_{x_n}X, \label{tang-n0} \end{equation} since for each $\sigma\in S_n$ the spaces $T_{x_1}X\wedge T_{x_2}X\wedge\dots\wedge T_{x_n}X$ and $T_{x_{\sigma(1)}}X\wedge T_{x_{\sigma(2)}}X\wedge\dots\wedge T_{x_{\sigma(n)}}X$ coincide. Thus, under a differential form $\omega $ of order $n$, $n\in {\Bbb N}$, over $\Gamma _X,$ we will understand the mapping \begin{equation}\notag \Gamma _X\ni \gamma \mapsto \omega (\gamma )\in \wedge ^n(T_\gamma \Gamma _X). 
\end{equation} We denote by $\omega (\gamma )_{[x_1,\dots ,x_n]_d}$ the corresponding component in the decomposition (\ref{tang-n0}). In particular, in the case $n=1$, a 1-form $V$ over $\Gamma _X$ is given by the mapping \begin{equation}\notag \Gamma _X\ni \gamma \mapsto V(\gamma )=(V(\gamma )_x)_{x\in \gamma }\in T_\gamma \Gamma _X. \end{equation} For fixed $\gamma \in \Gamma _X$ and $x\in \gamma ,$ we consider the mapping \begin{equation}\notag {\cal O}_{\gamma ,x}\ni y\mapsto \omega _x(\gamma ,y)% :=% \omega (\gamma _y)\in \wedge ^n(T_{\gamma _y}\Gamma _X), \end{equation} where $\gamma _y% :=% (\gamma \setminus \{x\})\cup \{y\},$ which is a section of the Hilbert bundle \begin{equation} \wedge ^n(T_{\gamma _y}\Gamma _X)\mapsto y\in {\cal O}_{\gamma ,x} \label{bund1} \end{equation} over ${\cal O}_{\gamma ,x}.$ The Levi--Civita connection on $TX$ generates in a natural way a connection on this bundle. We denote by $\nabla _{\gamma ,x}^X$ the corresponding covariant derivative, and use the notation \begin{equation}\notag \nabla _x^X\omega (\gamma )% :=% \nabla _{\gamma ,x}^X\omega _x(\gamma ,x)\in T_xX\otimes \left( \wedge ^n(T_\gamma \Gamma _X)\right) \end{equation} if the section $\omega _x(\gamma ,\cdot )$ is differentiable at $x$. Analogously, we denote by $\Delta _x^X$ the corresponding Bochner Laplacian associated with the volume measure $m$ on ${\cal O}_{\gamma ,x}$ (see subsec.~3.2 where the notion of Bochner Laplacian is recalled). Similarly, for a fixed $\gamma \in \Gamma _X$ and $\left\{ x_1,\dots,x_n\right\} \subset \gamma $, we define a mapping \begin{multline*} {\cal O}_{\gamma ,x_1}\times \dots\times {\cal O}_{\gamma ,x_n} \ni (y_1,\dots,y_n)\mapsto \omega _{x_1,\dots,x_n}(\gamma ,y_1,\dots,y_n) := \\ = \omega (\gamma _{y_1,\dots,y_n}) \in \wedge ^n(T_{\gamma _{y_1,\dots,y_n}}\Gamma _X), \end{multline*} where $\gamma _{y_1,\dots,y_n}% :=% (\gamma \setminus \{x_1,\dots,x_n\})\cup \{y_1,\dots,y_n\}$, which is a section of the Hilbert bundle \begin{equation} \wedge ^n(T_{\gamma _{y_1,\dots,y_n}}\Gamma _X)\mapsto \left( y_1,\dots,y_n\right) \in {\cal O}_{\gamma ,x_1}\times \dots\times {\cal O}% _{\gamma ,x_n} \label{bund-n} \end{equation} over ${\cal O}_{\gamma ,x_1}\times\dots\times {\cal O}_{\gamma ,x_n}.$ Let us remark that, for any $\eta \subset \gamma $, the space $\wedge ^n(T_\eta \Gamma _X)$ can be identified in a natural way with a subspace of $\wedge ^n(T_\gamma \Gamma _X)$. In this sense, we will use expressions of the type $\omega (\gamma )=\omega (\eta )$ without additional explanations. A set ${\cal F}\Omega ^n$ of smooth cylinder $n$-forms over $\Gamma _X$ will be defined as follows. \begin{definition} \label{def2.2}\rom{${\cal F}\Omega ^n$ is the set of $n$-forms $\omega $ over $% \Gamma _X$ which satisfy the following conditions: (i) there exists a compact $\Lambda =\Lambda (\omega )\subset X$ such that $% \omega (\gamma )=\omega (\gamma _\Lambda )$; (ii) for each $\gamma \in \Gamma _X$ and $\left\{ x_1,...,x_n\right\} \subset \gamma $, the section $\omega _{x_1,\dots,x_n}(\gamma ,\bullet )$ of the bundle (\ref{bund-n}) is infinitely differentiable at $(x_1,\dots,x_n),$ and bounded together with the derivatives uniformly in $\gamma $. 
}\end{definition} \begin{remark} \label{form-fin}\rom{For each $\omega \in {\cal F}\Omega ^n$, $\gamma \in \Gamma _X$, and any open bounded $\Lambda \supset \Lambda (\omega )$, we can define the form $\omega _{\Lambda ,\gamma }$ on ${\cal O}_{\gamma ,x_1}\times \dots \times {\cal O}_{\gamma ,x_n}$ by \begin{equation} \omega _{\Lambda ,\gamma }(y_1,\dots,y_n)=\operatorname{Proj}_{\wedge ^n(T_{y_1}X\oplus \dots \oplus T_{y_n}X)}\omega ((\gamma \setminus \{x_1,\dots,x_n\})\cup \{y_1,\dots,y_n\}), \label{cyl-form} \end{equation} where $\{x_1,\dots,x_n\}=\gamma \cap \Lambda $. Item (ii) of Definition~\ref{def2.2} is obviously equivalent to the assumption that $\omega _{\Lambda ,\gamma }$ is smooth and bounded, together with its derivatives, uniformly in $\gamma $ (for some $\Lambda $ and consequently for any $\Lambda \supset \Lambda (\omega )$).} \end{remark} \begin{definition} \label{def2.3}\rom{We define the covariant derivative $\nabla ^\Gamma \omega $ of the form $\omega \in {\cal F}\Omega ^n$ as the mapping \begin{equation}\notag \Gamma _X\ni \gamma \mapsto \nabla ^\Gamma \omega (\gamma ):=(\nabla _x^X\omega (\gamma ))_{x\in \gamma }\in T_\gamma \Gamma _X\otimes \left( \wedge ^n(T_\gamma \Gamma _X)\right) \end{equation} if for all $\gamma\in\Gamma_X$ and $x\in\gamma$ the form $\omega_x(\gamma,\bullet)$ is differentiable at $x$ and the $\nabla^\Gamma\omega(\gamma)$ just defined indeed belongs to $T_\gamma \Gamma _X\otimes \left( \wedge ^n(T_\gamma \Gamma _X)\right) $. } \end{definition} \begin{remark}\rom{ For each $\omega\in{\cal F\Omega}^n$, the covariant derivative $\nabla^\Gamma\omega$ exists, and moreover only a finite number of the coordinates $\nabla^\Gamma\omega(\gamma)_{x,[x_1,\dots,x_n]_d}$ in the decomposition $$T_\gamma\Gamma_X\otimes\big( \wedge^n(T_\gamma\Gamma_X) \big)=\bigoplus_{x\in\gamma,\,[x_1,\dots,x_n]_d\subset\gamma} T_xX\otimes(T_{x_1}X\wedge\dots\wedge T_{x_n}X) $$ are not equal to zero.} \end{remark} \begin{proposition} \label{prop2.1} For arbitrary $\omega ^{(1)},\omega ^{(2)}\in {\cal F}\Omega ^n$, we have \begin{multline*}\nabla ^\Gamma \langle \omega ^{(1)}(\gamma ),\omega ^{(2)}(\gamma )\rangle _{\wedge ^n(T_\gamma \Gamma _X)} = \\ =\langle\nabla ^\Gamma \omega ^{(1)}(\gamma ),\omega ^{(2)}(\gamma )\rangle _{\wedge ^n(T_\gamma \Gamma _X)}+\langle \omega ^{(1)}(\gamma ),\nabla ^\Gamma \omega ^{(2)}(\gamma )\rangle _{\wedge ^n(T_\gamma \Gamma _X)}. \end{multline*} \end{proposition} \noindent {\it Proof}. We have, for any fixed $\gamma \in \Gamma _X$ and $x\in \gamma $, \begin{gather*} \nabla _x^X\langle \omega ^{(1)}(\gamma ),\omega ^{(2)}(\gamma )\rangle _{\wedge ^n(T_\gamma \Gamma _X)}=\nabla _x^X\langle \omega _x^{(1)}(\gamma ,x),\omega _x^{(2)}(\gamma ,x)\rangle _{\wedge ^n(T_\gamma \Gamma _X)} \\ =\langle \nabla _x^X\omega ^{(1)}(\gamma ),\omega ^{(2)}(\gamma )\rangle _{\wedge ^n(T_\gamma \Gamma _X)}+\langle \omega ^{(1)}(\gamma ),\nabla _x^X\omega ^{(2)}(\gamma )\rangle _{\wedge ^n(T_\gamma \Gamma _X)}, \end{gather*} because of the usual properties of the covariant derivative $\nabla _x^X$. \quad$\blacksquare $ \subsection{Square integrable forms} In this subsection, we will consider spaces of forms over the configuration space $\Gamma_X$ which are square integrable with respect to a Poisson measure. Let $m$ be the volume measure on $X$, let $\rho \colon X\to {\Bbb R}$ be a measurable function such that $\rho >0$ $m$-a.e.\ and $\rho ^{1/2}\in H_{\mathrm loc}^{1,2}(X)$, and define the measure $\sigma (dx):=\rho (x)\,m(dx)$.
Here, $H_{\mathrm loc}^{1,2}(X)$ denotes the local Sobolev space of order 1 in $L_{\mathrm loc}^2(X;m)$. Then, $\sigma $ is a non-atomic Radon measure on $X$. Let $\pi _\sigma $ stand for the Poisson measure on $\Gamma _X$ with intensity $\sigma $. This measure is characterized by its Fourier transform \begin{equation}\notag \int_{\Gamma _X}e^{i\langle f,\gamma \rangle }\,\pi _\sigma (d\gamma )=\exp \int_X(e^{if(x)}-1)\,\sigma (dx),\qquad f\in C_0(X). \end{equation} Let $F\in L^1(\Gamma _X;\pi _\sigma )$ be cylindrical, that is, there exists a compact $\Lambda \subset X$ such that $F(\gamma )=F(\gamma _\Lambda )$. Then, one has the following formula, which we will use many times: \begin{equation} \int_{\Gamma _X}F(\gamma )\,\pi _\sigma (d\gamma )=e^{-\sigma (\Lambda )}\sum_{n=0}^\infty \frac 1{n!}\int_{\Lambda ^n}F(\{x_1,\dots ,x_n\})\,\sigma (dx_1)\dotsm\sigma (dx_n). \label{3.1} \end{equation} Since the measure $\sigma $ is non-atomic, the sets $\{(x_1,\dots ,x_n)\in \Lambda ^n:x_i=x_j\}$, $i,j=1,\dots ,n$, $i\ne j$, have zero $\sigma (dx_1)\dotsm\sigma (dx_n)$ measure, and therefore the expression on the right hand side of (\ref{3.1}) is well-defined. We define on the set ${\cal F}\Omega ^n$ the $L^2$-scalar product with respect to the Poisson measure: \begin{equation} (\omega ^{(1)},\omega ^{(2)})_{L_{\pi _\sigma }^2\Omega ^n}:=\int_{\Gamma _X}\langle \omega ^{(1)}(\gamma ),\omega ^{(2)}(\gamma )\rangle _{\wedge ^nT_\gamma \Gamma _X}\,\pi _\sigma (d\gamma ). \label{4.1} \end{equation} As easily seen, for each $\omega \in {\cal F}\Omega ^n$, the function $\langle \omega (\gamma ),\omega (\gamma )\rangle _{\wedge ^nT_\gamma \Gamma _X}$ is polynomially bounded on $\Gamma _X$, and therefore it belongs to all $L^p(\Gamma _X;\pi _\sigma )$, $p\ge 1$. Moreover, $(\omega ,\omega )_{L_{\pi _\sigma }^2\Omega ^n}>0$ if $\omega $ is not identically zero. Hence, we can define the Hilbert space \begin{equation}\notag L_{\pi _\sigma }^2\Omega ^n:=L^2(\Gamma _X\to \wedge ^nT\Gamma _X;\pi _\sigma ) \end{equation} as the closure of ${\cal F}\Omega ^n$ in the norm generated by the scalar product (\ref{4.1}). From now on, we consider the case of 1-forms only and suppose that $\dim X\ge 2$. We give another description of the spaces $L_{\pi _\sigma }^2\Omega ^1.$ Let us recall the following well-known result (Mecke identity, see e.g.\ \cite{KMM}): \begin{equation} \int_{\Gamma _X}\int_Xf(\gamma ,x)\,\gamma (dx)\,\pi _\sigma (d\gamma )=\int_{\Gamma _X}\int_Xf(\gamma \cup \{x\},x)\,\sigma (dx)\,\pi _\sigma (d\gamma ) \label{mecke} \end{equation} for any measurable bounded $f:\Gamma _X\times X\rightarrow {\Bbb R}^1$.
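For instance, for $f(\gamma ,x)=\varphi (x)$ with $\varphi \in C_0(X)$, both sides of (\ref{mecke}) reduce to the mean of the Poisson measure: \begin{equation}\notag \int_{\Gamma _X}\langle \varphi ,\gamma \rangle \,\pi _\sigma (d\gamma )=\int_X\varphi (x)\,\sigma (dx), \end{equation} as one also checks directly from (\ref{3.1}).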
Let us introduce the notations $$L_\sigma ^2\Omega ^1(X):=L^2(X\rightarrow TX;\sigma ),\qquad L_{\pi _\sigma }^2(\Gamma _X):= L^2(\Gamma _X\rightarrow {\Bbb R}^1;\pi _\sigma ).$$ \begin{proposition}\label{kkkk} The space $L_{\pi _\sigma }^2\Omega ^1$ is isomorphic to the space $L_{\pi_\sigma} ^2(\Gamma _X)\otimes L_\sigma ^2\Omega ^1(X)$ with the isomorphism $I^1$ given by the formula \begin{equation} ( I^1V) (\gamma ,x):=V(\gamma \cup \{x\})_x,\qquad\gamma \in \Gamma _X,\;x\in X. \label{isom} \end{equation} \end{proposition} \noindent {\it Proof}. Let us specify the scalar product of two cylinder 1-forms $V,W\in {\cal F}\Omega ^1$. We have: \begin{align*} ( W,V) _{L_{\pi _\sigma }^2\Omega ^1} &=\int_{\Gamma _X}\left\langle W(\gamma ),V(\gamma )\right\rangle _\gamma\, \pi _\sigma (d\gamma ) \\ &=\int_{\Gamma _X}\int_X\left\langle W(\gamma )_x,V(\gamma )_x\right\rangle _\gamma \,\gamma (dx)\,\pi _\sigma (d\gamma ) \\ &=\int_{\Gamma _X}\int_X\left\langle W(\gamma \cup \{x\})_x,V(\gamma \cup \{x\})_x\right\rangle _\gamma\, \gamma (dx)\,\pi _\sigma (d\gamma ), \end{align*} because $\gamma \cup \{x\}=\gamma $ for $x\in \gamma $. The application of the Mecke identity to the function \begin{equation}\notag f(\gamma ,x)=\left\langle V(\gamma \cup \{x\})_x,W(\gamma \cup \{x\})_x\right\rangle _\gamma \end{equation} shows that \begin{equation}\notag \left( W,V\right) _{L_{\pi _\sigma }^2\Omega ^1}=\int_{\Gamma _X}\int_X\left\langle V(\gamma \cup \{x\})_x,W(\gamma \cup \{x\})_x\right\rangle _\gamma \,\sigma (dx)\,\pi _\sigma (d\gamma ). \end{equation} The space ${\cal F}\Omega ^1$ is, by definition, dense in $L_{\pi _\sigma }^2\Omega ^1$, and so it remains only to show that $I^1({\cal F}\Omega ^1)$ is dense in $L_{\pi _\sigma }^2(\Gamma _X)\otimes L_\sigma ^2\Omega ^1(X)$. For $F\in {\cal FC}_{\mathrm b}^\infty (\Gamma _X)$ and $\nu \in \Omega _0^1(X)$ (the set of smooth 1-forms on $X$ with compact support), we define a form $V(\gamma )=(V(\gamma )_x)_{x\in \gamma }$ by setting \begin{equation} V(\gamma )_x:=F(\gamma \setminus \{x\})\nu (x). \label{isom1} \end{equation} Evidently, we have $V\in {\cal F}\Omega ^1$, and \begin{equation} \left( I^1V\right) (\gamma ,x)=F(\gamma )\nu (x) \label{isom2} \end{equation} for each $\gamma $ and any $x\notin \gamma $. Since each $\gamma \in \Gamma _X$ is a subset of $X$ of zero $m$ measure, we conclude from (\ref{isom2}) that \begin{equation} I^1V=F\otimes \nu . \label{isom3} \end{equation} Noting that the linear span of such $F\otimes \nu $ is dense in $L_{\pi _\sigma }^2(\Gamma _X)\otimes L_\sigma ^2\Omega ^1(X)$, we obtain the result. \quad $\blacksquare $ In what follows, we will denote by ${\cal D}\Omega ^1$ the linear span of forms $V$ defined by (\ref{isom1}). As we already noticed in the proof of Proposition~\ref{kkkk}, ${\cal D}\Omega ^1\subset {\cal F}\Omega ^1$ and is dense in $L_{\pi _\sigma }^2\Omega ^1$. \begin{corollary} \label{fock}We have the unitary isomorphism \begin{equation}\notag {\cal I}:L_{\pi _\sigma }^2\Omega ^1\rightarrow \operatorname{Exp}\left( L^2(X;\sigma )\right) \otimes L_\sigma ^2\Omega ^1(X) \end{equation} given by \begin{equation}\notag {\cal I}=\left( U\otimes {\bf 1}\right) I^1, \end{equation} where $U$ is the unitary isomorphism between the Poisson space $L_{\pi _\sigma }^2(\Gamma _X)$ and the symmetric Fock space $\operatorname{Exp}\left( L^2(X;\sigma )\right) $, see e.g.\ \cite{AKR1}. \end{corollary} \section{Dirichlet operators on differential forms over configuration spaces\label{dodf}} In this section, we introduce Dirichlet operators associated with the Poisson measure on $\Gamma _X$ which act in the space $L^2_{\pi_\sigma} \Omega^1$. These operators generalize the notions of Bochner and de Rham--Witten Laplacians on finite-dimensional manifolds. In the first two subsections, we recall some known facts and definitions concerning Dirichlet operators of Poisson measures on configuration spaces and Laplace operators on differential forms over finite-dimensional manifolds.
\subsection{The intrinsic Dirichlet operator on functions} In this subsection, we recall some theorems from \cite{AKR1} which concern the intrinsic Dirichlet operator in the space $L^2_{\pi_\sigma} (\Gamma_X)$, to be used later. Let us recall that the logarithmic derivative of the measure $\sigma $ is given by the vector field \begin{equation}\notag X\ni x\mapsto \beta _\sigma (x):=\frac{\nabla ^X\rho (x)}{\rho (x)}\in T_xX \end{equation} (where as usual $\beta _\sigma :=0$ on $\{\rho =0\}$). We wish now to define the notion of logarithmic derivative of the Poisson measure, and for this we need a generalization of the notion of vector field. For each $\gamma \in \Gamma _X$, consider the triple \begin{equation}\notag T_{\gamma ,\,\infty }\Gamma _X\supset T_\gamma \Gamma _X\supset T_{\gamma ,0}\Gamma _X. \end{equation} Here, $T_{\gamma ,0}\Gamma _X$ consists of all finite sequences from $T_\gamma \Gamma _X$, and $T_{\gamma ,\,\infty }\Gamma _X:=\left( T_{\gamma ,0}\Gamma _X\right) ^{\prime }$ is the dual space, which consists of all sequences $V(\gamma )=(V(\gamma )_x)_{x\in \gamma }$, where $V(\gamma )_x\in T_xX$. The pairing between any $V(\gamma )\in T_{\gamma ,\,\infty }\Gamma _X$ and $v(\gamma )\in T_{\gamma ,0}\Gamma _X$ with respect to the zero space $T_\gamma \Gamma _X$ is given by \begin{equation}\notag \langle V(\gamma ),v(\gamma )\rangle _\gamma =\sum_{x\in \gamma }\langle V(\gamma )_x,v(\gamma )_x\rangle _x \end{equation} (the series is, in fact, finite). From now on, under a vector field over $\Gamma _X$ we will understand mappings of the form $\Gamma _X\ni \gamma \mapsto V(\gamma )\in T_{\gamma ,\infty }\Gamma _X$. The logarithmic derivative of the Poisson measure $\pi _\sigma $ is defined as the vector field \begin{equation} \Gamma _X\ni \gamma \mapsto B_{\pi _\sigma }(\gamma )=(\beta _\sigma (x))_{x\in \gamma }\in T_{\gamma ,\infty }\Gamma _X \end{equation} (i.e., the logarithmic derivative of the Poisson measure is the lifting of the logarithmic derivative of the underlying measure). The following theorem is a version of Theorem~3.1 in \cite{AKR1} (for more general classes of functions and vector fields). \begin{theorem}[Integration by parts formula on the Poisson space] \label{th-ibp}$\text{}$\newline For arbitrary $F^{(1)},F^{(2)}\in {\cal FC}_{\mathrm b}^\infty (\Gamma _X)$ and a smooth cylinder vector field $V\in {\cal FV}(\Gamma_X)$ $(:= {\cal F}\Omega^1)$\rom, we have \begin{gather*} \int_{\Gamma _X}\nabla _V^\Gamma F^{(1)}(\gamma )F^{(2)}(\gamma )\,\pi _\sigma (d\gamma )=-\int_{\Gamma _X}F^{(1)}(\gamma )\nabla _V^\Gamma F^{(2)}(\gamma )\,\pi _\sigma (d\gamma ) \\ \text{} -\int_{\Gamma _X}F^{(1)}(\gamma )F^{(2)}(\gamma )\big[ \left\langle B_{\pi _\sigma }(\gamma ),V(\gamma )\right\rangle _\gamma +\operatorname{div}^\Gamma V(\gamma )\big] \,\pi _\sigma (d\gamma ), \end{gather*} where the divergence $\operatorname{div}^\Gamma V(\gamma )$ of the vector field $V$ is given by \begin{gather*} \operatorname{div}^\Gamma V(\gamma )=\sum_{x\in \gamma }\operatorname{div}_x^XV(\gamma )=\langle \operatorname{div}_{\bullet }^XV(\gamma ),\gamma \rangle , \\ \operatorname{div}_x^XV(\gamma ):=\operatorname{div}^XV_x(\gamma ,x),\qquad x\in \gamma , \end{gather*} $\operatorname{div}^X$ denoting the divergence on $X$ with respect to the volume measure $m.$ \end{theorem} \noindent {\it Proof}.
The theorem follows from formula (\ref{3.1}) and the usual integration by parts formula on the space $L^2(\Lambda ^n,\sigma ^{\otimes n})$ (see also the proof of Theorem~\ref{th4.1} below).\quad $\blacksquare $ \vspace{2mm} Following \cite{AKR1}, we consider the intrinsic pre-Dirichlet form on the Poisson space \begin{equation} {\cal E}_{\pi _\sigma }(F^{(1)},F^{(2)})=\int_{\Gamma _X}\langle \nabla ^\Gamma F^{(1)}(\gamma ),\nabla ^\Gamma F^{(2)}(\gamma )\rangle _\gamma \,\pi _\sigma (d\gamma ) \label{3.2} \end{equation} with domain $D({\cal E}_{\pi _\sigma }):={\cal FC}_{\mathrm b}^\infty (\Gamma _X)$. By using the fact that the measure $\pi _\sigma $ has all moments finite, one can show that the expression (\ref{3.2}) is well-defined. Let $H_\sigma $ denote the Dirichlet operator in the space $L^2(X;\sigma )$ associated to the pre-Dirichlet form \begin{equation}\notag {\cal E}_\sigma (\varphi ,\psi )=\int_X\langle \nabla ^X\varphi (x),\nabla ^X\psi (x)\rangle _x\,\sigma (dx),\qquad \varphi ,\psi \in {\cal D}. \end{equation} This operator acts as follows: \begin{equation}\notag H_\sigma \varphi (x)=-\Delta ^X\varphi (x)-\langle \beta _\sigma (x),\nabla ^X\varphi (x)\rangle _x,\qquad \varphi\in{\cal D}, \end{equation} where $\Delta ^X:=\operatorname{div}^X\nabla ^X$ is the Laplace--Beltrami operator on $X$. Then, by using Theorem~\ref{th-ibp}, one gets \begin{equation} {\cal E}_{\pi _\sigma }(F^{(1)},F^{(2)})=\int_{\Gamma _X}H_{\pi _\sigma }F^{(1)}(\gamma )F^{(2)}(\gamma )\,\pi _\sigma (d\gamma ),\qquad F^{(1)},F^{(2)}\in {\cal FC}_{\mathrm b}^\infty (\Gamma _X). \label{sukk} \end{equation} Here, the intrinsic Dirichlet operator $H_{\pi _\sigma }$ is given by \begin{align} H_{\pi _\sigma }F(\gamma ) :&=\sum_{x\in \gamma }H_{\sigma ,x}F(\gamma )\equiv \langle H_{\sigma ,\bullet }F(\gamma ),\gamma \rangle , \notag \\ H_{\sigma ,x}F(\gamma ) :&=H_\sigma F_x(\gamma ,x),\qquad x\in \gamma , \label{dir-op1} \end{align} so that the operator $H_{\pi _\sigma }$ is the lifting to $L^1(\Gamma _X;\pi _\sigma )$ of the operator $H_\sigma $ in $L^2(X;\sigma )$. By (\ref{sukk}), the pre-Dirichlet form ${\cal E}_{\pi _\sigma }$ is closable, and we preserve the notation for the closure of this form. \begin{theorem} \label{th3.2}\rom{\cite{AKR1}} Suppose that $(H_\sigma ,{\cal D})$ is essentially self-adjoint on $L^2(X;\sigma )$\rom. Then\rom, the operator $H_{\pi _\sigma }$ is essentially self-adjoint on ${\cal FC}_{\mathrm b}^\infty (\Gamma _X).$ \end{theorem} \begin{remark} \label{proof}\rom{This theorem was proved in \cite{AKR1}, Theorem~5.3. (We have already mentioned in Remark~\ref{rem2.1} that the inclusion ${\cal FC}_{\mathrm b}^\infty ({\cal D},\Gamma _X)\subset {\cal FC}_{\mathrm b}^\infty (\Gamma _X)$ holds.) We would like to stress that this result is based on the theorem which says that the image of the operator $H_{\pi _\sigma }$ under the isomorphism $U$ between the Poisson space and the Fock space $\operatorname{Exp}\left( L^2(X;\sigma )\right) $ over $L^2(X;\sigma )$ is the differential second quantization $d\operatorname{Exp}H_\sigma $ of the operator $H_\sigma $. }\end{remark} \begin{remark}\rom{ In what follows, we will always assume that the conditions of the theorem are satisfied. This is true, e.g., in the case where $\| \beta _\sigma \| _{TX}\in L_{\mathrm loc}^p(X;\sigma )$ for some $p>\dim \,X$, see \cite{AKR1}.
}\end{remark} Finally, we mention the important fact that the diffusion process which is properly associated with the Dirichlet form $({\cal E}_{\pi _\sigma },D({\cal E}_{\pi _\sigma }))$ is the usual independent infinite particle process (or distorted Brownian motion), cf.\ \cite{AKR1}. \subsection{Laplacians on differential forms over finite-dimensional\\ manifolds} We now recall some facts on the Bochner and de Rham--Witten Laplacians on differential forms over a finite-dimensional manifold. Let $M$ be a Riemannian manifold equipped with the measure $\mu (dx)=e^{\phi (x)}dx,$ $dx$ being the volume measure and $\phi $ a $C_{\mathrm b}^2$-function on $M$. We consider a Hilbert bundle \begin{equation}\notag {\cal H}_x\mapsto x\in M \end{equation} over $M$ equipped with a smooth connection, and denote by $\nabla $ the corresponding covariant derivative in the spaces of sections of this bundle. Let $L^2(M\rightarrow {\cal H};\mu )$ be the space of $\mu $-square integrable sections. The operator \begin{equation}\notag H_\mu ^B:=\nabla _\mu ^{*}\nabla \end{equation} in $L^2(M\rightarrow {\cal H};\mu ),$ where $\nabla _\mu ^{*}$ is the adjoint of $\nabla $, will be called the Bochner Laplacian associated with the measure $\mu $. Differentiability of $\mu $ implies that $\nabla _\mu ^{*}\nabla $ is a uniquely defined self-adjoint operator. One can easily write the corresponding differential expression on the space of twice differentiable sections. In the case where $\phi \equiv 0$ and ${\cal H}_x=\wedge ^n(T_xM)$, we obtain the classical Bochner Laplacian on differential forms (see \cite{CFKSi}). Now, let $d$ be the exterior differential in spaces of differential forms over $M.$ The operator \begin{equation}\notag H_\mu ^R:=d_\mu ^{*}d+dd_\mu ^{*} \end{equation} acting in the space of $\mu $-square integrable forms, where $d_\mu ^{*}$ is the adjoint of $d$, will be called the de Rham Laplacian associated with the measure $\mu $ (or the Witten Laplacian associated with $\phi $, see \cite{CFKSi}). The relation of the Bochner and de Rham--Witten Laplacians on differential forms is given by the Weitzenb\"{o}ck formula, which in the case of 1-forms has the following form (see \cite{CFKSi}, \cite{E3}): \begin{equation}\notag H_\mu ^Ru(x)=H_\mu ^Bu(x)+R_\mu (x)u(x), \end{equation} where \begin{equation} R_\mu (x):=R(x)-\nabla ^M\beta _\mu (x), \label{weitz} \end{equation} $\beta _\mu :=\nabla ^M\phi $ being the logarithmic derivative of the measure $\mu $. Here, $R(x)\in {\cal L}(T_xM)$ is the usual Weitzenb\"{o}ck correction term: \begin{equation}\notag R(x):=\sum_{i,j=1}^{\dim M}\operatorname{Ric}_{ij}(x)a_i^{*}a_j, \end{equation} where $\operatorname{Ric}$ is the Ricci tensor on $M$, and $a_i^{*}$ and $a_j$ are the creation and annihilation operators, respectively.
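As a simple illustration (a standard special case, recalled here only for orientation), take $M={\Bbb R}^d$ with the flat metric, so that $\operatorname{Ric}\equiv 0$ and $R(x)=0$. Then (\ref{weitz}) reduces on 1-forms to \begin{equation}\notag H_\mu ^Ru(x)=H_\mu ^Bu(x)-(\operatorname{Hess}\phi (x))u(x), \end{equation} i.e., the Witten correction is just minus the Hessian of the log-density $\phi $.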
\subsection{Bochner Laplacian on 1-forms over the Poisson space} Let us consider the pre-Dirichlet form \begin{equation} {\cal E}_{\pi _\sigma }^B(W^{(1)},W^{(2)})=\int_{\Gamma _X}\langle \nabla ^\Gamma W^{(1)}(\gamma ),\nabla ^\Gamma W^{(2)}(\gamma )\rangle _{T_\gamma \Gamma _X\otimes T_\gamma \Gamma _X}\,\pi _\sigma (d\gamma ), \label{4.2} \end{equation} where $W^{(1)},W^{(2)}\in {\cal F}\Omega ^1$. Again using the fact that $\pi _\sigma $ has finite moments, one shows that the function under the sign of integral in (\ref{4.2}) is integrable with respect to $\pi _\sigma $. \begin{theorem} \label{th4.1} For any $W^{(1)},W^{(2)}\in {\cal F}\Omega ^1$\rom, we have \begin{equation}\notag {\cal E}_{\pi _\sigma }^B(W^{(1)},W^{(2)})=\int_{\Gamma _X}\left\langle H_{\pi _\sigma }^BW^{(1)}(\gamma ),W^{(2)}(\gamma )\right\rangle _{T_\gamma \Gamma _X}\,\pi _\sigma (d\gamma ), \end{equation} where $H_{\pi _\sigma }^B$ is the operator in the space $L_{\pi _\sigma }^2\Omega ^1$ given by \begin{equation} H_{\pi _\sigma }^BW=-\Delta ^\Gamma W-\left\langle \nabla ^\Gamma W,B_{\pi _\sigma }(\gamma )\right\rangle _\gamma ,\qquad W\in {\cal F}\Omega ^1. \label{boch1} \end{equation} Here\rom, \begin{equation} \Delta ^\Gamma W(\gamma ):=\sum_{x\in \gamma }\Delta _x^XW(\gamma )\equiv \left\langle \Delta _{\bullet }^\Gamma W(\gamma ),\gamma \right\rangle , \label{boch2} \end{equation} where $\Delta _x^X$ is the Bochner Laplacian of the bundle $T_{\gamma _y}\Gamma _X\mapsto y\in {\cal O}_{\gamma ,x}$ with the volume measure\rom. \end{theorem} \noindent {\it Proof}. Let us fix $W^{(1)},W^{(2)}\in {\cal F}\Omega ^1$. Let $\Lambda $ be an open bounded set in $X$ such that $\Lambda (W^{(1)})\subset \Lambda$, $\Lambda (W^{(2)})\subset \Lambda $. Then, by using (\ref{3.1}) and integrating by parts in each variable, \begin{gather*} \int_{\Gamma _X}\langle \nabla ^\Gamma W^{(1)}(\gamma ),\nabla ^\Gamma W^{(2)}(\gamma )\rangle _{T_\gamma \Gamma _X\otimes T_\gamma \Gamma _X}\,\pi _\sigma (d\gamma ) = \\ =e^{-\sigma (\Lambda )}\sum_{k=0}^\infty \frac 1{k!}\int_{\Lambda ^k}\sum_{i=1}^k\langle \nabla _{x_i}^XW^{(1)}(\{x_1,\dots ,x_k\}), \nabla _{x_i}^XW^{(2)}(\{x_1,\dots ,x_k\})\rangle _{T_{x_i}X\otimes T_{\{x_1,\dots ,x_k\}}\Gamma _X}\,\sigma (dx_1)\dotsm\sigma (dx_k) \\ =e^{-\sigma (\Lambda )}\sum_{k=0}^\infty \frac 1{k!}\sum_{i=1}^k\int_{\Lambda ^k}\langle -\Delta _{x_i}^XW^{(1)}(\{x_1,\dots ,x_k\})-\langle \nabla _{x_i}^XW^{(1)}(\{x_1,\dots ,x_k\}),\beta _\sigma (x_i)\rangle _{T_{x_i}X}, \\ W^{(2)}(\{x_1,\dots ,x_k\})\rangle _{T_{\{x_1,\dots ,x_k\}}\Gamma _X}\,\sigma (dx_1)\dotsm\sigma (dx_k) \\ =\int_{\Gamma _X}\langle H_{\pi _\sigma }^BW^{(1)}(\gamma ),W^{(2)}(\gamma )\rangle _{T_\gamma \Gamma _X}\,\pi _\sigma (d\gamma ). \quad \blacksquare \end{gather*} \begin{remark} \label{rem4.1}\rom{We can rewrite the action of the operator $H_{\pi _\sigma }^B$ in the two following forms: \begin{enumerate} \item[1)] We have from (\ref{boch1}) and (\ref{boch2}) that \begin{equation} H_{\pi _\sigma }^BW(\gamma )=\sum_{x\in \gamma }H_{\sigma ,x}^BW(\gamma )\equiv \left\langle H_{\sigma ,\bullet }^BW(\gamma ),\gamma \right\rangle ,\qquad W\in {\cal F}\Omega ^1, \label{blo1} \end{equation} where \begin{equation} H_{\sigma ,x}^BW(\gamma ):=-\Delta _x^XW(\gamma )-\left\langle \nabla _x^XW(\gamma ),\beta _\sigma (x)\right\rangle _x. \label{blo2} \end{equation} Thus, the operator $H_{\sigma ,x}^B$ is the lifting of the Bochner Laplacian on $X$ with the measure $\sigma .$ \item[2)] As easily seen, the operator $H_{\pi _\sigma }^B$ preserves the space ${\cal F}\Omega ^1$, and we can always take $\Lambda (H_{\pi _\sigma }^BW)=\Lambda (W)$.
Then, for any open bounded $\Lambda \supset \Lambda (W)$, \begin{equation} (H_{\pi _\sigma }^BW)_{\Lambda ,\gamma }=H_{\sigma ,\Lambda \cap \gamma }^BW_{\Lambda ,\gamma }, \label{cyl-boch} \end{equation} where $H_{\sigma ,\Lambda \cap \gamma }^B$ is the Bochner Laplacian of the manifold $X^{\Lambda \cap \gamma }:=\times _{x\in \Lambda \cap \gamma }X_x$, $X_x\equiv X$, with the product measure $\sigma ^{\Lambda \cap \gamma }:=\otimes _{x\in \Lambda \cap \gamma }\sigma _x$, $\sigma _x\equiv \sigma $ (cf. (\ref{cyl-form})). \end{enumerate} }\end{remark} It follows from Theorem~\ref{th4.1} that the pre-Dirichlet form ${\cal E}_{\pi _\sigma }^B$ is closable in the space $L_{\pi _\sigma }^2\Omega ^1$. The generator of its closure (being actually the Friedrichs extension of the operator $H_{\pi _\sigma }^B$, for which we will use the same notation) will be called the Bochner Laplacian on 1-forms over $\Gamma _X$ corresponding to the Poisson measure $\pi _\sigma $. For operators $A$ and $B$ acting in Hilbert spaces ${\cal H}$ and ${\cal K}$, respectively, we introduce the operator $A\boxplus B$ in ${\cal H\otimes K}$ by \begin{equation}\notag A\boxplus B:=A\otimes {\bf 1}+{\bf 1}\otimes B. \end{equation} \begin{proposition} \rom{1)} On ${\cal D}\Omega ^1$ we have \begin{equation} I^1\,H_{\pi _\sigma }^B=\left( H_{\pi _\sigma }\boxplus H_\sigma ^B\right) \,I^1. \label{dec-gen0} \end{equation} \rom{2)} ${\cal D}\Omega ^1$ is a domain of essential self-adjointness of $H_{\pi _\sigma }^B.$ \end{proposition} \noindent {\it Proof}. 1) Let $W\in {\cal D}\Omega ^1.$ Then, for some $F\in {\cal FC}_{\mathrm b}^\infty (\Gamma _X)$, $\nu \in \Omega _0^1(X)$, and any $\gamma \in \Gamma _X$, $x,z\in \gamma $, $y\in {\cal O}_{\gamma ,x}$, we have \begin{equation}\notag W_x(\gamma ,y)_z=\begin{cases} F((\gamma \setminus\{x,z\})\cup \{y\})\nu (z),& z\ne y, \\ F(\gamma \setminus \{x\})\nu (y),&z=y.\end{cases} \end{equation} Thus, \begin{equation}\notag H_{\sigma ,x}^BW(\gamma )_z=\begin{cases} H_{\sigma ,x}F(\gamma \setminus \{z\})\nu (z),&z\ne x, \\ F(\gamma \setminus \{z\})H_\sigma ^B\nu (z),& z=x. \end{cases} \end{equation} Formula (\ref{dec-gen0}) now follows from (\ref{blo1}) and (\ref{isom2}). 2) The statement follows from (\ref{dec-gen0}), the essential self-adjointness of $H_{\pi _\sigma }$ on ${\cal FC}_{\mathrm b}^\infty (\Gamma _X)$ (Theorem~\ref{th3.2}) and of $H_\sigma ^B$ on $\Omega _0^1(X)$ (the latter fact can be shown by standard methods similar to \cite{E2}, \cite{E3}), by the theory of operators admitting separation of variables \cite[Ch.6]{B}.\quad $ \blacksquare $\vspace{2mm} We also give a Fock space representation of the operator $H_{\pi _\sigma }^B$. Corollary~\ref{fock} implies the following \begin{corollary} We have \begin{equation}\notag {\cal I}H_{\pi _\sigma }^B{\cal I}^{-1}=d\operatorname{Exp}H_\sigma \boxplus H_\sigma ^B, \end{equation} cf.\ Remark~\ref{proof}. \end{corollary} \subsection{De Rham Laplacian on 1-forms over the Poisson space} We define the linear operator \begin{equation}\notag d^\Gamma :{\cal F}\Omega ^1\to {\cal F}\Omega ^2 \end{equation} by \begin{equation} (d^\Gamma W)(\gamma ):=\sqrt{2}\,\operatorname{AS}(\nabla ^\Gamma W(\gamma )), \label{5.1} \end{equation} where $\operatorname{AS}:(T_\gamma \Gamma _X)^{\otimes 2}\to (T_\gamma \Gamma _X)^{\wedge 2} $ is the antisymmetrization operator.
It follows from this definition that \begin{equation} (d^\Gamma W)(\gamma )=\sum_{x\in \gamma }(d_x^XW)(\gamma ), \label{5.2} \end{equation} where \begin{align} (d_x^XW)(\gamma ) :&=\sum_{y\in \gamma }d^X(W_x(\gamma ,x)_y) \notag \\ &=\sum_{y\in \gamma }\sqrt{2}\,\operatorname{AS}(\nabla ^XW_x(\gamma ,x)_y) \label{5.3} \end{align} with $\operatorname{AS}:T_xX\otimes T_yX\to T_xX\wedge T_yX$ being again the antisymmetrization. This implies that we have indeed the inclusion $d^\Gamma W\in {\cal F}\Omega ^2$ for each $W\in {\cal F}\Omega ^1$. Suppose that, for $W\in{\cal F}\Omega^1$, $\gamma\in\Gamma_X$, and $x,y\in\gamma$, the 1-form $W_x(\gamma,\bullet)_y$ has, in local coordinates on the manifold $X$, the following form: \begin{equation}\label{kuku} W_x(\gamma,\bullet)_y=u(\bullet)h,\qquad u\colon{\cal O}_{\gamma,x} \to{\Bbb R},\ h\in T_yX.\end{equation} Then, we have \begin{equation}\label{krik} \operatorname{AS}(\nabla^XW_x(\gamma,x)_y)=\nabla^X u(x)\wedge h, \end{equation} which, by \eqref{5.3}, describes the action of $d^X_x$. Let us consider $d^\Gamma $ as an operator acting from the space $L_{\pi _\sigma }^2\Omega ^1$ into $L_{\pi _\sigma }^2\Omega ^2$. Analogously to the proof of Theorem~\ref{th4.1}, we get the following formula for the adjoint operator $( d_{\pi _\sigma }^\Gamma ) ^{*}$ restricted to ${\cal F}\Omega ^2$: \begin{equation} ( d_{\pi _\sigma }^\Gamma ) ^{*}W(\gamma )=\sum_{x\in \gamma }( d_{\sigma ,x}^X) ^{*}W(\gamma ),\qquad W\in{\cal F}\Omega^2, \label{5.4} \end{equation} where \begin{equation} ( d_{\sigma ,x}^X) ^{*}W(\gamma )=\sum_{y\in \gamma }( d_{\sigma ,x}^X) ^{*}W_x(\gamma ,x)_{[x,y]}. \label{5.5} \end{equation} Suppose that, in local coordinates on the manifold $X$, the form $W_x(\gamma ,\bullet )_{[x,y]}$ has the representation \begin{equation} W_x(\gamma ,\bullet)_{[x,y]}=w(\bullet)h_1\wedge h_2,\qquad w\colon {\cal O}_{\gamma,x}\to{\Bbb R},\ h_1\in T_xX,\ h_2\in T_yX. \label{loc-form} \end{equation} Then, taking \eqref{krik} into account, one concludes that \begin{multline} ( d_{\sigma ,x}^X) ^{*}(W_x(\gamma ,x)_{[x,y]}) = -\frac 1{\sqrt{2}}\left[ \left( \langle \nabla ^Xw(x),h_1\rangle _x+w(x)\langle \beta _\sigma (x),h_1\rangle _x\right) h_2\right. \\ \text{}-\left. \delta _{x,y}\left( \langle \nabla ^Xw(x),h_2\rangle _x+w(x)\langle \beta _\sigma (x),h_2\rangle _x\right) h_1\right]. \label{5.6} \end{multline} Here, \begin{equation}\notag \delta _{x,y}= \begin{cases} 1,&\text{if } x=y, \\ 0,&\text{otherwise.} \end{cases} \end{equation} In what follows, we will suppose for simplicity that the function $\rho $ is infinitely differentiable on $X$ and $\rho (x)>0$ for all $x\in X$. Then, by (\ref{5.4})--(\ref{5.6}), \begin{equation}\notag ( d_{\pi _\sigma }^\Gamma ) ^{*}:{\cal F}\Omega ^2\to {\cal F}\Omega ^1. \end{equation} We also set \begin{equation}\label{chc}d^\Gamma:{\cal FC}_{\mathrm b}^\infty(\Gamma_X)\to{\cal F}\Omega^1,\qquad d^\Gamma:=\nabla^\Gamma.
\end{equation} Evidently, the restriction to ${\cal F}\Omega^1$ of the adjoint of $d^\Gamma$ considered as an operator acting from $L^2_{\pi_\sigma}(\Gamma_X)$ into $L^2_{\pi_\sigma}\Omega^1$ is given by \begin{equation}\label{lao}(d_{\pi_\sigma}^\Gamma)^{*}: {\cal F}\Omega^1\to {\cal FC}^\infty_{\mathrm b}(\Gamma_X),\qquad (d_{\pi_\sigma}^\Gamma)^{*}V(\gamma)=-\operatorname{div}^\Gamma V(\gamma)- \langle V(\gamma),B_{\pi_\sigma}(\gamma)\rangle_\gamma.\end{equation} We define the pre-Dirichlet form ${\cal E}_{\pi _\sigma }^R$ by \begin{multline*} {\cal E}_{\pi _\sigma }^R(W^{(1)},W^{(2)}):=\int_{\Gamma _X}\big[ \langle d^\Gamma W^{(1)}(\gamma ),d^\Gamma W^{(2)}(\gamma )\rangle _{\wedge ^2(T_\gamma \Gamma _X)} \\ \text{}+\langle ( d_{\pi _\sigma }^\Gamma ) ^{*}W^{(1)}(\gamma ),( d_{\pi _\sigma }^\Gamma ) ^{*}W^{(2)}(\gamma )\rangle _{T_\gamma \Gamma _X}\big] \,\pi _\sigma (d\gamma ), \end{multline*} where $W^{(1)},W^{(2)}\in D({\cal E}^R_{\pi_\sigma}):={\cal F}\Omega ^1$. The next theorem follows easily from (\ref{5.1})--(\ref{lao}). \begin{theorem} \label{th5.1} For any $W^{(1)},W^{(2)}\in {\cal F}\Omega ^1$\rom, we have \begin{equation}\notag {\cal E}_{\pi _\sigma }^R(W^{(1)},W^{(2)})=\int_{\Gamma _X}\langle H_{\pi _\sigma }^RW^{(1)}(\gamma ),W^{(2)}(\gamma )\rangle _\gamma \,\pi _\sigma (d\gamma ). \end{equation} Here, \begin{equation}\notag H_{\pi _\sigma }^R:=d^\Gamma ( d_{\pi _\sigma }^\Gamma ) ^{*}+( d_{\pi _\sigma }^\Gamma ) ^{*}d^\Gamma ,\qquad D(H_{\pi _\sigma }^R):={\cal F}\Omega ^1, \end{equation} is an operator in the space $L_{\pi _\sigma }^2\Omega ^1$\rom. It can be represented as follows\rom: \begin{equation} H_{\pi _\sigma }^RW(\gamma )=\sum_{x\in \gamma }H_{\sigma ,x}^RW(\gamma )\equiv \left\langle H_{\sigma ,\bullet }^R\,W(\gamma ),\gamma \right\rangle , \label{5.7} \end{equation} where \begin{equation} H_{\sigma ,x}^R=d_x^X( d_{\sigma ,x}^X) ^{*}+( d_{\sigma ,x}^X) ^{*}d_x^X. \label{5.8} \end{equation} \end{theorem} From Theorem~\ref{th5.1} we conclude that the pre-Dirichlet form ${\cal E}_{\pi _\sigma }^R$ is closable in the space $L_{\pi _\sigma }^2\Omega ^1$. The generator of its closure (being actually the Friedrichs extension of the operator $H_{\pi _\sigma }^R$, for which we will use the same notation) will be called the de Rham Laplacian on $\Gamma _X$ corresponding to the Poisson measure $\pi _\sigma $. By (\ref{5.7}) and (\ref{5.8}), $H_{\pi _\sigma }^R$ is the lifting of the de Rham Laplacian on $X$ with measure $\sigma $. \begin{remark}\rom{ Similarly to (\ref{cyl-boch}), the operator $H_{\pi _\sigma }^R$ preserves the space ${\cal F}\Omega ^1$, and we can always take $\Lambda (H_{\pi _\sigma }^RW)=\Lambda (W)$. Then, for any open bounded $\Lambda \supset \Lambda (W)$, we have \begin{equation} (H_{\pi _\sigma }^RW)_{\Lambda ,\gamma }=H_{\sigma ,\Lambda \cap \gamma }^RW_{\Lambda ,\gamma }, \label{cyl-der} \end{equation} where $H_{\sigma ,\Lambda \cap \gamma }^R$ is the de Rham Laplacian of the manifold $X^{\Lambda \cap \gamma }$ with the product measure $\sigma ^{\Lambda \cap \gamma }$.} \end{remark} \begin{proposition} \rom{1)} On ${\cal D}\Omega ^1$ we have \begin{equation} I^1\,H_{\pi _\sigma }^R=\left( H_{\pi _\sigma }\boxplus H_\sigma ^R\right) \,I^1. \label{dec-gen2} \end{equation} \rom{2)} ${\cal D}\Omega ^1$ is a domain of essential self-adjointness of $H_{\pi _\sigma }^R.$ \end{proposition} \noindent {\it Proof}. 1) The proof is similar to that of (\ref{dec-gen0}).
It is only necessary to note that, for a ``constant'' 1-form $W$ such that $W(\gamma )_x=\nu (x)$, we evidently have $\left( H_{\sigma ,x}^RW(\gamma )\right) _x=H_\sigma ^R\nu (x)$. 2) The proof is similar to that of the corresponding statement for the Bochner Laplacian $H_{\pi _\sigma }^B$.\quad $\blacksquare $ \begin{remark}\rom{ By similar methods, one can define Bochner and de Rham Laplacians on $n$-forms over $\Gamma _X$. An extension to this case of formulas (\ref{dec-gen0}) and (\ref{dec-gen2}) will have, however, a more complicated form. }\end{remark} \subsection{Weitzenb\"{o}ck formula on the Poisson space} In this section, we will derive a generalization of the Weitzenb\"{o}ck formula to the case of the Poisson measure on the configuration space. In other words, we will derive a formula which gives a relation between the Bochner and de Rham Laplacians. We assume that the Weitzenb\"{o}ck correction term $R_\sigma (x)\in {\cal L}(T_xX)$ (cf.\ (\ref{weitz})) is bounded uniformly in $x\in X$. Given an operator field \begin{equation} X\ni x\mapsto J(x)\in {\cal L}(T_xX) \label{op-pot} \end{equation} on $X$ (with $J(x)$ bounded uniformly in $x\in X$), we define the ``diagonal'' operator field \begin{equation} \Gamma _X\ni \gamma \mapsto {\bf J}(\gamma )\in {\cal L}(T_\gamma \Gamma _X), \label{op-field} \end{equation} using the decomposition (\ref{tg-sp1}). Thus, we can define the operator field ${\bf R}_\sigma (\gamma )$. \begin{theorem}[Weitzenb\"ock formula on the Poisson space] We have\rom, for each $W\in {\cal F}\Omega ^1,$ \begin{equation} H_{\pi _\sigma }^RW(\gamma )=H_{\pi _\sigma }^BW(\gamma )+{\bf R}_\sigma (\gamma )W(\gamma ). \label{6.2} \end{equation} \end{theorem} \noindent {\it Proof}. Let us fix $W\in {\cal F}\Omega ^1$ and $\gamma \in \Gamma _X$. Let $\Lambda \subset X$ be an open bounded set such that $\Lambda \supset \Lambda (W)$ (cf.\ Definition~\ref{def2.2}), and let ${\cal O}_{\gamma ,x_1}\times \dots \times {\cal O}_{\gamma ,x_k}$ and $W_{\Lambda ,\gamma }$ be as in Remark \ref{form-fin}. We then have, according to (\ref{cyl-boch}) and (\ref{cyl-der}), \begin{align*} (H_{\pi _\sigma }^BW)_{\Lambda ,\gamma } &=H_{\sigma ,\Lambda \cap \gamma }^BW_{\Lambda ,\gamma }, \\ (H_{\pi _\sigma }^RW)_{\Lambda ,\gamma } &=H_{\sigma ,\Lambda \cap \gamma }^RW_{\Lambda ,\gamma }, \end{align*} and the Weitzenb\"{o}ck formula for the manifold $X^{\Lambda \cap \gamma }$ and the measure $\sigma ^{\Lambda \cap \gamma }$ implies that \[ H_{\sigma ,\Lambda \cap \gamma }^RW_{\Lambda ,\gamma }(y_1,\dots,y_k)=H_{\sigma ,\Lambda \cap \gamma }^BW_{\Lambda ,\gamma }(y_1,\dots,y_k)+R(y_1,\dots,y_k)W_{\Lambda ,\gamma }(y_1,\dots,y_k), \] where the correction term $R(y_1,\dots,y_k)\in {\cal L}(T_{(y_1,\dots,y_k)}X^{\Lambda \cap \gamma })$ is equal to the restriction of ${\bf R}_\sigma (\gamma )$ to the space $T_{(y_1,\dots,y_k)}X^{\Lambda \cap \gamma }$ (considered as a subspace of $T_\gamma \Gamma _X$), which is well-defined because of the ``diagonal'' character of ${\bf R}_\sigma (\gamma )$. It is now enough to remark that the forms $H_{\pi _\sigma }^RW$ and $H_{\pi _\sigma }^BW$ are completely determined by the corresponding forms $(H_{\pi _\sigma }^RW)_{\Lambda ,\gamma }$ and $(H_{\pi _\sigma }^BW)_{\Lambda ,\gamma }$, respectively.\quad $\blacksquare $ We can also give an intrinsic description of the correction term ${\bf R}_\sigma (\gamma )$.
To this end, for each fixed $\gamma \in \Gamma _X$, we define the operator $R(\gamma )\colon T_{\gamma,0}\Gamma_X\to T_{\gamma,0}\Gamma_X$ as follows: \begin{gather} R(\gamma ):=\sum_{x\in \gamma }R(\gamma ,x),\nonumber \\ \label{corr} R(\gamma,x)(V(\gamma)_y):=\delta_{x,y}\sum_{i,j=1}^d\operatorname{Ric}_{ij}(x)e_i\,\langle V(\gamma)_x,e_j\rangle_x,\qquad V(\gamma)\in T_{\gamma,0} \Gamma_X. \end{gather} Here, $\{e_j\}_{j=1}^d$ is again a fixed orthonormal basis in the space $T_xX$ considered as a subspace of $T_\gamma \Gamma _X$. Next, we note that \begin{align*} \nabla ^\Gamma B_{\pi _\sigma }(\gamma ) &=(\nabla _x^XB_{\pi _\sigma }(\gamma ))_{x\in \gamma }=(\nabla _x^X(B_{\pi _\sigma }(\gamma )_y))_{x,y\in \gamma } \nonumber \\ &=(\delta _{x,y}\nabla ^X\beta _\sigma (y))_{x,y\in \gamma }\in (T_{\gamma ,\infty }\Gamma _X)^{\otimes 2}. \end{align*} Hence, for any $V(\gamma )\in T_{\gamma ,0}\Gamma _X$, \begin{align} \nabla _V^\Gamma B_{\pi _\sigma }(\gamma ):&=\langle \nabla ^\Gamma B_{\pi _\sigma }(\gamma ),V(\gamma )\rangle _\gamma \nonumber \\ &=\bigg( \sum_{y\in \gamma }\delta _{x,y}\langle \nabla ^X\beta _\sigma (y),V(\gamma )_y\rangle _y\bigg) _{x\in \gamma } \notag\\ &=\left( \langle \nabla ^X\beta _\sigma (x),V(\gamma )_x\rangle _x\right) _{x\in \gamma }\in T_{\gamma ,0}\Gamma _X. \label{sdlog} \end{align} Thus, $\nabla ^\Gamma B_{\pi _\sigma }(\gamma )$ determines the linear operator in $T_{\gamma ,0}\Gamma _X$ given by \begin{equation}\notag T_{\gamma ,0}\Gamma _X\ni V(\gamma )\mapsto \nabla ^\Gamma B_{\pi _\sigma }(\gamma )V(\gamma ):=\nabla _V^\Gamma B_{\pi _\sigma }(\gamma )\in T_{\gamma ,0}\Gamma _X. \end{equation} \begin{proposition} We have \begin{equation}\notag {\bf R}_\sigma (\gamma )W(\gamma )=R(\gamma )W(\gamma )-\nabla ^\Gamma B_{\pi _\sigma }(\gamma )W(\gamma ). \end{equation} \end{proposition} \noindent {\it Proof}. The proposition is derived from the definition of ${\bf R}_\sigma $ and formulas (\ref{sdlog}) and (\ref{corr}).\quad $\blacksquare $ \section{Probabilistic representations of the Bochner and de Rham Laplacians} Let $\xi _x(t)$ be the Brownian motion with drift $\beta _\sigma $ on $X$ started at a point $x\in X$. We suppose the following: \begin{itemize} \item for each $x$, the process $\xi _x(t)$ has an infinite lifetime; \item the semigroup \begin{equation}\notag T_0(t)f(x):={\sf E}\,f(\xi _x(t)) \end{equation} acting in the space of bounded measurable functions on $X$ can be extended to a strongly continuous semigroup of contractions in $L^2(X;\sigma )$, and its generator $H_0$ is essentially self-adjoint on the space $\cal D$ (in this case $H_0=-H_\sigma $). \end{itemize} It follows from the general theory of stochastic differential equations that these assumptions are satisfied if, e.g., $\beta _\sigma \in C_b^4(X\rightarrow TX)$. We denote by $\xi _\gamma (t)$ the corresponding independent particle process on $\Gamma _X$ which starts at a point $\gamma$, \begin{equation}\notag \xi _\gamma (t)=(\xi _x(t))_{x\in \gamma }. \end{equation} Let \begin{equation} {\bf T}_0(t)F(\gamma ):={\sf E}\,F(\xi _\gamma (t)) \end{equation} be the corresponding semigroup in the space of measurable bounded functions on $\Gamma _X$. It is shown in \cite{AKR1} that it can be extended to a strongly continuous semigroup in $L_{\pi _\sigma }^2(\Gamma _X)$ with the generator ${\bf H}_0=-H_{\pi _\sigma }$ on ${\cal FC}_{\mathrm b}^\infty (\Gamma _X)$.
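\begin{remark}\rom{ As a simple illustration of the objects introduced above, consider the flat case $X={\Bbb R}^d$ equipped with the Lebesgue measure $\sigma$. Then $\beta_\sigma =0$ and $\operatorname{Ric}\equiv 0$, so that $R(\gamma )=0$, $\nabla ^\Gamma B_{\pi _\sigma }(\gamma )=0$, and hence ${\bf R}_\sigma (\gamma )=0$; the Weitzenb\"ock formula (\ref{6.2}) then reduces to $H_{\pi _\sigma }^R=H_{\pi _\sigma }^B$ on ${\cal F}\Omega ^1$. Moreover, each $\xi _x(t)$ is in this case a standard Brownian motion, and the assumptions above are trivially satisfied. }\end{remark}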
Given the operator field (\ref{op-pot}), which is supposed to be continuous and symmetric (i.e., $J(x)=J(x)^{*}$), we define the operator \begin{equation} {\bf P}_{\xi _\gamma }^J(t):T_{\xi _\gamma (t)}\Gamma _X\rightarrow T_\gamma \Gamma _X \label{partr} \end{equation} by setting \begin{equation}\notag ( {\bf P}_{\xi _\gamma }^J(t)V) _x=( P_{\xi _x}^J(t)) ^{*}V_{\xi _x(t)},\qquad V\in T_{\xi _\gamma (t)}\Gamma _X, \end{equation} where the operator \[ ( P_{\xi _x}^J(t)) ^{*}:T_{\xi _x(t)}X\rightarrow T_xX \] is the adjoint (w.r.t.\ the Riemannian structure of $X$) of the parallel translation \begin{equation}\notag P_{\xi _x}^J(t):T_xX\rightarrow T_{\xi _x(t)}X \end{equation} along $\xi _x(t)$ with potential $J$. That is, $\eta (t)=P_{\xi _x}^J(t)h$ satisfies the equation \begin{equation} \frac D{dt}\eta (t)=J(\eta (t)),\qquad \eta (0)=h, \end{equation} where $\frac D{dt}$ denotes covariant differentiation along the paths of the process $\xi $ (see \cite{E3}). It is known that \begin{equation} \| P_{\xi _x}^J(t)\| \le e^{tC}, \notag\end{equation} where $C$ is the supremum over $x\in X$ of the spectrum of $J(x)$. This obviously implies the analogous estimate for ${\bf P}_{\xi _\gamma }^J$: \begin{equation} \| {\bf P}_{\xi _\gamma }^J(t)\| \le e^{tC}. \label{est1} \end{equation} Let us define a semigroup ${\bf T}_1^{{\bf J}}(t)$ associated with the process $\xi _\gamma $ and the potential ${\bf J}$. \begin{definition}\rom{ For $V\in {\cal F}\Omega ^1$, we set \begin{equation} {\bf T}_1^{{\bf J}}(t)V(\gamma ):={\sf E}\,{\bf P}_{\xi _\gamma }^J(t)V(\xi _\gamma (t)).\notag \end{equation} }\end{definition} Let $T_1^J(t)$ be the semigroup acting in $L_\sigma ^2\Omega ^1(X)$ as \begin{equation}\notag T_1^J(t)\nu (x):={\sf E\,}P_{\xi _x}^J(t)^{*}\nu(\xi _x(t)). \end{equation} The following result describes the structure and properties of the semigroup ${\bf T}_1^{{\bf J}}(t)$. \begin{proposition} \label{pnsem}\rom{1)} ${\bf T}_1^{{\bf J}}(t)$ satisfies the estimate \begin{equation} \| {\bf T}_1^{{\bf J}}(t)V(\gamma )\| _\gamma \le e^{tC}{\bf T}_0(t)\| V(\gamma )\| _\gamma . \label{markov} \end{equation} \rom{2)} Under the action of the isomorphism $I^1$\rom, ${\bf T}_1^{{\bf J}}(t)$ takes the following form\rom: \begin{equation} I^1{\bf T}_1^{{\bf J}}(t)={\bf T}_0(t){\bf \otimes }T_1^J(t)\;I^1. \label{dec-sem} \end{equation} \rom{3)} ${\bf T}_1^{{\bf J}}(t)$ extends to a strongly continuous semigroup in $L_{\pi _\sigma }^2\Omega ^1$. \end{proposition} \noindent{\it Proof}. 1) The result follows from formula (\ref{est1}). 2) Let $V\in{\cal D}\Omega^1$ be given by \eqref{isom1}. By the definition of ${\bf T}_1^{{\bf J}}(t)$ and the construction of the process $\xi _\gamma $, we have \begin{equation}\notag {\bf T}_1^{{\bf J}}(t)V(\gamma )={\sf E\,}{\bf P}_{\xi _\gamma }^{{\bf J}}(t)V(\xi _\gamma (t)) \end{equation} and \begin{align*} ( {\bf T}_1^{{\bf J}}(t)V(\gamma )) _x &={\sf E\,}F(\xi _\gamma (t)\setminus \left\{ \xi _x(t)\right\} )P_{\xi _x}^{J}(t)^{*}\nu(\xi _x(t)) \\ &={\sf E\,}F(\xi _\gamma (t)\setminus \left\{ \xi _x(t)\right\} )\,{\sf E}_{\xi _x} \,P_{\xi _x}^{J}(t)^{*}\nu(\xi _x(t)) \\ &={\bf T}_0(t)F(\gamma \setminus \left\{ x\right\} )T_1^J(t)\nu(x), \end{align*} where ${\sf E}_{\xi _x}$ denotes the expectation w.r.t.\ the process $\xi _x(t)$, whence the result follows. 3) The result follows from the corresponding results for the semigroups ${\bf T}_0(t)$ and $T_1^J(t)$, which are well known (see \cite{AKR1} resp.
\cite{E3}).\quad $\blacksquare $\vspace{2mm} Let ${\bf H}_1^{{\bf J}}$ and $H_1^J$ be the generators of ${\bf T}_1^{{\bf J}}(t)$ and $T_1^J(t)$, respectively. Now we give probabilistic representations of the semigroups $T_{\pi _\sigma }^B(t)$ and $T_{\pi _\sigma }^R(t)$ associated with the operators $H_{\pi _\sigma }^B$ and $H_{\pi _\sigma }^R$, respectively. We set $J_0=0$, $J_1(x)=R_\sigma (x)$ (cf.\ (\ref{weitz})). Let us remark that $P_{\xi _x}^{J_0}(t)\equiv P_{\xi _x}(t)$ is the parallel translation of 1-forms along the path $\xi _x$, and we have $H_1^{J_0}=-H_\sigma ^B$ and $H_1^{J_1}=-H_\sigma ^R$ on $\Omega _0^1(X)$. We then have the following result. \begin{theorem} \rom{1)} For $W\in {\cal D}\Omega ^1$\rom, we have \begin{equation} H_{\pi _\sigma }^BW=-{\bf H}_1^{{\bf J}_0}W,\qquad H_{\pi _\sigma }^RW=-{\bf H}_1^{{\bf J}_1}W. \label{prrepgen} \end{equation} \rom{2)} As $L^2$-semigroups\rom, \begin{equation} T_{\pi _\sigma }^B(t)={\bf T}_1^{{\bf J}_0}(t),\qquad T_{\pi _\sigma }^R(t)={\bf T}_1^{{\bf J}_1}(t). \label{prrepsem} \end{equation} \rom{3)} The semigroups $T_{\pi _\sigma }^B(t)$ and $T_{\pi _\sigma }^R(t)$ satisfy the estimates\rom: $$ \| T_{\pi _\sigma }^B(t)V(\gamma )\|_\gamma \le {\bf T}_0(t)\| V(\gamma )\|_\gamma$$ and $$ \| T_{\pi _\sigma }^R(t)V(\gamma )\|_\gamma \le e^{tC}{\bf T}_0(t)\| V(\gamma )\|_\gamma . $$ \end{theorem} \noindent {\it Proof}. 1) It follows directly from the decomposition (\ref{dec-sem}) that, on ${\cal D}\Omega ^1$, we have \begin{equation} I^1\,{\bf H}_1^{{\bf J}}=\left( {\bf H}_0\boxplus H_1^J\right) \,I^1. \label{dec-gen} \end{equation} Setting $J=J_0$ and $J=J_1$ and comparing (\ref{dec-gen0}) and (\ref{dec-gen}), we obtain the result. 2) The statement follows from (\ref{prrepgen}) and the essential self-adjointness of $H_{\pi_\sigma}^B$ and $H_{\pi_\sigma}^R$ on ${\cal D}\Omega ^1$ by applying Proposition \ref{pnsem}, 3), with $J=J_0$ and $J=J_1$, respectively. 3) The result follows from (\ref{prrepsem}) and (\ref{markov}).\quad $\blacksquare $ \section{Acknowledgments} The first author is very grateful to the organizers for giving him the opportunity to present his results at a most stimulating conference. It is a great pleasure to thank our friends and colleagues Yuri Kondratiev, Tobias Kuna, and Michael R\"{o}ckner for their interest in this work and the joy of collaboration. We are also grateful to V.~Liebscher for a useful discussion. The financial support of SFB 256 and DFG Research Project AL 214/9-3 is gratefully acknowledged.
\section{Introduction\label{sec:intro}} Throughout this paper, we assume that icon images, or pictograms, are designed by abstracting and simplifying object images. Figure~\ref{fig:icon_sample} shows the black-and-white icon images provided in Microsoft PowerPoint. We can observe that icon images are not just binarized object images but are designed with severe abstraction and simplification of the original object appearance. For example, a person's head is often drawn as a plain circle. Graphic designers have professional knowledge and skills for abstraction and simplification while keeping the discriminability of the original object. \par \begin{figure}[tb] \centering \includegraphics[width=0.4\textwidth]{fig/icon_sample.pdf}\\[-2mm] \caption{Black-and-white icon images provided in Microsoft PowerPoint.} \label{fig:icon_sample} \end{figure} This paper reports our trials to generate icon images automatically from natural photographs by using machine learning techniques. Our main purpose is to reveal whether machine learning techniques can capture and mimic the abstraction and simplification skills of human experts in designing icons. We encounter the following three difficulties that make our task challenging. \par The first difficulty is that this is a domain conversion task between two sample sets (i.e., domains). If we had a dataset with image pairs of an icon and its original photo image, our image generation task would become a direct conversion, which can be solved by conventional methods, such as U-net or its variants. However, it is not feasible to have such a dataset in practice. Hence, we can only prepare a set of photo images and a set of icon images, without any one-to-one correspondence between the two domains. \par The second difficulty lies in the large style difference between the photo image domain and the icon image domain. For example, the appearance of a person's head is totally different from its representation in icon images, as shown in Figure~\ref{fig:icon_sample}. Thus, the selected machine learning technique must be able to learn a mapping that fills the large gap between both domains. \par The third difficulty lies in the large appearance variations in both domains. Although icon images are simple and plain, they still have large variations in their shapes to represent various objects. Object photo images have even more variations in their shape, color, texture, etc. The mapping between the two domains needs to cope with these variations. \par We, therefore, employ CycleGAN\cite{CycleGAN} and UNIT\cite{UNIT} as the machine learning techniques for our task. Both of them can learn the mapping between two different domains thanks to a cycle-consistency loss, and this mapping can be used as a domain converter. Note that the original papers of CycleGAN and UNIT tackle rather easier domain conversion tasks, such as converting horses to zebras or winter scenery to summer scenery. For our task, in contrast, they have to learn the mapping between a photo image set and an icon image set, so that the learned mapping can convert an arbitrary object photo into its iconified version. \par The results of our trials with several image datasets reveal that CycleGAN is able to iconify photo images even with the mentioned difficulties, as shown in Figure~\ref{fig:teaser}. This proves that CycleGAN can learn the abstraction and simplification ability. We also reveal that the quality of the generated icons can be improved by limiting both domains to a specific object class, such as persons.
\section{Related work\label{sec:related}} \subsection{Logos and icons} To the best of our knowledge, there is no computer science research on icon generation, where icons are defined as abstracted and simplified object images. Instead, we can find many research efforts on {\em logos}. In \cite{logo_definition}, a logo is defined as ``a symbol, a graphic and visual sign which plays an important role into the communication structure of a company'' and classified into three types: iconic or symbolic logos, text-based logos, and mixed logos. In this sense, logos are a broader target than icons for visual analytics research. \par Compared with traditional logo design research, which often focuses on how a logo design affects human behavior and impression through subjective experiments (e.g., \cite{logo_development,logo_evaluation,logo_move,logo_change}), recent research has become more objective and data-driven. These works are supported by various logo image datasets, such as FlickrLogos\cite{FlickrLogos}, LOGO-net\cite{LOGO-net}, WebLogo-2M\cite{WebLogo-2M}, Logo-2K+\cite{Logo-2K+}, and LLD\cite{LLD}. Especially, LLD is comprised of 6 million logo images and is sufficient as a dataset for data-hungry machine learning techniques. \subsection{Image generation by machine learning} After the proposal of the variational autoencoder (VAE), Neural Style Transfer (NST)~\cite{styletransfer}, and generative adversarial networks (GANs), many image generation methods based on machine learning have been proposed. In particular, GAN-based image generation is a major research trend, supported by many quality-improvement techniques, such as \cite{WGAN,PGGAN,SinGAN}. \par GANs have also been extended to deal with image conversion tasks. Pix2pix~\cite{pix2pix} is a well-known technique for converting an input image from a domain $X$ to an image in a domain $Y$. Pix2pix is trained with a ``paired'' sample set $\{(x,y)\mid x\in X,\ y\in Y\}$. For example, $x$ is a scene image during daytime and $y$ is a nighttime image at the same location. By training pix2pix with such pairs, a day-night converter can be realized. CycleGAN\cite{CycleGAN} and UNIT\cite{UNIT} can also realize a domain conversion task, but they are more advanced than pix2pix: given just two sample sets (i.e., two domains) without any correspondence between them, they can learn a mapping function between both domains. \par Those image generation and conversion methods are also used for generating visual designs. For example, the idea of NST is applied to attach decoration to font images~\cite{fontST} and logo skeletons~\cite{tugs}. GANs are applied to font generation~\cite{fontGAN,hayashi}. In \cite{icon_color}, a conditional GAN is proposed to paint an edge image with a color style similar to that of a given color image. In \cite{LLD}, GANs are used to generate general logo images from random vectors. In \cite{muhammad}, reinforcement learning is employed for sketch abstraction. \par In this paper, we treat icon generation as a domain conversion between the photo image domain and the icon image domain. Since there is no prior correspondence between them, we employ CycleGAN~\cite{CycleGAN} and UNIT~\cite{UNIT}. We will see that those GANs can bridge the huge gap between the two domains and establish a mapping that ``iconifies'' a photo image into an icon-like image. \section{GANs to Iconify\label{sec:GAN}} We employ CycleGAN\cite{CycleGAN} and UNIT\cite{UNIT} to transform natural photos into icon-like images.
Both are domain conversion methods that can determine a mapping between two domains (i.e., image sets) without any one-to-one correspondence between the elements of the two sets. In our task, it is not feasible to provide one-to-one correspondences between photos and icon images prior to training. Therefore, CycleGAN and UNIT are reasonable choices. \subsection{CycleGAN} CycleGAN\cite{CycleGAN} determines a mapping between two image sets, $X$ and $Y$, without any image-to-image correspondence. Figure~\ref{fig:CycleGAN} illustrates the overall structure of CycleGAN, which is comprised of two generators (i.e., style transformers) $G$ and $F$ and two discriminators $D_X$ and $D_Y$. In other words, two GANs ($G\leftrightarrow D_Y$ and $F\leftrightarrow D_X$) are coupled to bridge the two domains $X$ and $Y$. \par Those modules are co-trained with three loss functions: the adversarial loss $L_{\mathrm GAN}$, the cycle-consistency loss $L_{\mathrm CC}$, and the identity mapping loss $L_{\mathrm IM}$. The adversarial loss is used for training the two GANs. The cycle-consistency loss is necessary to realize a bi-directional and one-to-one mapping between $X$ and $Y$ by letting $G^{-1}\sim F$ and vice versa. The identity mapping loss is optional and is used for color constancy in the style transformations by $F$ and $G$. \par In the following experiments, we use the network structure and the original implementation\footnote{https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix} provided by the authors \cite{CycleGAN}. Note that for the experiments generating black-and-white icons from color photos (Sections \ref{sec:ex1} and \ref{sec:ex2}), color constancy is not necessary; we therefore weaken the identity mapping loss in those experiments. \begin{figure}[t] \centering \includegraphics[width=0.4\textwidth]{fig/CycleGAN_loss.pdf} \caption{(a)~Overview of CycleGAN~\cite{CycleGAN}. Two GANs are coupled to bridge two domains $X$ and $Y$. (b)~Cycle-consistency loss, $L_{\mathrm CC}$. (c)~Identity mapping loss, $L_{\mathrm IM}$.} \label{fig:CycleGAN} \end{figure} \subsection{UNIT} UNIT~\cite{UNIT} can be considered an extended version of CycleGAN that accomplishes style transformation between two image sets, $X$ and $Y$. Its main difference from CycleGAN is the condition that an original image and its transformed image should be represented by the same variable in the latent space $Z$. As illustrated in Figure~\ref{fig:UNIT}, UNIT is comprised of two encoders $E_X$ and $E_Y$, two generators $G_X$ and $G_Y$, and two discriminators $D_X$ and $D_Y$. Note that the generator $G$ of CycleGAN is divided into $E_X$ and $G_Y$ in UNIT. Those modules are co-trained with the VAE loss $L_{\mathrm VAE}$, the adversarial loss $L_{\mathrm GAN}$, and the cycle-consistency loss $L_{\mathrm CC}$. The VAE loss is introduced so that the latent variable contains sufficient information about the original images. In the following experiments, we use the network structure and the original implementation\footnote{https://github.com/mingyuliutw/UNIT} provided by the authors \cite{UNIT}. \begin{figure}[tb] \centering \includegraphics[width=1\linewidth]{fig/UNIT_loss.pdf} \caption{(a)~Overview of UNIT~\cite{UNIT}. (b)~VAE-loss, $L_{\mathrm{VAE}}$. (c)~Cycle-Consistency loss, $L_{\mathrm{CC}}$.} \label{fig:UNIT} \end{figure}
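To make the training objectives concrete, the generator-side part of the CycleGAN update can be sketched as follows. This is a minimal PyTorch-style sketch, not the released implementations cited above: the loss weights and function names are our placeholders, and LSGAN-form adversarial terms are assumed. UNIT's update additionally includes the VAE term on the shared latent code.
\begin{verbatim}
import torch
import torch.nn.functional as nnf

def generator_losses(G, F, D_X, D_Y, x, y, lam_cc=10.0, lam_im=0.5):
    # G: X -> Y and F: Y -> X are the two style transformers.
    fake_y, fake_x = G(x), F(y)
    ones_y = torch.ones_like(D_Y(fake_y))
    ones_x = torch.ones_like(D_X(fake_x))
    # L_GAN: generated samples should fool the discriminators.
    l_gan = (nnf.mse_loss(D_Y(fake_y), ones_y)
             + nnf.mse_loss(D_X(fake_x), ones_x))
    # L_CC: F(G(x)) ~ x and G(F(y)) ~ y, enforcing G^{-1} ~ F.
    l_cc = nnf.l1_loss(F(fake_y), x) + nnf.l1_loss(G(fake_x), y)
    # L_IM: G(y) ~ y and F(x) ~ x, promoting color constancy (optional).
    l_im = nnf.l1_loss(G(y), y) + nnf.l1_loss(F(x), x)
    return l_gan + lam_cc * l_cc + lam_im * l_im
\end{verbatim}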
\section{Image Datasets to Iconify} \subsection{Object photograph data\label{sec:obj_sample}} Since icons generally have no background, we need to prepare object images without background. Unfortunately, there is no large-scale image dataset that satisfies this condition. We, therefore, resort to MS-COCO~\cite{MSCOCO}, which is an image dataset with pixel-level ground truth for semantic segmentation. Figure~\ref{fig:COCO_sample} shows an image from MS-COCO and its pixel-level ground truth for three objects, ``person'', ``dog'', and ``skateboard''. Including those three classes, MS-COCO provides ground truth for 80 object classes. \par Figure~\ref{fig:obj_sample} shows examples of object images extracted by using the pixel-level ground truth. After removing very small objects, we obtain 11,041 individual objects from 5,000 images of MS-COCO. Those images were resized to 256$\times$256 pixels including a white margin. Note that the obtained object images often do not include the whole object; a part of the object is missing in most samples due to occlusion in the original image. In addition, the object boundary is often neither smooth nor accurate. Therefore, these object images are not perfect as training samples for icon generation, although they are the best among the available datasets.
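Concretely, the extraction can be sketched with the pycocotools API as follows; the file paths, the minimum-size threshold, and the margin size are our illustrative choices, not the exact values used to build the dataset.
\begin{verbatim}
import os
import numpy as np
from PIL import Image
from pycocotools.coco import COCO

os.makedirs("objects", exist_ok=True)
coco = COCO("annotations/instances_train2017.json")   # illustrative path
for img_info in coco.loadImgs(coco.getImgIds()):
    path = "train2017/" + img_info["file_name"]
    img = np.array(Image.open(path).convert("RGB"))
    for ann in coco.loadAnns(coco.getAnnIds(imgIds=img_info["id"])):
        mask = coco.annToMask(ann).astype(bool)       # pixel-level ground truth
        if mask.sum() < 1024:                         # drop very small objects
            continue
        obj = np.full_like(img, 255)                  # white background
        obj[mask] = img[mask]                         # keep only object pixels
        ys, xs = np.where(mask)
        crop = Image.fromarray(obj[ys.min():ys.max() + 1,
                                   xs.min():xs.max() + 1])
        crop.thumbnail((224, 224))                    # leave a white margin
        canvas = Image.new("RGB", (256, 256), "white")
        canvas.paste(crop, ((256 - crop.width) // 2,
                            (256 - crop.height) // 2))
        canvas.save("objects/%d.png" % ann["id"])
\end{verbatim}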
\begin{figure}[tb] \centering \includegraphics[width=\linewidth]{fig/COCO_sample.pdf} \\[-5mm] \caption{(a)~An image from MS-COCO. Three object class labels, ``person'', ``dog'', and ``skateboard'', are attached to this image. (b)~Pixel-level ground truth for those three classes.} \label{fig:COCO_sample} \bigskip\bigskip \includegraphics[width=0.9\linewidth]{fig/obj_sample.pdf}\\[-3mm] \caption{Object photo images extracted from MS-COCO.} \label{fig:obj_sample} \bigskip\bigskip \includegraphics[width=0.8\linewidth]{fig/logo_sample.pdf}\\[-3mm] \caption{Logo images from LLD~\cite{LLD}.} \label{fig:logo_sample} \bigskip\bigskip \begin{minipage}{0.23\textwidth} \centering \includegraphics[width=\textwidth]{fig/obj_person_clean.pdf} \small{(a)} \end{minipage} \begin{minipage}{0.23\textwidth} \centering \includegraphics[width=\textwidth]{fig/icon_person_clean.pdf} \small{(b)} \end{minipage}\\[-3mm] \caption{(a)~Person photos and (b)~person icon images.\label{fig:person_sample}} \vskip -3mm \end{figure} \begin{figure}[t] \centering \includegraphics[width=0.48\textwidth]{fig/person-iconified.pdf}\\[-3mm] \caption{Iconified person photos by GANs trained with icons and photo images depicting persons. (a)~Original person photo. (b)~Iconified result by CycleGAN. (c)~Iconified result by UNIT.} \label{fig:person_result} \bigskip\bigskip \includegraphics[width=0.47\textwidth]{fig/reconst.pdf}\\[-3mm] \caption{Reconstruction results by CycleGAN in two scenarios, (a) and (b).\label{fig:reconst}} \vskip -3mm \end{figure} \subsection{Icon image data} As an icon image dataset, we used the black-and-white icon images provided by Microsoft PowerPoint. Figure~\ref{fig:icon_sample} shows examples. Those icons are categorized into 26 classes, and the total number of images is 883. Those images are resized to 256$\times$256 pixels including a white margin. As data augmentation during GAN training, they are translated, rotated, and scaled to increase their number up to 8,830. \subsection{Logo image data as an alternative to icon images} As an alternative to PowerPoint icons, we also examine logo images from LLD~\cite{LLD}. Logos and icons are different in their purpose and shape. For example, texts are often used in logos but not in icons. In addition, logo images are more colorful than icons. However, they are still similar in their abstract design, and therefore we also examine logo images. Figure~\ref{fig:logo_sample} shows logo examples from LLD-logo. The 122,920 logo images in LLD-logo were collected from Twitter profile images. In our experiment, we randomly select 20,000 images and resize them from their original 400$\times$400 pixels to 256$\times$256 pixels (including a white margin). \section{Experimental results\label{sec:experiment}} \subsection{Iconify human photos} \label{sec:ex1} As the first task, we train both GANs using only icons and photo images depicting persons. Figure~\ref{fig:person_sample} shows those training samples. By limiting the shape diversity of the training samples, we can observe the basic ability of GANs to iconify. Prior to training, we excluded person images that capture only a small part of a human body, such as a hand or an ear. Icon images showing multiple persons are also excluded. Finally, 1,440 icon images augmented from 72 icon images and 1,684 person photos are used as training samples for CycleGAN and UNIT in this experiment. \par \begin{figure*}[t] \centering \includegraphics[width=1\linewidth]{fig/iconified-with-icon.pdf}\\[-4mm] \caption{Iconified general image photos by CycleGAN trained with PowerPoint icon images. In the orange box, results of untrained samples are shown. In the blue box, typical failure results are shown. \label{fig:iconified-with-icon}} \bigskip\bigskip \includegraphics[width=\textwidth]{fig/iconified-with-logo.pdf}\\[-4mm] \caption{Iconified general image photos by CycleGAN trained with LLD-logo images. In the orange box, results for untrained samples are shown. In the blue box, typical failure results are shown.\label{fig:iconified-with-logo}} \vskip -2mm \end{figure*} Figure~\ref{fig:person_result} shows person photos iconified by CycleGAN and UNIT. These images are the iconified results of the training samples. Since the number of images is very limited for this ``person-only'' experiment, it was not realistic to separate the images into training and testing sets. It should be noted that showing the results on the training samples is still reasonable: in our task, there is no ground truth of the iconified result for each photo image; in other words, we do not use any ground-truth information during training. The results in the later sections contain the iconified results of untrained samples.\par From Figure~\ref{fig:person_result} we can see that both GANs successfully convert person photos into icon-like images; they are not just binarization results but show strong shape abstraction. In particular, CycleGAN generates more abstract icon images with a circular head and a simplified body shape. It is noteworthy that the head is often separated from the body part, which makes the generated images more icon-like. For facial images (in the bottom row), the iconified results are not natural; this is because we did not use icon images showing facial details during training.\par Compared with CycleGAN, the results by UNIT are less abstract (i.e., they keep the original shape of the person photo) and therefore more similar to binarization results. Since UNIT has the strong condition that the original photo and its iconified image share the same latent variable, it is difficult to realize strong shape abstraction. \par Since CycleGAN has the cycle-consistency loss, it is possible to reconstruct the original photo image from its iconified version.
Figure~\ref{fig:reconst}~(a) shows several reconstruction results. It is interesting that the original color image can still be reconstructed from the black-and-white iconified result. It is also interesting that we can convert icon images into photo-like images by using the same CycleGAN model. The examples in Figure~\ref{fig:reconst}~(b) show the difficulty of this icon-to-photo scenario; however, the icon images reconstructed from those photo-like images are almost the same as the original ones. \subsection{Iconify general object photos with PowerPoint icons} \label{sec:ex2} As the second task, we use all photos from MS-COCO (Figure~\ref{fig:obj_sample}) and all icon images from PowerPoint (Figure~\ref{fig:icon_sample}) to train CycleGAN and generate the iconified results of general object photos. Since the first task reveals that CycleGAN has a stronger abstraction ability than UNIT, we only use CycleGAN in this experiment. \par This task is far more difficult than the previous one because CycleGAN needs to deal with not only the shape variations caused by the abstraction in icon images but also the shape variations across object types (e.g., cars and balls). Moreover, the shape variations of the object photo images are severe due to partial occlusions and inaccurate extractions, as noted in Section~\ref{sec:obj_sample}. \par To deal with these huge variations, we used a simple coarse-to-fine strategy for training CycleGAN, sketched below. Specifically, we first train CycleGAN with the training samples resized to 32$\times$32 pixels. Then, we fine-tune the CycleGAN at 64$\times$64, then 128$\times$128, and finally 256$\times$256. Similar coarse-to-fine strategies are used for other GANs, such as PGGAN\cite{PGGAN}, SinGAN\cite{SinGAN}, and DiscoGAN\cite{DiscoGAN}. \par
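A minimal sketch of this schedule follows; the resolution list matches the text above, while the data-loader and training-loop names are placeholders for the released CycleGAN implementation.
\begin{verbatim}
import torchvision.transforms as T

SCHEDULE = [32, 64, 128, 256]   # train at 32x32, then fine-tune upward

def transforms_for(size):
    # Standard GAN preprocessing; plain resizing preserves the white margin.
    return T.Compose([T.Resize((size, size)), T.ToTensor(),
                      T.Normalize(mean=[0.5] * 3, std=[0.5] * 3)])

# for size in SCHEDULE:
#     loader = build_loader(photo_dir, icon_dir, transforms_for(size))
#     train_cyclegan(model, loader)   # fine-tune the same nets at this scale
\end{verbatim}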
Figure~\ref{fig:iconified-with-icon} shows the iconified results. The top row shows the results on training samples (as noted in Section~\ref{sec:ex1}, showing the results on training samples is still reasonable, since our framework is based on CycleGAN and there is no ground truth). The results in the orange box of the bottom row are for untrained samples (collected from copyright-free image sites). The iconified images show reasonable abstraction from the original photo images, which makes them different from binarization and edge-extraction images. \par Although the iconified images are promising as hints for icon design, the abstraction is not as strong as in Figure~\ref{fig:person_result} of the first task. In addition, the iconified results differ from our ``standard'' icons. For example, the iconified doughnut and clock images in Figure~\ref{fig:iconified-with-icon} differ from the standard doughnut and clock icons in Figure~\ref{fig:icon_sample}, respectively. Since there is neither a common rule nor a strong trend in designing the standard icons of various objects, such differences are expected.\par The results in the blue box of Figure~\ref{fig:iconified-with-icon} are typical failure cases. From left to right, the first (orange) and second (keyboard) cases show too much abstraction. Since the original photo images are rather plain, the iconified results become rough contour images. The third (car) case shows just a fragment of a car, and the result cannot represent any car-like shape. The fourth (person) shows blob-like spurious noise, which is caused by insufficient training steps; in fact, in the early steps of CycleGAN training, we often find such failures. \par The last failure (hot dog) is an interesting but serious case. Although the abstraction has been made appropriately, we cannot identify the iconified result as a hot dog. This case suggests that we need to be careful in selecting the photo image for making an icon: a hot dog has its best appearance, shape, posture, and view angle for a legible icon. Non-legible iconified results occur for other objects for the same reason. \subsection{Iconify general object photos with logos} \label{sec:ex3} Figure~\ref{fig:iconified-with-logo} shows the iconified results by CycleGAN trained with logo images from LLD\cite{LLD}. The top row shows the results on training samples (i.e., the object images from MS-COCO), and the orange box in the bottom row shows the results on untrained samples. The photo images are converted into illustration-like images, which confirms that CycleGAN can generate color icons. In some iconified results, the outline (i.e., edges) of the object is emphasized. \par Compared with the second task, the legibility of the icon images is greatly improved by color. For example, the hot dog icon in the top row shows better legibility than its black-and-white version in Figure~\ref{fig:iconified-with-icon}. Other iconified results also depict their original objects more clearly than the black-and-white versions, even though the colors in the iconified images are not the same as the original object colors. \par In the blue box of Figure~\ref{fig:iconified-with-logo}, five typical failure cases are shown: from left to right, no significant change, too much abstraction, a text-like icon, text-like spurious noise, and blob-like spurious noise. The first case often occurs when the input photo shows a large object with no background part or a single-color object. The second occurs for fragmentary objects. The third occurs for flat objects, possibly because many logo images in LLD contain a text part. \section{Conclusion and future work} In this paper, we experimentally showed that the transformation of natural photos into icon images is possible by using generative adversarial networks (GANs). In particular, CycleGAN~\cite{CycleGAN} has a sufficient ``abstraction'' ability to generate icon-like images. For example, CycleGAN can generate person icons where each head is represented as a plain circle separated from the body part. From the qualitative evaluations, we can expect that the generated (i.e., iconified) images will give hints for designing new icons for a given object, although the iconified images sometimes show unnecessary artifacts or severe deformations.\par As future work, a subjective or objective evaluation of the quality of the iconified images should be conducted. Finding a larger icon dataset is also necessary to improve the quality. A more interesting task is the analysis of the trained GANs to understand how the abstraction is made; this will deepen our understanding of the strategies of professional graphic designers. \begin{acks} This work was supported by JSPS KAKENHI Grant Number JP17H06100. \end{acks}
\section{Introduction} Structural materials are used in a variety of applications with widely ranging, often complex mechanical and chemical requirements. For example, a car's crumple zone must retain its shape and stability during normal operation while irreversibly deforming during a collision. Several aspects determine the suitability of a material for a given application: for example, its ductility, strength, and toughness. All of these tend to depend in nontrivial ways upon deformation conditions such as strain protocol and strain rate, with corresponding differences in maximal energy dissipation and thermal response. This diversity of requirements produces a need for materials with tunable mechanical properties. Incorporation of supramolecular complexes into polymeric systems provides a route to address these demands. The integration of thermoreversibly associating groups into polymers produces a wide variety of complex behavior arising from the finite lifetime of the ``sticky'', thermoreversible bonds.\cite{brunsveld01,rotello08,annable93,binder07,tanaka02,seiffert12} Examples of thermoreversible bonds include $\pi$-$\pi$-stacking,\cite{burattini10,burattini11,colquhoun02,sivakova05} hydrogen bonding,\cite{cordier08,Edwards2013,colquhoun12,vanbeek07,hirschberg99} and metal ligand bonding.\cite{burnworth08,burnworth11,yount03,xu11,el-ghayoury03,schmatloch02,schmatloch03,fustin07,kumpfer10,doi:10.1021/ma401077d} In associating polymer systems (APs), the presence of thermoreversible bonds leads to exquisitely tunable rheological properties, particularly when the product $\dot{\epsilon}\tau_{sb}$ of the strain rate $\dot{\epsilon}$ and sticky bond lifetime $\tau_{sb}$ is of order unity.\cite{yount03,yount05,loveless05,hoy09} AP networks are also capable of dramatic ``self-healing'' under a variety of conditions, e.g., after fracture.\cite{cordier08,colquhoun12,burnworth11} Such properties have led to an explosion of interest in these systems' rheology over the past decade, in systems ranging from dilute solutions to dense melts. Most studies to date have focused on melts well above their glass transition temperature $T_g$. However, glassy associating polymer systems are also of great interest for their potential as energy-dissipating materials. Potential benefits include rate and temperature dependencies that are stronger than those found in non-associating glassy systems, and thermoplastic-elastomer-like response when the sticky bonds form a percolating network (similar to that formed by the chemical crosslinks in thermosets). This response can be achieved in systems that are easily melt-processable when the sticky bonds are sufficiently weak that no such percolating network is present at processing temperatures above $T_g$. Complex, nontrivial plastic flow and fracture behavior can arise from the fact that sticky bonds behave like transient covalent bonds and produce a correspondingly transient entanglement network. The latter is of special interest due to its potential to produce self-healing materials with enhanced fracture toughness arising from recombination of bonds that have broken during deformation, a phenomenon with no analogue in non-associating polymer systems.
Analytic and quasi-analytic approaches to AP dynamics and mechanics, e.g.\ Refs.\ \cite{leibler91,tanaka92,rubinstein98,rubinstein01,tanaka02,semenov06,indei07,semenov07}, have made many useful, experimentally verifiable predictions, including nonlinear behaviors such as shear thickening and strain hardening.\cite{pellens04,pellens04b,tripathi07} However, for the sake of tractability, theories have generally neglected one or more features of AP systems that are likely essential to capturing their behavior under certain ambient conditions. For example, as temperature drops towards $T_g$, attractive, non-associative interactions, such as van der Waals forces between non-sticky monomers, become increasingly important.\cite{baschnagel05} For such systems, molecular simulations are necessary to capture the essential features. In this paper we use hybrid molecular dynamics/Monte Carlo simulations to examine the glassy mechanical response of systems composed of model trivalent dendrimers. Such molecules are of interest as simple, small-molecule building blocks for AP glasses\cite{cordier08} that offer advantages in terms of processability, reversibility, and functionality. We show that these systems exhibit a complex mechanical response wherein the sticky bond thermodynamics, temperature $T$, chemical kinetics, and strain rate are all relevant. We examine deformation through fracture under two deformation modes commonly employed in experiments, uniaxial stress and uniaxial strain, and show that different sticky bonding parameters optimize different quantities, such as strength, ductility, work-to-fracture, and the tendency for ``self-healing'' during deformation. \begin{figure} \includegraphics[width=2.5in]{fig1.pdf} \caption{Coarse-grained model of trivalent dendrimers; green monomers represent the sticky groups. The ``floppy ears'' serve as a qualitative, coarse-grained representation of volume-excluding moieties.} \label{fig:molecules} \end{figure} \section{Model and Methods} We employ the hybrid molecular dynamics/Monte Carlo (MD/MC) algorithm described at length in Ref.~\cite{hoy09} as well as further below. A generic trivalent model molecule is shown in Fig.~\ref{fig:molecules}. The red spheres in the model represent unreactive, linking components. They have no angle potentials and merely serve to exclude volume and define the general topology. They therefore do not represent any chemically distinct species, but rather any chemical configuration that realizes the coarse topology and is sufficiently flexible. The green spheres in Fig.~\ref{fig:molecules} represent reactive or associative components that can bond with each other thermoreversibly. These associating, ``sticky'' monomers (SMs), again, do not represent any specific chemistry. The terminating red spheres serve as volume-excluding moieties that provide proper separation of the SMs. Realizations of such a system may be $CH((CH_2)_nL(CH_2)_x)_3$ or $N((CH_2)_nL(CH_2)_x)_3$ dendrimers with appropriately large $n$ and $x$ \cite{cordier08,Edwards2013}. Here, for the purposes of modeling generic properties, the size of the SMs and the mobility of ``open'' (unbonded) SMs are equal to those of regular monomers. We employ the ``Y'' architecture as the simplest possible model of a dendrimer; although the arms are far too short to be entangled, the overall mechanical response of the glassy systems described below indicates that the response is entangled-like and thus not (to first order) architecture-specific.
All monomers have mass $m$ and interact via the truncated and shifted Lennard-Jones (LJ) potential $U_{LJ}(r) = 4u_{0}[(a/r)^{12} - (a/r)^{6} - (a/r_{c})^{12} + (a/r_c)^{6}]$, where $r_{c}=2^{7/6}a$ is the cutoff radius and $U_{LJ}(r) = 0$ for $r > r_{c}$. Covalent bonds between adjacent monomers on a chain are modeled using the finitely extensible nonlinear elastic potential $U_{FENE}(r) = -(1/2)(kR_{0}^2) {\rm ln}(1 - (r/R_{0})^{2})$, with the canonical\cite{kremer90} parameter choices $R_{0} = 1.5a$ and $k = 30u_{0}/a^{2}$. In this study, following the majority of bead-spring studies on permanently crosslinked systems (e.g., Refs.\ \cite{combinedgrest90,svaneborg08}), we employ flexible chains with no angular potential. We express all quantities in units of the LJ bead diameter $a$, intermonomer energy $u_{0}$, and the LJ time $\tau_{LJ} = \sqrt{ma^{2}/u_{0}}$. \begin{figure} \includegraphics[width=3.375in]{fig2.pdf} \caption{Interaction potential for sticky monomers as a function of bond length. The blue line represents the Lennard-Jones term, the red line represents $U_{sb}(h,r)$ for $h = 10u_0$, and the green line represents the total bonded potential for a ``closed'' SB. The orange vertical line represents the $r$-dependent energy change for SB association.} \label{fig:interactionpot} \end{figure} Sticky bonds (SBs) interact via the same potential employed in Ref.\ \cite{hoy09}, i.e. \begin{equation} \begin{array}{cccc} U_{sb}(r) & = & U_{FENE}(r) - h, & r < R_0\\ & = & 0, & r > R_0 \end{array} \label{eq:ufenesb} \end{equation} where $h$ is the thermodynamic strength of the sticky bonds (Fig.~\ref{fig:interactionpot}). Newton's equations of motion are integrated using MD with a time step $\delta t = 0.008\tau_{LJ}$, and sticky bonds are formed and broken using standard Metropolis Monte Carlo\cite{hoy09}. MC moves are executed every 25 MD timesteps, i.e.~every $\tau_0 = 0.2\tau_{LJ}$. A typical SB pair is considered for dissociation (or an SM pair for association) once every $\tau_{MC}$. As in Ref.~\cite{hoy09}, the use of different $\tau_{MC}$ corresponds in a qualitative, coarse-grained manner to modeling ligands $L$ with different chemical kinetics; for example, bulky ligands would have slower kinetics.\cite{yount03,yount05,loveless05} At each MC timestep, a fraction $f_{MC}$ of open SM pairs are considered for SB formation and the same fraction of closed SB pairs are considered for dissociation. The characteristic kinetic time\cite{hoy09} is $\tau_{MC} = \tau_0/f_{MC}$, e.g.\ for $f_{MC} = 0.002$, $\tau_{MC} = 100\tau_{LJ}$. SM association is binary; SMs are bonded to either zero or one other SM at any given time. Thus the model has second-order association (i.e.\ $2A \to A_2$) and first-order dissociation kinetics ($A_2 \to 2A$), where $A$ represents a sticky monomer.
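For concreteness, one such MC sweep can be sketched as follows. This is a simplified sketch in reduced LJ units: the pair bookkeeping, which enforces the binary-bonding rule and restricts candidates to SM pairs within the FENE range, is assumed to be handled by the surrounding MD code, and the pair-object names are placeholders.
\begin{verbatim}
import math, random

def u_fene(r, k=30.0, R0=1.5):
    # U_FENE(r) = -(1/2) k R0^2 ln(1 - (r/R0)^2), in LJ units.
    return -0.5 * k * R0 * R0 * math.log(1.0 - (r / R0) ** 2)

def u_sb(r, h, R0=1.5):
    # Sticky-bond potential: U_FENE(r) - h inside the FENE range, else 0.
    return u_fene(r) - h if r < R0 else 0.0

def mc_sweep(closed_pairs, open_pairs, h, kT, f_mc):
    # Metropolis moves on a fraction f_mc of closed (dissociation)
    # and open (association) candidate pairs.
    for p in random.sample(closed_pairs, int(f_mc * len(closed_pairs))):
        dE = -u_sb(p.r, h)              # energy change upon dissociation
        if dE <= 0.0 or random.random() < math.exp(-dE / kT):
            p.dissociate()
    for p in random.sample(open_pairs, int(f_mc * len(open_pairs))):
        dE = u_sb(p.r, h)               # energy change upon association
        if dE <= 0.0 or random.random() < math.exp(-dE / kT):
            p.associate()
\end{verbatim}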
We prepare our samples as follows. All systems have $N_{ch} = 16000$ chains of the architecture shown in Fig.~\ref{fig:molecules} and are prepared at a monomer density $\rho = 0.85a^{-3}$. Periodic boundary conditions are applied along all three directions of simulation cells that are initially cubic with side lengths $L_x=L_y=L_z=L_0$. We thoroughly equilibrate systems at $T=T_{eq}=1.125$ and zero pressure for several times the sticky bond lifetime $\tau_{sb}(h,\tau_{MC},T)$, verifying that the fraction of ``closed'' SBs as a function of equilibration time $t$, $p_c(t)$, has plateaued according to fits of $p_c(t)$ to the function\cite{hoy09} \begin{equation} p_c(t) = d-\frac{\left(d^2-1\right) \tanh \left(2z\sqrt{d^2-1} t \right) + d\sqrt{d^2-1}}{d \tanh \left(2z\sqrt{d^2-1} t \right)+\sqrt{d^2-1}}, \label{eq:specpoft} \end{equation} i.e.\ $p_c^{eq} \simeq d(h)$, where $z = \rho c_{st} k_f(h)$, $d = 1 + k_b(h)/4z$, $c_{st}$ is the SM concentration (i.e.\ the mole fraction, $c_{st}=3/16$), and $k_f(h)$ and $k_b(h)$ are respectively the rate constants for SM association and SB dissociation. We then perform a slow, zero-pressure quench (rate $\dot{T} = -10^{-5}/\tau_{LJ}$) to a final temperature $T_f < T_g \simeq 0.43$. Temperature is controlled using a Langevin thermostat. At such temperatures, more than 99.5\% of all SMs are bonded into SBs. We focus on the mechanical properties of systems deep in the glassy state, i.e.\ $T_f$ well below $T_g$. After quenching, we perform mechanical-properties tests using two standard deformation protocols.\cite{rottler03,rottler03b} Uniaxial stress and uniaxial strain runs are performed at $T_f=0.3\approx 0.7 T_g$ at two constant tensile strain rates, $\dot{\epsilon} = \dot{L}_z/L_{0} = 10^{-5.5}/\tau_{LJ}$ and $\dot{\epsilon} = \dot{L}_z/L_{0} = 10^{-4}/\tau_{LJ}$. In the uniaxial stress simulations, pressure along the transverse directions is maintained at zero using a Nose-Hoover barostat, and deformation takes place at nearly constant volume. We thus report stress-strain curves against the Green-Lagrange strain $g(\lambda) = \lambda^2 - 1/\lambda$, where $\lambda=L_z/L_0$; systems having a response analogous to linear rubber elasticity produce stress-strain curves linear in $g(\lambda)$, while chain stretching produces supralinear behavior (``Langevin'' hardening\cite{treloar75}) and SB breaking produces sublinear behavior. Uniaxial strain simulations are performed at constant cross-sectional area ($L_x = L_y = L_0$), and stress-strain curves are reported vs.\ $\lambda$. As described below, we will examine the mechanical properties of associating dendrimer glasses for a variety of $h$, $\tau_{MC}$ (Table \ref{tab:systems}), and deformation protocols, and show nontrivial dependencies on each. In particular, we will show that fracture toughness and ductility cannot both be optimized by a single choice of $h$ and $\tau_{MC}$ for different deformation protocols, and relate this to protocol-dependent self-healing. During deformation, we monitor the stress $\sigma$ (reported in units of $u_0/a^{3}$), the total mechanical work performed up to strain $\epsilon=\ln\lambda$, \begin{equation} W(\lambda) = \int_{0}^{\lambda} \sigma \,d\lambda', \label{eq:work} \end{equation} and several metrics of SB formation and recombination. $P_{surv}(\epsilon)$ is the fraction of initially closed sticky bonds that persist continuously from zero strain through strain $\epsilon$. $P_{recomb}(\epsilon)$ is the fraction of SM pairs $A-B$ (initially bonded at $\epsilon = 0$) that have broken and recombined by strain $\epsilon$. Finally, $P_{switch}(\epsilon)$ is the fraction of $A-B$ pairs (initially bonded at $\epsilon = 0$) that have performed a ``partner switch'' (i.e.\ formed a bond $A-C$, where $C \neq B$) by strain $\epsilon$. As we will show, $P_{surv}$, $P_{recomb}$, and $P_{switch}$ are important metrics for understanding fracture and self-healing; they can be tabulated directly from the bond trajectories, as sketched below.
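A minimal sketch of this bookkeeping follows; the per-monomer data layout is our assumption, and pair-level bookkeeping would simply halve the (symmetric) counts.
\begin{verbatim}
def bond_metrics(history):
    # history[t][a] = partner of sticky monomer a at frame t (None if open).
    initial = {a: b for a, b in history[0].items() if b is not None}
    surv = recomb = switch = 0
    for a, b0 in initial.items():
        traj = [frame.get(a) for frame in history]
        if all(p == b0 for p in traj):
            surv += 1                  # persisted continuously: P_surv
            continue
        t_break = next(i for i, p in enumerate(traj) if p != b0)
        later = [p for p in traj[t_break:] if p is not None]
        if b0 in later:
            recomb += 1                # broke and re-formed A-B: P_recomb
        if any(p != b0 for p in later):
            switch += 1                # formed A-C with C != B: P_switch
    n0 = float(len(initial))
    return surv / n0, recomb / n0, switch / n0
\end{verbatim}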
\begin{table}[htbp] \caption{Systems employed in this study. ``W'' stands for ``weak'' and ``S'' for ``strong'' sticky bonds, ``f'' for ``fast'' and ``s'' for ``slow'' kinetics, and the numerals at the end indicate the applied strain rates.} \begin{ruledtabular} \begin{tabular}{lcccl} System Name & $h$ & $\tau_{MC}$ & $\dot{\epsilon}$ & \parbox[t]{8em}{Line convention in plots.}\\ Wf4 & 10 & 1 & $10^{-4}$ & red, solid, thick\\ Ws4 & 10 & 100 & $10^{-4}$ & red, dashed, thick\\ Sf4 & 15 & 1 & $10^{-4}$ & blue, solid, thick\\ Ss4 & 15 & 100 & $10^{-4}$ & blue, dashed, thick\\ Wf55 & 10 & 1 & $10^{-5.5}$ & red, solid, thin\\ Ws55 & 10 & 100 & $10^{-5.5}$ & red, dashed, thin\\ Sf55 & 15 & 1 & $10^{-5.5}$ & blue, solid, thin\\ Ss55 & 15 & 100 & $10^{-5.5}$ & blue, dashed, thin\\ \end{tabular} \end{ruledtabular} \label{tab:systems} \end{table} \section{Results} \begin{figure} \includegraphics[width=3.25in]{fig3a.pdf} \includegraphics[width=3.25in]{fig3b.pdf} \caption{Mechanical responses of the studied systems deformed at $T = 0.3$. Solid and dashed lines indicate results for $\tau_{MC} = \tau_{LJ}$ and $\tau_{MC} = 100\tau_{LJ}$, respectively. Blue and red curves indicate results for $h=15$ and $h=10$, respectively. Thick and thin lines show results for $\dot{\epsilon}=10^{-4}$ and $\dot{\epsilon}=10^{-5.5}$, respectively. Top: uniaxial tensile stress vs.\ Green-Lagrange strain, $g(\lambda) = \lambda^{2}-1/\lambda$; Bottom: uniaxial tensile strain deformation, stress vs.\ stretch $\lambda = L_z/L_0$. } \label{fig:stressstrain2} \end{figure} In this section, we analyze the effects of SB thermodynamics, kinetics, and deformation rate on the nonlinear mechanics of thermoreversibly associating dendrimer glasses. Figure \ref{fig:stressstrain2} shows results for uniaxial stress and strain deformation for all systems considered in this study. Note that for both protocols, although our systems possess no physical entanglements, they show a response typical of thermoplastic elastomers or entangled glasses\cite{haward97}: initial yield followed by strain hardening (for uniaxial stress) or craze drawing (for uniaxial strain). Results for systems without sticky monomers (not shown) show far lower stresses, indicating that SBs are the dominant contributor to the toughness. This is unsurprising since our systems have no topological entanglements, but serves to illustrate the dramatic influence of SBs on the glassy mechanical response. The top panel of Fig.\ \ref{fig:stressstrain2} shows results for uniaxial stress. At the lower strain rate, which is closer to the quasi-static limit\cite{rottler03b}, strong sticky bonds and slow kinetics (blue, dashed curves) yield a more ``elastic'' response, with a relatively high maximum stress and a relatively low fracture strain. In contrast, weak sticky bonds and fast kinetics display the opposite trend: much more ``plastic'' behavior, with a lower stress maximum and a higher fracture strain. Results for uniaxial strain are illustrated in the bottom panel of Fig.\ \ref{fig:stressstrain2} and show markedly different trends. The ``strong, slow'' (Ss4 and Ss55) systems both support a higher stress at all strains beyond yield, a significantly larger fracture strain, and a clearly larger toughness (Table \ref{tab:WPSPS}) than their counterparts at the same strain rate. Note that the uniaxial strain protocol is inherently dilational (system volume $V=\lambda V_0$).
Such a protocol naturally suppresses sticky bond recombination, partner switching, and self-healing, since SBs are more dispersed. From this result, we can infer that SB partner switching can play a dominant role in controlling material toughness (cf.\ Fig.\ \ref{fig:sbps}). \begin{table*} \caption{Numerical values of the maximum work ($W_{max}$), the fraction of initial SBs surviving ($P_{surv}$), and the fraction of SBs that perform a ``partner switch'' ($P_{switch}$) for all systems. Values of $W_{max}$ are calculated post-fracture, and the values of $P_{surv}$ and $P_{switch}$ are taken at the strain where the fracture rate $\partial P/\partial\lambda$ is maximized, i.e.\ at a stretch $\lambda\simeq 4$ ($g(\lambda)\simeq 15$).} \begin{ruledtabular} \begin{tabular}{lcccccc} System & $W_{max}$ & $W_{max}$ & $P_{surv}$ & $P_{surv}$ & $10^{3}P_{switch}$ & $10^{3}P_{switch}$\\ & [uniaxial stress] & [uniaxial strain] & [uniaxial stress] & [uniaxial strain] & [uniaxial stress] & [uniaxial strain]\\ Wf4 & 202 & 23.3 & 0.962 & 0.980 & $10.4$ & $11.5$\\ Ws4 & 235 & 35.8 & 0.979 & 0.984 & $1.67$ & $6.71$\\ Sf4 & 267 & 60.5 & 0.978 & 0.976 & $3.67$ & $11.2$ \\ Ss4 & 295 & 85.4 & 0.989 & 0.988 & $0.667$ & $1.00$ \\ Wf55 & 200 & 14.0 & 0.935 & 0.955 & $16.1$ & $26.0$\\ Ws55 & 228 & 36.4 & 0.977 & 0.972 & $5.04$ & $1.5$\\ Sf55 & 252 & 26.7 & 0.961 & 0.960 & $5.01$ & $1.5$\\ Ss55 & 218 & 56.1 & 0.992 & 0.980 & $1.67$ & $7.1$\\ \end{tabular} \end{ruledtabular} \label{tab:WPSPS} \end{table*} The data in Figure \ref{fig:stressstrain2} can also be used to isolate the effect of thermodynamics ($h/k_BT$) from that of chemical kinetics. Results for the lower strain rate are as follows: for ``strong'' $h=15$ systems (thin blue curves, Ss55/Sf55), faster kinetics (solid curve, Sf55) lead to greater ductility in uniaxial stress, but substantially lower toughness for uniaxial strain. The higher maximum stress and lower fracture strain in the top panel indicate that slower kinetics lead to greater elasticity; in contrast, faster kinetics lead to greater self-healing effects. For $h=10$ (thin red curves, Ws55/Wf55), results are consistent for both deformation protocols: slower kinetics lead to greater toughness. For uniaxial strain (but not stress), it is clear that larger $\tau_{sb}$ always leads to greater toughness (cf.\ Fig. \ref{fig:work}). One interpretation of this result is that the ``stronger'' and ``slower'' systems are in the limit $\dot{\epsilon}\tau_{sb} \gg 1$, where fracture is activated by $\lambda$-dependent stretching of the covalent bonds, i.e.\ the sticky bonds behave similarly to (breakable) chemical crosslinks and the overall behavior is rather like that of a standard (non-associating) thermoplastic elastomer. Note that in all cases studied here, SB breaking is stress- or strain-activated, since $h/k_BT$ and the quiescent values\cite{hoy09} of $\tau_{sb}$ are very large. Next we examine the effect of increased strain rate for systems with the same $h$ and $\tau_{MC}$. For uniaxial stress, for all systems the peak stress increases and the fracture strain decreases at the increased rate $\dot{\epsilon}=10^{-4}$ (thick curves). The former is expected due to a greater effective viscosity at the higher strain rate (deformation is fast relative to $\tau_{sb}$, so fewer SB relaxation events occur per unit strain), and the latter is also expected since fracture is stress- or strain-activated. This competition between higher peak stress and lower fracture strain has nontrivial consequences for toughness, which we examine quantitatively below.
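The toughness comparisons below are based on numerically integrating Eq.~(\ref{eq:work}) along the recorded stress-strain curves; a minimal trapezoidal-rule sketch (array names are ours) is:
\begin{verbatim}
import numpy as np

def work_curve(lam, sigma):
    # W(lambda) = int sigma dlambda', accumulated from the undeformed
    # state (lam[0] = 1); W_max is the final plateau value.
    dW = 0.5 * (sigma[1:] + sigma[:-1]) * np.diff(lam)
    return np.concatenate(([0.0], np.cumsum(dW)))
\end{verbatim}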
Uniaxial strain again shows trends that differ from uniaxial stress. For all systems (with the possible exception of weak/slow), both the peak stress and the fracture strain are larger at the higher strain rate. The difference may be attributable to the greater chain stretching at fixed $\lambda$ for uniaxial strain (affine stretching of chains leads to larger extension in uniaxial strain). Another possible reason for the differences between the deformation protocols is that the cavitation and craze fibril formation\cite{kramer83,rottler03} processes occurring for uniaxial strain have no counterpart in uniaxial-stress deformation. It is interesting that the apparent fracture toughness of these systems is clearly a coupled function of thermodynamics, chemical kinetics, and deformation protocol including strain rate. However, only qualitative insights may be obtained by visual inspection of stress-strain curves, and the analysis is further complicated by the abovementioned competitions. We therefore turn to a quantitative, comparative study of the toughness (integrated work-to-fracture $W_{max}$) of systems with these different parameters. Figure \ref{fig:work} plots $W(\epsilon)$ for all systems. At small strains, for both deformation protocols, the work functions lie on the same line of (elastic) response. As strain increases, the work drops below the elastic curve; this drop-off occurs at larger strains for larger $h$ as well as $\tau_{MC}$, and at smaller strains for lower $\dot{\epsilon}$. The drop-off coincides with (i.e.\ occurs at the same $\lambda$ as) the onset of sticky bond breaking (cf.\ Fig. \ref{fig:ISB}). Finally, at large strains, the work plateaus at $W=W_{max}$ (i.e.\ the fracture toughness of the system) as fracture ensues. \begin{figure} \includegraphics[width=3in]{fig4a.pdf} \includegraphics[width=3in]{fig4b.pdf} \caption{Top: Work per unit volume $W(\epsilon)$ vs.\ $g(\lambda)$ for uniaxial stress tests. Bottom: $W(\epsilon)$ vs.\ $\lambda$ for uniaxial strain. Line colors, widths, and dashing are the same as in Fig.~\ref{fig:stressstrain2}.} \label{fig:work} \end{figure} Values of $W_{max}$ are ranked in decreasing order as follows. For uniaxial stress: (1) Ss4, (2) Sf4, (3) Sf55, (4) Ws4, (5) Ws55, (6) Ss55, (7) Wf4, (8) Wf55. The work to fracture varies by a factor of $\sim 1.5$ from maximum to minimum. As expected and shown in Table \ref{tab:WPSPS}, the toughest systems are the ``strong, slow'' systems deformed at the higher strain rate (Ss4) and the most brittle systems are the ``weak, fast'' systems deformed at the lower strain rate (Wf55). However, these dependencies are not monotonic; systems with weaker SBs under high-strain-rate conditions can be tougher than systems with stronger SBs deformed at low strain rates, illustrating the complexity of these systems' mechanical response. As we have surmised above, the fracture toughness shows a deformation-protocol dependence. For uniaxial strain, the ordering of $W_{max}$ from maximum to minimum (per Table \ref{tab:WPSPS}) is different: (1) Ss4, (2) Sf4, (3) Ss55, (4) Ws55, (5) Ws4, (6) Sf55, (7) Wf4, (8) Wf55. Further, in contrast to the small fractional variation in $W_{max}$ for uniaxial stress, for uniaxial strain the toughest systems' $W_{max}$ is about six times higher (Table \ref{tab:WPSPS}) than that of the most brittle systems. We expect this more dramatic difference is closely associated with the dilatative nature of crazing.
Specifically, the toughest (Ss and Sf) systems (similarly to entangled polymer glasses\cite{kramer83,rottler03}) show strain hardening at the end of the craze drawing plateau, indicating that chains are stretching between SB junctions, while the more brittle systems fracture before the strain hardening regime is reached. \begin{figure} \includegraphics[width=3in]{fig5a.pdf} \includegraphics[width=3in]{fig5b.pdf} \caption{Top: Percentage of broken SBs vs g($\lambda$) for uniaxial stress. Bottom: Percentage of broken SBs vs $\lambda$ for uniaxial strain. Here ``breaking'' is defined through single events, i.e.\ the plot shows the percentage of SB pairs that ``survive'' through the entire deformation up to the given strain. Line colors, widths and dashing are the same as in Fig.~\ref{fig:stressstrain2}.} \label{fig:ISB} \end{figure} To relate these differences to the underlying associating polymer physics, we next examine measures of sticky bond breaking and partner-switching. Figure \ref{fig:ISB} illustrates the breaking of sticky bonds, i.e.\ the strain-dependent SB survival probability $P_{surv}$. The onset of SB-breaking closely corresponds to the divergence of stress-strain curves for different systems that occurs beyond the elastic limit (Fig.~\ref{fig:stressstrain2}). This generally occurs at lower strains for smaller $h$, faster kinetics, and lower strain rates. Differences can be quite dramatic; e.g.\ the differences between uniaxially stressed $h=10$ systems at low and high strain rates are large because \textbf{(i)} more slowly deformed systems (*55) are in a regime where $\dot{\epsilon}\tau_{sb} < 1$, and \textbf{(ii)} more slowly deformed systems are more ductile. In all cases, the SB-breaking rate accelerates to a maximum (indicated by the maximum slopes in Fig.~\ref{fig:ISB}) as fracture initiates, then plateaus after fracture as the systems are no longer under stress. One interesting if unsurprising effect is that $P_{surv}$ at the termination of deformation can be much lower for uniaxial stress. This is because fracture under uniaxial stress can be much less localized than crazing-type fracture. Visual inspection (Figure \ref{fig:movies}) shows that systems with high $P_{surv}$ fracture along a single plane with a single large void, while systems with lower $P_{surv}$ form many smaller voids and fracture in multiple locations simultaneously. \begin{figure} \centering \begin{subfigure}{1.625in} \centering \includegraphics[width=\linewidth]{fig6a.pdf} \end{subfigure} \begin{subfigure}{1.625in} \centering \includegraphics[width=\linewidth]{fig6b.pdf} \end{subfigure} \caption{Relating $P_{surv}$ to fracture geometry. Left: The systems with the highest fraction of surviving bonds fracture along a single plane via formation of a single large void. Right: The systems with the lowest fraction of surviving bonds exhibit a more complex fracture geometry. Both images show only small cross-sections of systems; regions which are not shown remain in a dense, nearly undisturbed state.} \label{fig:movies} \end{figure} \begin{figure} \includegraphics[width=3in]{fig7a.pdf} \includegraphics[width=3in]{fig7b.pdf} \caption{Top: SB partner switching probability $P_{switch}$ vs. $g(\lambda)$, for uniaxial stress tests. Bottom: SB partner switching vs $\lambda$ for uniaxial strain. Partner switching occurs when a sticky bond breaks and subsequently rebonds, but not to its original partner.
Line colors, widths and dashing are the same as in Fig.~\ref{fig:stressstrain2}.} \label{fig:sbps} \end{figure} Next we turn to other measures of stress relaxation. Sticky-bond recombination, wherein a bond pair $A-B$ (where $A$ and $B$ are distinct sticky monomers) breaks and then reforms, is a fast stress-relaxation mechanism. While the bond is ``open'', stress can locally relax, and the stress previously borne by the open bond is transferred to other closed bonds. This eases the onset of the mechanical instability corresponding to fracture. We find that SB recombination is important only for the lower strain rate and in systems with fast kinetics in the uniaxial stress protocol (thin, solid curves, see Fig.~\ref{fig:sbps}). These trends are expected since recombination requires a finite amount of time (i.e.\ of order $\tau_{MC}$), and higher strain rates and slower kinetics both cause broken $A-B$ pairs to move further away from one another before they can recombine. Far more significant from the point of view of stress relaxation in these systems is sticky-bond partner exchange, wherein an $A-B$ pair breaks and $A$ recombines with a different sticky monomer $C$. This is typically an irreversible process corresponding to plastic deformation. As shown in Figure \ref{fig:sbps}, the onset of partner switching corresponds to the onset of bond-breaking. The slope is similarly maximized when systems are plastically deforming (Table \ref{tab:WPSPS}). SB partner switching is most significant for ``weak, fast'' bonds and at low strain rates (Wf55). As shown in Fig.\ \ref{fig:stressstrain2}, these systems also display the greatest degree of plastic flow, i.e.\ flow at nearly constant stress. We expect that SB partner switching is the primary mechanism of self-healing in these materials. In other words, partner switching enables greater ductility because it lets the material heal itself even while a finite strain rate is applied. \section{Conclusions} The properties of thermoreversibly associating polymer melts and glasses have attracted great interest over the past fifteen years on a variety of fronts. Of particular promise is the ability to control the thermodynamics and kinetics of the sticky bonds independently, e.g.~by varying the chemistry of metal-ligand groups forming the bonds\cite{yount03,yount05,loveless05,Rinderspacher201296,doi:10.1021/ma401077d}. Specifically, if $h$ is the binding energy of sticky monomers, $T$ is temperature, $\nu_0$ is a characteristic kinetic ``attempt rate'' determined by the geometry of the binding groups, and $\tau_{sb}$ is the lifetime of sticky bonds, then $\tau_{sb} = f(h/k_BT)/\nu_0$. In other words, $\tau_{sb}$ can be written as a product of factors controlled by thermodynamics (i.e.~$f(h/k_BT)$) and chemical kinetics. Frequency-dependent properties such as the shear modulus $G(\omega)$ can often be scaled by these kinetic rates, i.e.\ systems with different $\nu_0$ have the same $G(\omega/\nu_0)$.\cite{yount03,yount05,loveless05} However, this collapse can break down when $\nu_0$ is comparable to the characteristic relaxation rates $\tau_{pol}^{-1}$ of the parent chains in the absence of sticky bonding, in a complex regime of interplay between SB and parent-chain relaxation dynamics.\cite{hoy09} It is exactly this complex regime that we have studied here, with emphasis on the nonlinear mechanics of glassy systems.
We have characterized the mechanical properties of model thermoreversibly associating dendrimer glasses using a hybrid molecular dynamics/Monte Carlo method.\cite{hoy09} The short, unentangled ``Y'' architecture employed here is a simple model of a dendrimer with unentangled arms; the sticky bond network produces an entangled-like mechanical response that is not (to first order) architecture-specific. Such glasses are of interest because they are easily melt-processable yet can show remarkably high ductility and fracture toughness. We examined the entire range of mechanical response from the elastic regime through fracture. At small strains, all systems fall on a common stress-strain curve since SB-breaking has not yet initiated. Local ``yield'' (i.e.\ the divergence of stress curves for systems with different SB parameters) corresponds to the onset of SB-breaking and naturally occurs first in systems with thermodynamically weak SBs possessing fast chemical kinetics. All systems exhibit entangled-like response\cite{haward97,rottler03} at small and moderate strains because the sticky bonds act like (transient) chemical crosslinks. For larger strains, mechanical strength is maximized by ``strong, slow'' (Ss) sticky bonds, while ductility is maximized by ``weak, fast'' (Wf) SBs because they ``partner-switch'' in a dynamical self-healing process. Investigations of such healing processes during active deformation are in their infancy; to our knowledge, this is the first time they have been reported in thermoreversibly associating glasses. We have also examined toughness (work-to-fracture). In general, toughness is maximized for Ss systems and minimized for Wf systems. However, this result is nontrivial since Wf systems exhibit larger fracture strains under some deformation conditions. Strain-rate dependence studies show that these systems exhibit a complex rheology; our systems are often, but not always, tougher at higher strain rates, while they are more ductile at lower strain rates. This illustrates the need for further studies over a broad range of strain rates to examine the response under conditions ranging from quasi-static to shock loading. Future work will consider polymers of different (e.g.\ physically entangled) topology, varying SB concentration, response under other deformation protocols such as creep, and temperature dependence. \section{Acknowledgements} All simulations were performed using an enhanced version of LAMMPS.\cite{plimpton95} This work was partially funded by ARL contract TCN-11042 and the US ARL Enterprise for Multi-Scale Research of Materials. AS was supported by the REU program at USF (NSF Grant No.\ DMR-1263066). We would like to thank Yelena R. Sliozberg and Robert H. Lambeth for helpful discussions.
\section{Introduction} \label{sec:intro} Many dependent type theories support some form of inductive types. An inductive type is given by its constructors, along with an elimination principle which expresses that it is enough to consider the constructors when defining a function out of the inductive type. For example, the inductive type of natural numbers $\mathsf{Nat}$ is given by the constructors $\mathsf{zero}:\mathsf{Nat}$ and $\mathsf{suc}:\mathsf{Nat} \rightarrow \mathsf{Nat}$, and has the well-known induction principle: \[ \mathsf{Elim}\mathsf{Nat}:(P:\mathsf{Nat} \rightarrow \mathsf{Type})(pz: P\,\mathsf{zero})\big(ps:(n:\mathsf{Nat})\rightarrow P\,n\rightarrow P\,(\mathsf{suc}\,n)\big)(n:\mathsf{Nat})\rightarrow P\,n \] \noindent $P$ is a family of types (i.e.\ a proof-relevant predicate) over natural numbers, which is called the \emph{induction motive}. The arguments $pz$ and $ps$ are called the \emph{induction methods}. The behavior of induction is described by a \emph{computation rule} ($\beta$-rule) for each constructor and induction method: \begin{alignat*}{5} & \mathsf{Elim}\mathsf{Nat}\,P\,pz\,ps\,\mathsf{zero} && \equiv pz \\ & \mathsf{Elim}\mathsf{Nat}\,P\,pz\,ps\,(\mathsf{suc}\,n) && \equiv ps\,n\,(\mathsf{Elim}\mathsf{Nat}\,P\,pz\,ps\,n) \end{alignat*} Indexed families of types can also be considered, such as length-indexed vectors of $A$-elements $\mathsf{Vec}_A: \mathsf{Nat} \rightarrow \mathsf{Type}$. Mutual inductive types are yet another generalization, but they can be reduced to indexed families, where the index selects among the mutual types and classifies their constructors. Inductive-inductive types \cite{forsberg-phd} are mutual definitions where this reduction does not work: here a type can be defined together with a family indexed over it. An example is the following fragment of a well-typed syntax of a type theory, where the second type constructor, $\mathsf{Ty}$, is indexed over $\mathsf{Con}$, but constructors of $\mathsf{Con}$ also refer to $\mathsf{Ty}$: \begin{alignat*}{3} & \mathsf{Con} && : \mathsf{Type} && \text{contexts} \\ & \mathsf{Ty} && : \mathsf{Con} \rightarrow \mathsf{Type} && \text{types in contexts} \\ & \bullet && : \mathsf{Con} && \text{constructor for the empty context} \\ & \mathord{\hspace{1pt}\text{--}\hspace{1pt}}\rhd\mathord{\hspace{1pt}\text{--}\hspace{1pt}} && : (\Gamma:\mathsf{Con})\rightarrow\mathsf{Ty}\,\Gamma\rightarrow\mathsf{Con} && \text{constructor for context extension} \\ & \iota && : (\Gamma:\mathsf{Con})\rightarrow\mathsf{Ty}\,\Gamma && \text{constructor for a base type} \\ & \Pi && : (\Gamma:\mathsf{Con})(A:\mathsf{Ty}\,\Gamma)\rightarrow\mathsf{Ty}\,(\Gamma\rhd A)\rightarrow\mathsf{Ty}\,\Gamma \hspace{1em} && \text{constructor for dependent functions} \end{alignat*} There are two eliminators for this type: one for $\mathsf{Con}$ and one for $\mathsf{Ty}$. Both take the same arguments: two motives ($P:\mathsf{Con}\rightarrow\mathsf{Type}$ and $Q:(\Gamma:\mathsf{Con})\rightarrow P\,\Gamma\rightarrow\mathsf{Ty}\,\Gamma\rightarrow\mathsf{Type}$) and four methods (one for each constructor, which we omit).
\begin{alignat*}{6} & \mathsf{Elim}\mathsf{Con} && : (P:\dots)(Q:\dots)\rightarrow\dots\rightarrow(\Gamma:\mathsf{Con}) && \rightarrow P\,\Gamma \\ & \mathsf{Elim}\mathsf{Ty} && : (P:\dots)(Q:\dots)\rightarrow\dots\rightarrow(A:\mathsf{Ty}\,\Gamma) && \rightarrow Q\,\Gamma\,(\mathsf{Elim}\mathsf{Con}\,\Gamma)\,A \end{alignat*} Note that the type of $\mathsf{Elim}\mathsf{Ty}$ refers to $\mathsf{Elim}\mathsf{Con}$; for this reason this elimination principle is sometimes called ``recursive-recursive'' (analogously to ``inductive-inductive''). Higher inductive types (HITs, \cite[Chapter 6]{HoTTbook}) generalize inductive types in a different way: they allow constructors expressing equalities of elements of the type being defined. This enables, among other things, the definition of types quotiented by a relation. For example, the type of integers $\mathsf{Int}$ can be given by a constructor $\mathsf{pair}:\mathsf{Nat}\rightarrow\mathsf{Nat}\rightarrow\mathsf{Int}$ and an equality constructor $\mathsf{eq}:(a\,b\,c\,d:\mathsf{Nat})\rightarrow a+d=_\mathsf{Nat} b+c\rightarrow \mathsf{pair}\,a\,b=_{\mathsf{Int}}\mathsf{pair}\,c\,d$ targeting an equality of $\mathsf{Int}$. The eliminator for $\mathsf{Int}$ expects a motive $P:\mathsf{Int}\rightarrow\mathsf{Type}$, a method for the $\mathsf{pair}$ constructor $p:(a\,b:\mathsf{Nat})\rightarrow P\,(\mathsf{pair}\,a\,b)$ and a method for the equality constructor $\mathsf{eq}$. This method is a proof that given $e:a+d=_\mathsf{Nat} b+c$, $p\,a\,b$ is equal to $p\,c\,d$ (the types of which are equal by $e$). Thus the method for the equality constructor ensures that all functions defined from the quotiented type respect the relation. Since the integers are supposed to be a set (which means that any two equalities between the same two integers are equal), we would need an additional higher equality constructor $\mathsf{trunc}:(x\,y:\mathsf{Int})\rightarrow(p\,q:x=_\mathsf{Int} y)\rightarrow p=_{x=_\mathsf{Int} y} q$. HITs may have constructors of iterated equality types as well. With the view of types as spaces in mind, point constructors add points to spaces, equality constructors add paths and higher constructors add homotopies between higher-dimensional paths. Not all constructor expressions make sense. For example \cite[Example 6.13.1]{HoTTbook}, given an $f:(X:\mathsf{Type})\rightarrow X\rightarrow X$, suppose that an inductive type $\mathsf{Ival}$ is generated by the point constructors $\mathsf{a}:\mathsf{Ival}$, $\mathsf{b}:\mathsf{Ival}$ and a path constructor $\sigma:f\,\mathsf{Ival}\,\mathsf{a} =_{\mathsf{Ival}}f\,\mathsf{Ival}\,\mathsf{b}$. The eliminator for this type should take a motive $P:\mathsf{Ival}\rightarrow\mathsf{Type}$, two methods $p_a : P\,\mathsf{a}$ and $p_b : P\,\mathsf{b}$, and a path connecting elements of $P\,(f\,\mathsf{Ival}\,\mathsf{a})$ and $P\,(f\,\mathsf{Ival}\,\mathsf{b})$. However, it is not clear what these elements should be: we only have elements $p_a:P\,\mathsf{a}$ and $p_b:P\,\mathsf{b}$, and there is no way in general to transform these to have types $P\,(f\,\mathsf{Ival}\,\mathsf{a})$ and $P\,(f\,\mathsf{Ival}\,\mathsf{b})$. Another invalid example is an inductive type $\mathsf{Neg}$ with a constructor $\mathsf{con}:(\mathsf{Neg} \rightarrow \bot) \rightarrow \mathsf{Neg}$ where $\bot$ is the empty type. An eliminator for this type should (at least) yield a projection function $\mathsf{proj}: \mathsf{Neg} \rightarrow (\mathsf{Neg} \rightarrow \bot)$. Given this, we can define $u :\equiv \mathsf{con}\,(\lambda x .
\mathsf{proj}\, x\,x):\mathsf{Neg}$ and then derive $\bot$ by $\mathsf{proj}\,u\,u$. The existence of $\mathsf{Neg}$ would make the type theory inconsistent. A common restriction to avoid such situations is \emph{strict positivity}. It means that the type being defined cannot occur on the left-hand side of a function arrow in a parameter of a constructor. This excludes the above constructor $\mathsf{con}$. In this paper we propose a notion of signatures for higher inductive-inductive types (HIITs) which includes the above valid examples and excludes the invalid ones. Our signatures allow any number of inductive-inductive type constructors and possibly infinitary higher constructors of any dimension, and restrict constructors to strictly positive ones. They also allow equalities between type constructors, free usage of $\J$ (path induction) and $\mathsf{refl}$ in HIIT signatures, and mixing of type, point and path constructors in any order. The core idea is to represent HIIT specifications as contexts in a domain-specific type theory which we call the \emph{theory of signatures}. Type formers in the theory of signatures are restricted in order to enforce strict positivity. For example, natural numbers are defined as the three-element context \[ Nat:\mathsf{U},\,\,\, zero:\underline{Nat},\,\,\, suc : Nat \rightarrow \underline{Nat} \] where $Nat$, $zero$ and $suc$ are simply variable names, and underlining denotes $\mathsf{El}$ (decoding) for the Tarski-style universe $\mathsf{U}$. We also show how to derive induction and recursion principles for each signature. We use variants of \emph{syntactic logical relation translations} to compute notions of \emph{algebras}, \emph{homomorphisms}, \emph{displayed algebras} and \emph{displayed algebra sections}, and then define induction and recursion in terms of these notions. To our knowledge, this is the first proposal for a definition of HIITs. However, we do not provide complete (higher) categorical semantics for HIITs, nor do we show that initial algebras exist for specified HIITs. The present paper is an expanded version of our conference paper \cite{hiit}. In this version, we extend signatures with paths between type constructors, and in Section \ref{sec:morphisms} we also compute notions of homomorphisms from signatures. We also explain a coherence problem in interpreting syntaxes of type theories, and how it influenced the current paper, in Section \ref{sec:coherence} and Section \ref{sec:categorical}. \subsection{Overview of the Paper} We start by describing the theory of HIIT signatures in Section \ref{sec:signatures}. Here, we also describe the syntax for an external type theory, which serves as the source of constants which are external to a signature, like natural numbers in the case of length-indexed vectors. In Section \ref{sec:general}, we give a general definition of induction and recursion. In Section \ref{sec:coherence} we explain the choice of using syntactic translations in the rest of the paper. In Sections \ref{sec:algebras} to \ref{sec:sections}, we describe four syntactic translations from the theory of signatures to the syntax of the external type theory, respectively computing algebras, displayed algebras, homomorphisms, and sections of displayed algebras. In Section \ref{sec:categorical}, we consider extending the previous translations with additional components of a categorical semantics (e.g.\ identity and composition for homomorphisms), and explain why the approach in this paper does not make this feasible.
Section \ref{sec:formalization} describes the Agda formalization and the Haskell implementation. We conclude in Section \ref{sec:summary}. \subsection{Related Work} Schemes for inductive families are given in \cite{Dybjer97inductivefamilies,paulinmohring}, and for inductive-recursive types in \cite{dybjer00ir}. A symmetric scheme for both inductive and coinductive types is given in \cite{henning}. Basold et al.\ \cite{niels} define a syntactic scheme for higher inductive types with only 0-constructors and compute the types of induction principles. In \cite{nielsmsc} a semantics is given for the same class of HITs but with no recursive equality constructors. Dybjer and Moeneclaey define a syntactic scheme for finitary HITs and show their existence in a groupoid model \cite{moeneclaey}. Internal codes for simple inductive types such as natural numbers, lists or binary trees can be given by containers which are decoded to W-types \cite{abbot05containers}. Morris and Altenkirch \cite{morris09indexed} extend the notion of container to that of indexed container, which specifies indexed inductive types. Codes for inductive-recursive types are given in \cite{Dybjer99afinite}. Inductive-inductive types were introduced by Forsberg \cite{forsberg-phd}. Sojakova \cite{sojakova} defines a subset of HITs called W-suspensions by a coding scheme similar to W-types. She proves that the induction principle is equivalent to homotopy initiality. Quotient types \cite{hofmann95extensional} are precursors of higher inductive types (HITs). The notion of HIT first appeared in \cite{HoTTbook}, however only through examples and without a general definition. Lumsdaine and Shulman give a general specification of models of type theory supporting higher inductive types \cite{lumsdaineShulman}. They introduce the notion of cell monad with parameters and characterize the class of models which have initial algebras for a cell monad with parameters. The authors of \cite{cubicalhits} develop semantics for several HITs (sphere, torus, suspensions, truncations, pushouts) in certain presheaf toposes, and extend the syntax of cubical type theory \cite{ctt} with these HITs. Kraus \cite{krausprop} and Van Doorn \cite{doorn} construct propositional truncation as a sequential colimit. The schemes mentioned so far do not support inductive-inductive types. Cartmell's generalized algebraic theories (GATs) \cite{gat} pioneered a type-theoretic notion of algebraic signature. GATs can be viewed as a variant of finitary quotient inductive-inductive signatures (QIITs), although GATs also support equations between sorts (type constructors), which so far have not been considered in the QIIT and HIT literature. The article of Altenkirch et al.\ \cite{gabe} gives a specification and semantics of QIITs in a set-truncated setting. Signatures are given as lists of functors which can be interpreted as complete categories of algebras, and completeness is used to talk about notions of induction and recursion. However, no strict positivity restriction is given, nor a construction of initial algebras. Closely related to the current work is the paper by the current authors and Altenkirch \cite{kaposi2019constructing}, which also concerns QIITs. There, signatures for QIITs are essentially a restriction of the signatures given here, but in contrast to the current work, the restricted set-truncated setting enables building initial algebras and detailed categorical semantics.
The logical predicate syntactic translation was introduced by Bernardy et al.\ \cite{bernardy2010parametricity}. The idea that a context can be seen as a signature and that the logical predicate translation can be used to derive the types of induction motives and methods was described in \cite[Section 5.3]{ttintt}. Logical relations are used to derive the computation rules in \cite[Section 4.3]{kaposi-phd}, but only for closed QIITs. Syntactic translations in the context of the calculus of inductive constructions are discussed in \cite{next700}. Logical relations and parametricity can also be used to justify the existence of inductive types in a type theory with an impredicative universe \cite{atkey}. \section{Signatures for HIITs} \label{sec:signatures} In this section we define signatures for HIITs. First, we list the main considerations behind our definition. \begin{itemize} \item\emph{Ubiquitous type dependencies.} Recall the inductive-inductive $\mathsf{Con}$-$\mathsf{Ty}$ example from Section \ref{sec:intro}: there, types of constructors may refer back to previous constructors. Additionally, $\mathsf{Ty}$ is indexed over the previously declared $\mathsf{Con}$ type constructor. This suggests that we should not attempt to stratify signatures, and instead use a fully dependent type theory. At this level of generality, stratification seems to complicate matters and remove the syntax further from familiar type theories. \item\emph{Referring to external types.} We would like to mention types which are external to the signature. For example, length-indexed vectors refer to natural numbers which are supposed to already exist. Hence, we also assume a syntax for an \emph{external type theory}, which is the source of such types, and constructions in the theory of signatures may depend on a context in the external type theory. \item\emph{Strict positivity.} In prior literature, schemes for inductive types usually include structural restrictions which enforce strict positivity. In our case, a \emph{universe} is used to make size restrictions which also entail strict positivity. \item\emph{Iterated equalities and type constructor equalities, with path induction.} We support iterated equalities by closing the universe under equality type formation of point and path constructors, together with a standard (although size-restricted) definition of path induction. We also introduce an additional type former for equalities between type constructors.
\end{itemize} \subsection{Theory of Signatures} \label{sec:tos} \begin{figure} (1) Contexts and variables \[ \begin{gathered} \infer{\hat{\Gamma}\vdash\cdot}{\mathbin{\hat\vdash}\,\hat{\Gamma}} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\vdash\Delta,x:A}{\hat{\Gamma}\semicol\Delta\vdash A} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta,x:A\vdash x : A}{\hat{\Gamma}\semicol\Delta\vdash A} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta,y:B\vdash x : A}{\hat{\Gamma}\semicol\Delta\vdash x : A && \hat{\Gamma}\semicol\Delta\vdash B} \end{gathered} \] \vspace{0.5em} (2) Universe \[ \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash \mathsf{U}}{\hat{\Gamma}\semicol\vdash\Delta} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash \underline{a}}{\hat{\Gamma}\semicol\Delta \vdash a : \mathsf{U}} \end{gathered} \] \vspace{0.5em} (3) Inductive parameters \[ \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash (x:a)\rightarrow B}{\hat{\Gamma}\semicol\Delta \vdash a : \mathsf{U} && \hat{\Gamma}\semicol\Delta,x:\underline{a} \vdash B} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash t\, u : B[x \mapsto u]}{\hat{\Gamma}\semicol\Delta \vdash t : (x:a)\rightarrow B && \hat{\Gamma}\semicol\Delta \vdash u : \underline{a}} \end{gathered} \] \vspace{0.5em} (4) Paths between point and path constructors \[ \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash t=_a u : \mathsf{U}}{\hat{\Gamma}\semicol\Delta \vdash a : \mathsf{U} && \hat{\Gamma}\semicol\Delta \vdash t : \underline{a} && \hat{\Gamma}\semicol\Delta \vdash u : \underline{a}} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash \mathsf{refl} : \underline{t=_a t}}{\hat{\Gamma}\semicol\Delta \vdash t : \underline{a}} \end{gathered} \] \[ \infer{\hat{\Gamma}\semicol\Delta \vdash \J_{a\,t\,\,(x.z.p)}\,pr\,_u\,eq : \underline{p[x\mapsto u, z\mapsto eq]}} {\begin{array}{l l l} & \hat{\Gamma}\semicol\Delta,x:\underline{a},z:\underline{t=_a x}\vdash p : \mathsf{U} & \hat{\Gamma}\semicol\Delta \vdash u : \underline{a} \\ \hat{\Gamma}\semicol\Delta \vdash t : \underline{a} \hspace{1.5em} & \hat{\Gamma}\semicol\Delta \vdash pr : \underline{p[x\mapsto t, z\mapsto \mathsf{refl}]} \hspace{1.5em} & \hat{\Gamma}\semicol\Delta \vdash eq : \underline{t=_a u} \end{array}} \] \[ \infer{\hat{\Gamma}\semicol\Delta \vdash \J\beta_{a\,t\,\,(x.z.p)}\,pr:\underline{(\J_{a\,t\,\,(x.z.p)}\,pr\,_t\,\mathsf{refl}) =_{p[x\mapsto t, z\mapsto \mathsf{refl}]} pr}} {\hat{\Gamma}\semicol\Delta \vdash t : \underline{a} && \hat{\Gamma}\semicol\Delta,x:\underline{a},z:\underline{t =_a x}\vdash p : \mathsf{U} && \hat{\Gamma}\semicol\Delta \vdash pr : \underline{p[x\mapsto t, z\mapsto \mathsf{refl}]} } \] \vspace{0.5em} (5) Paths between type constructors \[ \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash a=_\mathsf{U} b}{\hat{\Gamma}\semicol\Delta \vdash a : \mathsf{U} && \hat{\Gamma}\semicol\Delta \vdash b : \mathsf{U}} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\semicol\Delta \vdash \mathsf{refl} : a=_\mathsf{U} a}{\hat{\Gamma}\semicol\Delta \vdash a : \mathsf{U}} \end{gathered} \] \[ \infer{\hat{\Gamma}\semicol\Delta \vdash \J_{a\,\,(x.z.p)}\,pr\,_b\,eq : \underline{p[x\mapsto b, z\mapsto eq]}} {\begin{array}{l l l} & \hat{\Gamma}\semicol\Delta,x:\mathsf{U},z:a=_\mathsf{U} x\vdash p : \mathsf{U} & \hat{\Gamma}\semicol\Delta \vdash b : \mathsf{U} \\ 
\hat{\Gamma}\semicol\Delta \vdash a : \mathsf{U} \hspace{1.5em} & \hat{\Gamma}\semicol\Delta \vdash pr : \underline{p[x\mapsto a, z\mapsto \mathsf{refl}]} \hspace{1.5em} & \hat{\Gamma}\semicol\Delta \vdash eq : a=_\mathsf{U} b \end{array}} \] \[ \infer{\hat{\Gamma}\semicol\Delta \vdash \J\beta_{a\,\,(x.z.p)}\,pr:\underline{(\J_{a\,\,(x.z.p)}\,pr\,_a\,\mathsf{refl}) =_{p[x\mapsto a, z\mapsto \mathsf{refl}]} pr}} {\hat{\Gamma}\semicol\Delta \vdash a : \mathsf{U} && \hat{\Gamma}\semicol\Delta,x:\mathsf{U},z:a =_\mathsf{U} x\vdash p : \mathsf{U} && \hat{\Gamma}\semicol\Delta \vdash pr : \underline{p[x\mapsto a, z\mapsto \mathsf{refl}]} } \] \caption{The theory of HIIT signatures, parts (1)--(5). Weakenings are implicit, we assume fresh names everywhere and consider $\alpha$-convertible terms equal. The external context $\hat{\Gamma}$ is only used in parts (6)--(7), see Figure \ref{sigrules2}.} \label{sigrules1} \end{figure} \begin{figure} (6) External parameters \[ \begin{gathered} \infer{\hat{\Gamma}\hat;\,\Delta \vdash (\hat{x}\in \hat{A})\rightarrow B}{\hat{\Gamma}\mathbin{\hat\vdash}\hat{A}\in\hat{\mathsf{Type}}_{\hat{0}} && \hat{\Gamma}\hat;\,\vdash\Delta && (\hat{\Gamma}\hat,\,\hat{x}\in \hat{A})\hat;\, \Delta \vdash B} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\hat;\,\Delta \vdash t\, \hat{u} : B[\hat{x}\mapsto \hat{u}]}{\hat{\Gamma}\hat;\,\Delta \vdash t : (\hat{x}\in \hat{A})\rightarrow B && \hat{\Gamma}\mathbin{\hat\vdash} \hat{u} \in \hat{A}} \end{gathered} \] \vspace{0.5em} (7) Infinitary parameters \[ \begin{gathered} \infer{\hat{\Gamma}\hat;\,\Delta \vdash (\hat{x}\in \hat{A})\rightarrow b : \mathsf{U}}{\hat{\Gamma}\mathbin{\hat\vdash}\hat{A}\in\hat{\mathsf{Type}}_{\hat{0}} && \hat{\Gamma}\hat;\,\vdash\Delta && (\hat{\Gamma}\hat,\,\hat{x}\in \hat{A})\hat;\, \Delta \vdash b : \mathsf{U}} \end{gathered} \hspace{2em} \begin{gathered} \infer{\hat{\Gamma}\hat;\,\Delta \vdash t\, \hat{u} : \underline{b[\hat{x}\mapsto \hat{u}]}} {\hat{\Gamma}\hat;\,\Delta \vdash t : \underline{(\hat{x}\in \hat{A})\rightarrow b} && \hat{\Gamma}\mathbin{\hat\vdash} \hat{u} \in \hat{A}} \end{gathered} \] \caption{The theory of HIIT signatures, parts (6)--(7). Parts (1)--(5) are given in Figure \ref{sigrules1}.} \label{sigrules2} \end{figure} We list typing rules for the theory of signatures in Figures \ref{sigrules1} and \ref{sigrules2}. We consider the following judgments: \begin{alignat*}{4} & \hat{\Gamma}\vdash\Delta && \text{$\Delta$ is a context in the external context $\hat{\Gamma}$}\\ & \hat{\Gamma}\hat;\,\Delta\vdash A && \text{$A$ is a type in context $\Delta$ and external context $\hat{\Gamma}$} \\ & \hat{\Gamma}\hat;\,\Delta\vdash t : A \hspace{3em} && \text{$t$ is a term of type $A$ in context $\Delta$ and external context $\hat{\Gamma}$} \end{alignat*} We have the convention that constructions in the external type theory are notated in {\color{BrickRed}brick red} color. Although every judgement is relative to a context in the external type theory, note that none of the rules in (1)--(5) depend on or change these assumptions, and even in parts (6)--(7) we do not refer to any particular type former of the external theory. We describe the external theory in more detail in Section \ref{sec:external}. Also, the rules presented here are informal and optimized for readability; we describe the Agda formalizations in Section \ref{sec:formalization}. We explain the rules for the theory of signatures in order below. (1) The rules for context formation and variables are standard.
We build signatures in a well-formed external context. We assume fresh names everywhere to avoid name capture, and leave weakenings implicit. (2) There is a universe $\mathsf{U}$, with decoding written as an underline instead of the usual $\mathsf{El}$, to improve readability. With this part of the syntax, we can already define contexts specifying the empty type, unit type and booleans, or in general, finite sets of finite sets: \[ \boldsymbol{\cdot},\,\,\,Empty:\mathsf{U} \hspace{3em} \boldsymbol{\cdot},\,\,\,Unit:\mathsf{U},\,\,\,tt:\underline{Unit} \hspace{3em} \boldsymbol{\cdot},\,\,\,Bool:\mathsf{U},\,\,\,true:\underline{Bool},\,\,\,false:\underline{Bool} \] (3) We have a function space with small domain and large codomain, which we call the inductive function space. This can be used to add inductive parameters to all kinds of constructors. As $\mathsf{U}$ is not closed under this function space, these function types cannot (recursively) appear in inductive arguments, which ensures strict positivity. When the codomain does not depend on the domain, $a\rightarrow B$ can be written instead of $(x:a)\rightarrow B$. Now we can specify the natural numbers as a context: \[ \boldsymbol{\cdot},\,\,\,Nat : \mathsf{U},\,\,\,zero:\underline{Nat},\,\,\,suc:Nat\rightarrow\underline{Nat} \] We can also encode inductive-inductive definitions such as the fragment of the well-typed syntax of a type theory mentioned in the introduction: \begin{alignat*}{5} & \boldsymbol{\cdot},\,\,\,Con:\mathsf{U},\,\,\,Ty:Con\rightarrow\mathsf{U},\,\,\,\bullet:\underline{Con},\,\,\,\mathord{\hspace{1pt}\text{--}\hspace{1pt}}\rhd\mathord{\hspace{1pt}\text{--}\hspace{1pt}}:(\Delta:Con)\rightarrow Ty\,\Delta\rightarrow\underline{Con}, \\ & U : (\Delta:Con)\rightarrow \underline{Ty\,\Delta},\,\,\,\Pi:(\Delta:Con)(A:Ty\,\Delta)(B:Ty\,(\Delta\rhd A))\rightarrow\underline{Ty\,\Delta} \end{alignat*} Note that this notion of inductive-inductive types is more general than the one considered in previous works \cite{forsberg-phd}, as we allow any number of type constructors, and arbitrary mixing of type and point constructors. (4) $\mathsf{U}$ is closed under the equality type, with eliminator $\J$ and a weak (propositional) $\beta$-rule. Weakness is required because the translations in Sections \ref{sec:morphisms} and \ref{sec:sections} do not preserve this $\beta$-rule strictly. We explain this in more detail in Sections \ref{sec:coherence} and \ref{sec:categorical}. Adding equality to the theory of signatures allows higher constructors and inductive equality parameters as well. We can now define the higher inductive circle as the following context: \[ \boldsymbol{\cdot},\,\,\,S^1:\mathsf{U},\,\,\,base:\underline{S^1},\,\,\,loop:\underline{base =_{S^1} base} \] The $\J$ rule allows constructors to mention operations on paths as well. For instance, the definition of the torus depends on path composition, which can be defined using $\J$: given $p:\underline{t=_a u}$ and $q:\underline{u=_a v}$, $p \sqcdot q$ abbreviates $\J_{a\,u\,(x.z.(t=_a x))}\,p\,_v\,q : \underline{t=_a v}$. The torus is given as follows. \begin{alignat*}{5} & \boldsymbol{\cdot},\,\,\,T^2:\mathsf{U},\,\,\,b : \underline{T^2},\,\,\, p:\underline{b =_{T^2} b},\,\,\,q:\underline{b=_{T^2} b},\,\,\, t:\underline{p\sqcdot q=_{(b=_{T^2} b)} q\sqcdot p} \end{alignat*} With the equality type at hand, we can define a full well-typed syntax of type theory as given e.g.\ in \cite{ttintt} as an inductive type.
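As a preview of the translations in Sections \ref{sec:algebras} to \ref{sec:sections}: for the circle signature above, the induction principle one expects to derive can be approximated in Agda by postulates, roughly as follows (a hypothetical sketch; the names and the use of \texttt{subst} for the dependent path are our own choices, not part of the signature):
\begin{verbatim}
open import Relation.Binary.PropositionalEquality
  using (_≡_; subst)

-- Hypothetical postulated circle with (roughly) the induction
-- principle one expects to derive from the S¹ signature above.
postulate
  S¹   : Set
  base : S¹
  loop : base ≡ base
  elim : (P : S¹ → Set) (pb : P base)
       → subst P loop pb ≡ pb   -- method for the loop constructor
       → (x : S¹) → P x
\end{verbatim}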
Also, see the examples in the Agda formalization described in Section \ref{sec:formalization}. The question may arise whether our $\J$ is sufficient to define all constructions on paths which we want to express, as we do not have $\Sigma$-types in signatures, and we only have a weak version of $\Pi$. First, as far as we know, all path constructions which occur in HITs in the literature are expressible using our $\J$, so it is certainly adequate in this sense. Second, we provide a sketch in our Agda formalization (see Section \ref{sec:formalization}) that a Frobenius variant of $\J$ (first considered by Garner in \cite[p.~13]{garner2009two}) is derivable from the $\J$ given in Figure \ref{sigrules1}, which suggests that our $\J$ does not lose expressiveness because of the lack of $\Sigma$- and $\Pi$-types. (5) There is a different equality type former, which can be used to express paths between type constructors. To our knowledge, this has not been considered in previous HIT literature, although Cartmell considered equations between sorts in generalized algebraic theories \cite{gat}. For an example, paths between type constructors allow a compact definition of integers: \begin{alignat*}{5} & \boldsymbol{\cdot},\,\,Int : \mathsf{U},\,\,zero : \underline{Int},\,\,p : Int =_\mathsf{U} Int \end{alignat*} Here, successor and predecessor functions can be recovered from the $p : Int =_\mathsf{U} Int$ equality, by transporting an $Int$ along $p$ or $p^{-1}$. This signature could be unfolded to a larger one, by replacing $p : Int =_\mathsf{U} Int$ with an explicit $suc : Int \rightarrow \underline{Int}$ constructor and additional constructors expressing that $suc$ is an equivalence. The definition of $Int$ with a successor equivalence was previously suggested by Altenkirch and Pinyo \cite{pinyo2018integers} and Cavallo and M\"ortberg \cite{cavallointegers}. For another example, we may have a HIIT definition for a type theory where Russell-style universes are compactly specified with an equality: \begin{alignat*}{5} & ...,\,\, russell : (\Gamma : Con)\rightarrow Tm\,\Gamma\,U =_\mathsf{U} Ty\,\Gamma,\,\, ... \end{alignat*} In the presence of the univalence axiom, type constructor equations can always be equivalently represented using (4) path constructors, by adding explicit equivalences to a signature. In this case, type constructor equations serve as a shorthand for equivalences. So far we have only been able to define closed HIITs, which do not refer to external types. We now add rules which include external types into signatures. A context $\Delta$ for which $\hat{\Gamma}\vdash\Delta$ holds can be seen as a specification of an inductive type which depends on an external $\hat{\Gamma}$ signature. For example, in the case of lists for arbitrary external element types, $\hat{\Gamma}$ will be $\hat{A}\in\hat{\mathsf{Type}}_{\hat{0}}$. (6) is a function space where the domain is a type in the external theory. We distinguish it from (3) by using brick red color in the domain specification.
We specify lists and integers as follows, with integers now given as quotients of pairs of natural numbers: \begin{alignat*}{4} & \hat{A}\in\hat{\mathsf{Type}}_{\hat{0}} && \vdash\,\boldsymbol{\cdot},\,\, && List:\mathsf{U},\,\,\,nil:\underline{List},\,\,\,cons:(\hat{x}\in \hat{A})\rightarrow List\rightarrow\underline{List} \\ & \hat{\Gamma} && \vdash\,\boldsymbol{\cdot},\,\,&& Int:\mathsf{U},\,\,\,pair:(\hat{x}\,\hat{y}\in\hat{Nat})\rightarrow\underline{Int},\,\,\, \\ & && && eq:(\hat{a}\,\hat{b}\,\hat{c}\,\hat{d}\in \hat{Nat})(\hat{p}\in \hat{a}\hat{\mathbin{+}}\hat{d}\mathbin{\hat=}_{\hat{Nat}} \hat{b}\hat{\mathbin{+}}\hat{c})\rightarrow \underline{pair\,\hat{a}\,\hat{b}=_{Int}pair\,\hat{c}\,\hat{d}}, \\ & && && trunc:(x\,y : Int)(p\,q : x=_{Int} y)\rightarrow \underline{p=_{x=_{Int} y} q} \end{alignat*} In the case of integers, $\hat{\Gamma}$ is $\hat{Nat}\in\hat{\mathsf{Type}}_{\hat{0}}\hat{,}\,\mathord{\hspace{1pt}\text{--}\hspace{1pt}}\hat{\mathbin{+}}\mathord{\hspace{1pt}\text{--}\hspace{1pt}}\in\hat{Nat}\mathbin{\hat\ra} \hat{Nat}\mathbin{\hat\ra}\hat{Nat}$, or alternatively, we could require natural numbers in the external theory. As another example, propositional truncation for a type $\hat{A}$ is specified as follows. \[ \hat{A}\in\hat{\mathsf{Type}}_{\hat{0}}\vdash\, \boldsymbol{\cdot},\,\,\,tr:\mathsf{U},\,\,\,emb : (\hat{x}\in \hat{A})\rightarrow \underline{tr},\,\,\,eq:(x\,y:tr)\rightarrow \underline{x=_{tr} y} \] The smallness of $\hat{A}$ is required in (6). It is possible to generalize signatures to arbitrary universe levels, but this is not essential to the current development. Note that we can assume arbitrary structures in the external $\hat{\Gamma}$ context, which in particular allows us to specify HIITs depending on other (external) HIITs. We can do this by first specifying a HIIT, then using the translations in Sections \ref{sec:algebras}--\ref{sec:sections} to compute its notions of algebras and induction, and finally assuming the HIIT in the external context of another HIIT signature. The (6) function space preserves strict positivity, since in the external theory there is no way to recursively refer to the inductive type \emph{being defined}. The situation is analogous to the case of $W$-types \cite{abbot05containers}, where shapes and positions can contain arbitrary types but they cannot recursively refer to the $W$-type being defined. This setup rules out some signatures; for example, rose trees cannot be specified as follows, because we cannot apply the external $\hat{List}$ to the inductive $T$: \[ \hat{A}\in\hat{\mathsf{Type}}_{\hat{0}}\hat{,}\,\,\hat{List}\in\hat{\mathsf{Type}}_{\hat{0}}\mathbin{\hat\ra}\hat{\mathsf{Type}}_{\hat{0}} \vdash \boldsymbol{\cdot},\,\,T : \mathsf{U},\,\,node : (\hat{x}\in\hat{A}) \rightarrow \hat{List}\,T \rightarrow \underline{T} \] Analogously nested HIT examples are the ``hubs and spokes'' definitions in \cite[Section 6.7]{HoTTbook}. To allow such definitions, we would have to analyze external constructions to check whether they preserve strict positivity. This is out of the scope of the current paper. (7) $\mathsf{U}$ is also closed under a function space where the domain is an external type and the codomain is a small type of the theory of signatures. We overload the application notation for external parameters, as it is usually clear from context which application is meant.
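Returning to the propositional truncation signature: its intended algebras can be mirrored by the following hypothetical Agda postulates (a sketch only; the external type $\hat{A}$ becomes a module parameter, and the names are chosen to match the signature):
\begin{verbatim}
open import Relation.Binary.PropositionalEquality using (_≡_)

module Truncation (A : Set) where
  -- Postulated algebra for the (tr, emb, eq) signature above.
  postulate
    tr  : Set
    emb : A → tr
    eq  : (x y : tr) → x ≡ y
\end{verbatim}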
The rules allow types with infinitary constructors, for example, trees branching by a possibly infinite external type $\hat{A}$: \[ \hat{A}\in\hat{\mathsf{Type}}_{\hat{0}}\vdash\,\boldsymbol{\cdot},\,\,\,T:\mathsf{U},\,\,\,leaf:\underline{T},\,\,\,node:((\hat{x}\in \hat{A})\rightarrow T)\rightarrow\underline{T} \] Here, $node$ has a function type (3) with a function type (7) in the domain. More generally, we can define $W$-types \cite{abbot05containers} as follows. $\hat{S}$ describes the ``shapes'' of the constructors and $\hat{P}$ the ``positions'' where recursive arguments can appear. \[ \hat{S}\in\hat{\mathsf{Type}}_{\hat{0}}\hat{,}\,\,\hat{P}\in\hat{S}\mathbin{\hat\ra}\hat{\mathsf{Type}}_{\hat{0}} \vdash\,\boldsymbol{\cdot},\,\,\,W:\mathsf{U},\,\,\,sup: (\hat{s} \in \hat{S})\rightarrow((\hat{p} \in \hat{P}\,\hat{s})\rightarrow W)\rightarrow \underline{W} \] For a more complex infinitary example, see the definition of Cauchy reals in \cite[Definition 11.3.2]{HoTTbook}. It can also be found as an example file in our Haskell implementation. Note that we do not include a $\lambda$ for infinitary function types, although we could possibly use it when describing paths between such functions. The reason is that $\lambda$ can always be represented in signatures by introducing additional function parameters which are constrained by pointwise equalities. For example, consider adding the following path constructor to the previously described $\hat{A}$-branching trees: \[ eq : \underline{node\,(\lambda\,\hat{x}.\,leaf) =_T leaf} \] This can be rewritten without $\lambda$ as follows: \[ eq : (f : (\hat{x}\in\hat{A})\rightarrow T)\rightarrow ((\hat{x}\in \hat{A})\rightarrow f\,\hat{x} =_T leaf)\rightarrow\underline{node\,f =_T leaf} \] The benefit of supporting $\lambda$ would be less encoding noise in signatures. However, we observe that such usage of $\lambda$ is rather rare in HITs in practice, and hence omit $\lambda$ for the sake of simplicity. The invalid examples $\mathsf{Ival}$ and $\mathsf{Neg}$ from Section \ref{sec:intro} cannot be encoded in the theory of signatures. For $\mathsf{Ival}$, we can go as far as \[ \boldsymbol{\cdot},\,\,\,{Ival}:\mathsf{U},\,\,\,a:\underline{{Ival}},\,\,\,b:\underline{{Ival}},\,\,\,\sigma:\underline{? =_{{Ival}} ?}. \] The first argument of the function $\hat{f}\in(\hat{X}\in\hat{\mathsf{Type}})\mathbin{\hat\ra}\hat{X}\mathbin{\hat\ra}\hat{X}$ is an external type, but we only have ${Ival}:\mathsf{U}$ in the theory of signatures. $\mathsf{Neg}$ cannot be typed because the first parameter of the constructor $\mathsf{con}$ is a function from a small type to an external type, and no such functions can be formed. \subsection{External Type Theory} \label{sec:external} The external syntax serves two purposes: it is a source of types external to a HIIT signature, and it also serves as the target for the syntactic translations described in Sections \ref{sec:algebras} to \ref{sec:sections}. It is not essential that we use the same theory for both purposes; we do so only to simplify the presentation by skipping an additional translation or embedding step. Also, we do not specify the external theory in formal detail, since it is a standard type theory. We only make some assumptions about supported type formers. We generally keep the notation close to Agda, and use {\color{BrickRed}brick red} color to distinguish from constructions in the theory of signatures. There is a cumulative Russell-style hierarchy of universes $\hat{\mathsf{Type}}_{\hat{i}}$, with universes closed under $\Pi$, $\Sigma$, equality and unit types.
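In Agda terms, one possible approximation of these assumptions is the following preamble (a sketch of the assumed type formers only; our actual formalization may be organized differently):
\begin{verbatim}
{-# OPTIONS --without-K #-}  -- no uniqueness of identity proofs
open import Agda.Primitive using (Level)
open import Data.Unit      using (⊤; tt)
open import Data.Product   using (Σ; _×_; _,_; proj₁; proj₂)
open import Relation.Binary.PropositionalEquality
  using (_≡_; refl; subst; cong; trans; sym)
\end{verbatim}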
Importantly, we do not assume uniqueness of identity proofs. The unit type is denoted $\hat{\top}$ with constructor $\hat{\mathsf{tt}}$. Dependent function space is denoted $\hat{(\hat{x}\in \hat{A})\mathbin{\hat\ra} \hat{B}}$. We write $\hat{A}\mathbin{\hat\ra} \hat{B}$ if $\hat{B}$ does not depend on $\hat{x}$, and $\mathbin{\hat\ra}$ associates to the right; $\hat{(\hat{x}\in \hat{A})(\hat{y}\in \hat{B})\mathbin{\hat\ra} \hat{C}}$ abbreviates $\hat{(\hat{x}\in \hat{A})\mathbin{\hat\ra} (\hat{y}\in \hat{B})\mathbin{\hat\ra} \hat{C}}$ and $\hat{(\hat{x}\,\hat{y}\in \hat{A})\mathbin{\hat\ra} \hat{B}}$ abbreviates $\hat{(\hat{x}\in \hat{A})(\hat{y}\in \hat{A})\mathbin{\hat\ra} \hat{B}}$. We write $\hat{\lambda x.\, t}$ for abstraction and $\hat{t}\,\hat{u}$ for left-associative application. $\hat{(\hat{x}\in \hat{A})\mathbin{\hat\times} \hat{B}}$ stands for $\Sigma$ types, $\hat{A}\mathbin{\hat\times} \hat{B}$ for the non-dependent version. We sometimes use a short re-associated notation for left-nested iterated $\Sigma$ types, for example $\hat{(A : \hat{\mathsf{Type}}_{\hat{0}})\times A \times A}$ may stand for the left-nested $\hat{(x : (A : \hat{\mathsf{Type}}_{\hat{0}})\times A) \times \mathsf{proj}_1\,x}$. The constructor for $\Sigma$ is denoted $\hat{(t,\,u)}$ with eliminators $\hat{\mathsf{proj}_1}$ and $\hat{\mathsf{proj}_2}$. Both $\Pi$ and $\Sigma$ have definitional $\beta$ and $\eta$ rules. The equality type for a type $\hat{A}$ and elements $\hat{t}\in \hat{A}$, $\hat{u}\in \hat{A}$ is denoted $\hat{t}\mathbin{\hat=}_{\hat{A}}\hat{u}$, and we have the constructor $\hat{\mathsf{refl}}_{\hat{t}}$ and the eliminator $\hat{\J}$ with a definitional $\beta$-rule. The notation is $\hat{\J}_{\hat{A}\,\hat{t}\,\hat{P}}\,\hat{pr}\,_{\hat{u}}\,\hat{eq}$ for $\hat{t}\in \hat{A}$, $\hat{P}\in (\hat{x}\in \hat{A})\mathbin{\hat\ra} \hat{t}\mathbin{\hat=}_\hat{A}\hat{x}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i}}$, $\hat{pr}\in \hat{P}\,\hat{t}\,\hat{\mathsf{refl}}$ and $\hat{eq} \in \hat{t}\mathbin{\hat=}_\hat{A}\hat{u}$. Sometimes we omit parameters in subscripts. We will use the following functions defined using $\hat{\J}$ in the standard way. We write $\hat{\mathsf{tr}}_{\hat{P}}\,\hat{e}\,\hat{t}\in \hat{P}\,\hat{v}$ for transport of $\hat{t} \in \hat{P}\,\hat{u}$ along $\hat{e} \in \hat{u}\mathbin{\hat=} \hat{v}$, and $\hat{\mathsf{coe}}\,\hat{e}\,\hat{t}\in \hat{B}$ for coercion of $\hat{t}\in \hat{A}$ along $\hat{e} \in \hat{A}\mathbin{\hat=}_{\hat{\mathsf{Type}}}\hat{B}$. We write $\hat{\mathsf{ap}}\,\hat{f}\,\hat{e}\in\hat{f}\,\hat{u}\mathbin{\hat=}\hat{f}\,\hat{v}$ where $\hat{f}\in\hat{A}\mathbin{\hat\ra}\hat{B}$ and $\hat{e}\in\hat{u}\mathbin{\hat=}\hat{v}$, and $\hat{\mathsf{apd}}\,\hat{f}\,\hat{e} \in \hat{\mathsf{tr}}_{\hat{B}}\,\hat{e}\,(\hat{f}\,\hat{u}) \mathbin{\hat=} \hat{f}\,\hat{v}$ where $\hat{f}\in(\hat{x}\in \hat{A})\mathbin{\hat\ra} \hat{B}$ and $\hat{e} \in \hat{u}\mathbin{\hat=} \hat{v}$. Borrowing notation from the homotopy type theory book \cite{HoTTbook}, we write $\hat{p \sqcdot q}$ for transitivity and $\hat{p^{-1}}$ for symmetry. We also make use of the groupoid law $\hat{\mathsf{inv}\,p \in p^{-1}\sqcdot p = \mathsf{refl}}$. \section{General Definitions for Induction and Recursion} \label{sec:general} Armed with a definition of HIIT signatures, we would like to have notions of \emph{induction} and \emph{recursion} for each signature.
However, instead of trying to directly extract them from signatures, it is more helpful to have more fundamental (categorical) semantic concepts: \emph{algebras}, \emph{homomorphisms}, \emph{displayed algebras} and \emph{sections of displayed algebras}. Then, we can express induction and recursion using these. Let us first consider natural numbers, and see how the usual definition of induction arises. For $\mathsf{Nat}$, an algebra is simply a triple consisting of a type, a value and an endofunction. Below, we leave universe indices implicit, and we use the notation of the external type theory described in Section \ref{sec:external}. \begin{alignat*}{5} & \mathsf{Alg} : \mathsf{Type} \\ & \mathsf{Alg} \equiv (N : \mathsf{Type}) \times N \times (N \rightarrow N) \end{alignat*} \noindent Displayed $\mathsf{Nat}$-algebras (sometimes called fibered algebras, as in \cite{sojakova}) are likewise triples, but each component depends on the corresponding component of a $\mathsf{Nat}$-algebra. We borrow the term ``displayed'' from Ahrens and Lumsdaine \cite{displayedCategories}, as our displayed algebras generalize their displayed categories. \begin{alignat*}{5} & \mathsf{DisplayedAlg} : \mathsf{Alg} \rightarrow \mathsf{Type} && \\ & \mathsf{DisplayedAlg}\,(N,\,z,\,s) \equiv && \\ & \hspace{3em} (N^D : N \rightarrow \mathsf{Type})\times (z^D : N^D\,z)\times ((n : N)\rightarrow N^D\,n\rightarrow N^D\,(s\,n)) \end{alignat*} \noindent Homomorphisms, as usual in mathematics, are structure-preserving functions: \begin{alignat*}{5} & \mathsf{Morphism} : \mathsf{Alg} \rightarrow \mathsf{Alg} \rightarrow \mathsf{Type} && \\ & \mathsf{Morphism}\,(N_0,\,z_0,\,s_0)\,(N_1,\,z_1,\,s_1) \equiv && \\ & \hspace{3em} (N^M : N_0 \rightarrow N_1)\times (z^M : N^M\,z_0 = z_1)\times ((n : N_0)\rightarrow N^M\,(s_0\,n) = s_1\,(N^M\,n)) \end{alignat*} \noindent Sections of displayed algebras can be viewed as a dependently typed analogue of homomorphisms: \begin{alignat*}{5} & \mathsf{Section} : (\alpha : \mathsf{Alg}) \rightarrow \mathsf{DisplayedAlg}\,\alpha \rightarrow \mathsf{Type} && \\ & \mathsf{Section}\,(N,\,z,\,s)\,(N^D,\,z^D,\,s^D) \equiv && \\ & \hspace{3em} (N^S : (n : N) \rightarrow N^D\,n)\times (z^S : N^S\,z = z^D)\times ((n : N)\rightarrow N^S\,(s\,n) = s^D\,n\,(N^S\,n)) \end{alignat*} \noindent Now, we can reformulate induction for $\mathsf{Nat}$. First, we assume that there exists a distinguished $\mathsf{Nat}$-algebra, named $\mathsf{Nat}^*$. The induction principle for this algebra has the following type: \[ \mathsf{Induction} : (M : \mathsf{DisplayedAlg}\,\mathsf{Nat}^*) \rightarrow \mathsf{Section}\,\mathsf{Nat}^*\, M \] \noindent Unfolding the definitions, it is apparent that this is the same notion of $\mathsf{Nat}$-induction as we gave before. The distinguished algebra consists of the type and value constructors, the induction motives and methods are bundled into a displayed algebra, and as a result we get a section, containing an eliminator function together with its $\beta$-rules. Additionally, we can define recursion using homomorphisms: \[ \mathsf{Recursion} : (\alpha : \mathsf{Alg}) \rightarrow \mathsf{Morphism}\,\mathsf{Nat}^* \,\alpha \] This corresponds to \emph{weak initiality} in the sense of category theory: for each algebra, there is a morphism from the weakly initial algebra to it.
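These four notions are small enough to transcribe directly into Agda; the following self-contained sketch (using the standard library, with names lightly adapted, e.g.\ \texttt{NatAlg} for $\mathsf{Alg}$) may help readers who wish to experiment:
\begin{verbatim}
{-# OPTIONS --without-K #-}
open import Data.Product
open import Relation.Binary.PropositionalEquality

NatAlg : Set₁
NatAlg = Σ Set λ N → N × (N → N)

DispAlg : NatAlg → Set₁
DispAlg (N , z , s) =
  Σ (N → Set) λ Nᴰ → Nᴰ z × (∀ n → Nᴰ n → Nᴰ (s n))

Morphism : NatAlg → NatAlg → Set
Morphism (N₀ , z₀ , s₀) (N₁ , z₁ , s₁) =
  Σ (N₀ → N₁) λ Nᴹ →
    (Nᴹ z₀ ≡ z₁) × (∀ n → Nᴹ (s₀ n) ≡ s₁ (Nᴹ n))

Section : (α : NatAlg) → DispAlg α → Set
Section (N , z , s) (Nᴰ , zᴰ , sᴰ) =
  Σ (∀ n → Nᴰ n) λ Nˢ →
    (Nˢ z ≡ zᴰ) × (∀ n → Nˢ (s n) ≡ sᴰ n (Nˢ n))
\end{verbatim}
Given a postulated distinguished algebra \texttt{Nat*}, the types of $\mathsf{Induction}$ and $\mathsf{Recursion}$ are then literally \texttt{(M : DispAlg Nat*) → Section Nat* M} and \texttt{(α : NatAlg) → Morphism Nat* α}.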
Strong initiality in the setting of higher inductive types is called \emph{homotopy initiality} \cite{sojakova}, and it is defined as follows for $\mathsf{Nat}$: \begin{alignat*}{5} \mathsf{Initiality} : (\alpha : \mathsf{Alg}) \rightarrow \mathsf{isContr}\,(\mathsf{Morphism}\,\mathsf{Nat}^*\,\alpha) \end{alignat*} \noindent where $\mathsf{isContr}\,A \equiv (a : A)\times((a' : A)\rightarrow a = a')$. Hence, there is a unique morphism from the initial algebra to any other algebra; in the setting of homotopy type theory, this unique inhabitation is expressed as contractibility. Observe that the definitions for $\mathsf{Induction}$, $\mathsf{Recursion}$ and $\mathsf{Initiality}$ need not refer to natural numbers, and can be used similarly in cases of other structures. Thus, the task in the following is to derive algebras, homomorphisms, displayed algebras and sections from HIIT signatures, in a way which generalizes beyond the current $\mathsf{Nat}$ example to indexed types, induction-induction and higher constructors. But even in the general case, displayed algebras yield induction motives and methods, and homomorphisms and sections yield a function for each type constructor and a $\beta$-rule for each point or path constructor. We compute algebras and the other notions by induction on the syntax of the theory of signatures. However, first we need to clarify the formal foundations of these computations. \section{The Coherence Problem and Syntactic Translations} \label{sec:coherence} The next task would be to define a computation which takes as input a $\hat{\Gamma}\vdash\Delta$ signature, and returns the corresponding type of algebras in some type theory. This would behave as a ``standard'' model of signatures, which simply maps each construction in the syntax to its counterpart: types to types, the universe to a universe, functions to functions, and so on. However, it is important to interpret signatures into a theory without uniqueness of identity proofs (UIP), because we are considering \emph{higher} inductive types, and hence must remain compatible with higher-dimensional interpretations. In particular, we need to interpret type constructors in signatures into type universes which are not truncated to any homotopy level. In this setting, even mundane natural number algebras $\alpha : (N : \mathsf{Type}) \times N \times (N \rightarrow N)$ may have arbitrary higher-dimensional structure. At first glance, we might think that the simplest way to formalize the algebra interpretation is the following: \begin{enumerate} \item Assume as metatheory a type theory without UIP. \item In this setting, define a formal syntax of the theory of signatures. \item Give a standard interpretation of signatures into the UIP-free metatheory. \end{enumerate} It may come as a surprise that realizing the above steps is an \emph{open problem}, for any syntax of a dependent type theory. We call the problem of interpreting syntaxes of type theories into a UIP-free metatheory a \emph{coherence problem}. This issue appears to arise with all known ways of defining syntaxes for dependent type theories. Shulman previously discussed this problem in \cite{hottshouldeat}. It is also related to the problem of constructing semisimplicial types in homotopy type theory; as explained in \cite{hottshouldeat}, solving our coherence problem enables a construction of semisimplicial types.
In the following, we first consider the coherence problem in two settings: with intrinsically typed higher inductive-inductive syntaxes, then with conventional syntaxes involving preterms and inductively defined typing and conversion relations. Then, we present syntactic translations as a partial solution to the coherence problem. \subsection{Interpreting Intrinsic Syntax} Following Altenkirch and Kaposi \cite{ttintt}, one might define the syntax of a type theory as a category with families (CwF) \cite{dybjer1995internal} extended with additional type formers, which supports an induction principle. The CwF part provides a calculus and equational theory for explicit substitutions, upon which one can build additional type structure. We present an excerpt below: \begin{alignat*}{5} & \mathsf{Con} && : \mathsf{Set} && \text{contexts} \\ & \mathsf{Ty} && : \mathsf{Con}\rightarrow\mathsf{Set} && \text{types} \\ & \mathsf{Sub} && : \mathsf{Con}\rightarrow\mathsf{Con}\rightarrow\mathsf{Set} && \text{substitutions} \\ & \mathsf{Tm} && : (\Gamma:\mathsf{Con})\rightarrow\mathsf{Ty}\,\Gamma\rightarrow\mathsf{Set} \hspace{2em} && \text{terms} \\ & \cdot && : \mathsf{Con} && \text{empty context} \\ & \mathord{\hspace{1pt}\text{--}\hspace{1pt}}\rhd\mathord{\hspace{1pt}\text{--}\hspace{1pt}} && : (\Gamma:\mathsf{Con})\rightarrow\mathsf{Ty}\,\Gamma\rightarrow\mathsf{Con} && \text{context extension} \\ & \mathord{\hspace{1pt}\text{--}\hspace{1pt}}[\mathord{\hspace{1pt}\text{--}\hspace{1pt}}] && : \mathsf{Ty}\,\Delta\rightarrow\mathsf{Sub}\,\Gamma\,\Delta\rightarrow\mathsf{Ty}\,\Gamma && \text{type substitution} \\ & \mathsf{id} && : \mathsf{Sub}\,\Gamma\,\Gamma && \text{identity substitution} \\ & [\mathsf{id}] && : A[\mathsf{id}] = A && \text{action of}\,\,\mathsf{id}\,\,\text{on types}\\ & \mathord{\hspace{1pt}\text{--}\hspace{1pt}}\circ\mathord{\hspace{1pt}\text{--}\hspace{1pt}} && : \mathsf{Sub}\,\Theta\,\Delta\rightarrow\mathsf{Sub}\,\Gamma\,\Theta\rightarrow\mathsf{Sub}\,\Gamma\,\Delta && \text{substitution composition} \\ & \mathord{\hspace{1pt}\text{--}\hspace{1pt}}[\mathord{\hspace{1pt}\text{--}\hspace{1pt}}] && : \mathsf{Tm}\,\Delta\,A\rightarrow(\sigma:\mathsf{Sub}\,\Gamma\,\Delta)\rightarrow\mathsf{Tm}\,\Gamma\,(A[\sigma])\hspace{1em} && \text{term substitution} \\ & ... && \\ & \mathsf{U} && : \mathsf{Ty}\,\Gamma && \text{universe} \\ & {\mathsf{U}[]} && : \mathsf{U}[\sigma] = \mathsf{U} && \text{substituting the universe} \\ & \mathsf{El} && : \mathsf{Tm}\,\Gamma\,\mathsf{U} \rightarrow \mathsf{Ty}\,\Gamma && \text{decoding} \\ & ... && \\ & \Pi && : (a:\mathsf{Tm}\,\Gamma\,\mathsf{U})\rightarrow\mathsf{Ty}\,(\Gamma\rhd \mathsf{El}\,a)\rightarrow\mathsf{Ty}\,\Gamma && \text{functions} \\ & ... && \end{alignat*} This notion of syntax is much more compact and often more convenient to use than extrinsic syntaxes. It is in essence ``merely'' a formalization of CwFs, which are often used in categorical semantics of type theory. However, its rigorous metatheory is the subject of ongoing research (including the current paper). In a set-truncated setting, the current authors and Altenkirch have previously developed semantics and constructed initial algebras \cite{kaposi2019constructing}, but here we need to work in a non-truncated theory.
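To illustrate the gap concretely: plain Agda accepts the sort declarations and the point constructors above as an inductive-inductive definition, but the equality constructors have no native counterpart. In the following hypothetical sketch they can only be postulated, so they do not compute, and the set-truncation constructors discussed next are equally inexpressible:
\begin{verbatim}
open import Relation.Binary.PropositionalEquality using (_≡_)

data Con : Set
data Ty  : Con → Set
data Sub : Con → Con → Set

data Con where
  ∙   : Con
  _▹_ : (Γ : Con) → Ty Γ → Con

data Sub where
  id  : ∀ {Γ} → Sub Γ Γ
  _∘_ : ∀ {Γ Θ Δ} → Sub Θ Δ → Sub Γ Θ → Sub Γ Δ

data Ty where
  _[_] : ∀ {Γ Δ} → Ty Δ → Sub Γ Δ → Ty Γ
  U    : ∀ {Γ} → Ty Γ

postulate  -- would be equality constructors of the HIIT
  [id] : ∀ {Γ} (A : Ty Γ) → A [ id ] ≡ A
  U[]  : ∀ {Γ Δ} (σ : Sub Γ Δ) → U [ σ ] ≡ U
\end{verbatim}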
\subsubsection{Set-truncation} Additionally, we prefer to set-truncate the syntax by adding the following constructors, for reasons explained shortly: \begin{alignat*}{5} & \mathsf{setTy} && : (A\,B : \mathsf{Ty}\,\Gamma)(p\,q : A = B)\rightarrow p = q \\ & \mathsf{setTm} && : (t\,u : \mathsf{Tm}\,\Gamma\,A)(p\,q : t = u)\rightarrow p = q \\ & \mathsf{setSub} && : (\sigma\,\delta : \mathsf{Sub}\,\Gamma\,\Delta)(p\,q : \sigma = \delta)\rightarrow p = q \end{alignat*} \noindent We can omit the rule for contexts, as it is derivable from the above ones. Set truncation forces definitional equality of the syntax (as defined by equality constructors in the HIIT signature) to be proof irrelevant. If we omit set-truncation, then the defined HIIT becomes very different from what we expect the syntax to be. We can easily show that the non-truncated syntax does not form sets. For example, $\mathsf{U}[]$ and $[\mathsf{id}]$ are two proofs of $\mathsf{U}[\mathsf{id}] = \mathsf{U}$, and they are not forced to be equal. Assuming univalence, we can give a model where types are interpreted as closed metatheoretic types, $\mathsf{U}$ is interpreted as metatheoretic $\mathsf{Bool}$, $[\mathsf{id}]$ is interpreted as $\mathsf{refl} : A = A$ for some metatheoretic type $A$, and $\mathsf{U}[]$ is interpreted as the $\mathsf{Bool}$ negation equivalence, thereby formally distinguishing $\mathsf{U}[]$ and $[\mathsf{id}]$. Hence, by Hedberg's theorem \cite{hedberg}, the syntax does not have decidable equality, and thus it does not support decidable type checking. This also implies that the non-truncated intrinsic syntax is not constructible from set quotients of extrinsic terms (since those always form sets). The non-truncated syntax just does not seem to be a sensible notion. This situation is similar to how categories in homotopy type theory need to have set-truncated morphisms \cite{ahrens2015univalent}. Unfortunately, set-truncation makes it impossible to directly interpret syntactic types as elements of a UIP-free metatheoretic $\mathsf{Type}_i$ universe. This is because we must provide interpretations for all set-truncation constructors, which amounts to showing that the interpretations of $\mathsf{Ty}$, $\mathsf{Tm}$, and $\mathsf{Sub}$ are all sets. However, we cannot show $\mathsf{Type}_i$ to be a set without UIP. A possible solution would be to add \emph{all higher coherences} instead of set-truncating, which would yield something like an ($\omega$, 1)-CwF, but this is also an open research problem \cite{altenkirch2018towards, finster2019structure}. \subsection{Interpreting Extrinsic Syntax} An extrinsic syntax for type theory is defined in the following way: \begin{enumerate} \item We inductively define a \emph{presyntax}: sets of preterms, pretypes, precontexts, and possibly presubstitutions. These are not assumed to be well-formed, and only serve as raw material for expressions. \item We give mutual inductive definitions for the following relations on the presyntax: well-formedness, typing and conversion. \end{enumerate} This is the conventional way of presenting the syntax; see e.g.\ \cite{winterhalter2019eliminating} for a detailed machine-checked formalization in this style. The main advantage compared to the intrinsic syntax is that this only requires conservative inductive definitions in the metatheory, which are also natively supported in current proof assistants, unlike HIITs.
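As a concrete, if toy, rendering of the presyntax from step 1 above, the following Haskell datatype is a minimal sketch using de Bruijn indices; the constructor set is illustrative rather than exhaustive. The well-formedness, typing and conversion relations of step 2 would then be defined separately over these raw terms.
\begin{verbatim}
-- A minimal presyntax sketch: nothing here is guaranteed to be well
-- formed; typing and conversion are separate inductive relations on
-- these raw terms.
data PreTm
  = Var Int            -- de Bruijn index
  | U                  -- the universe
  | El PreTm           -- decoding a term of the universe into a type
  | Pi PreTm PreTm     -- dependent function type (domain, codomain)
  | Lam PreTm          -- abstraction (body only, Curry-style)
  | App PreTm PreTm    -- application
  deriving (Eq, Show)

-- A precontext is simply a list of pretypes.
type PreCon = [PreTm]

-- Ill-typed preterms are representable, e.g. applying the universe:
illTyped :: PreTm
illTyped = App U (Var 0)
\end{verbatim}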
The main disadvantages are verbosity, a lower level of abstraction, and the difficulty of pinning down a notion of model for the syntax. What about interpreting extrinsic syntax into a UIP-free universe? It is widely accepted that extrinsic syntaxes have standard interpretations in set-truncated metatheories, although carrying this out in formal detail is technically challenging. Streicher's seminal work \cite{streicher2012semantics} laid out a template for doing this: first we construct a family of partial functions from the presyntax to the semantic domain, and then prove that these functions are total on well-formed input. However, the coherence problem still arises: it is required that definitional equality is proof irrelevant. In a type-theoretic setting, this means that we need to propositionally truncate the conversion relation. This again prevents us from interpreting the syntax into a UIP-free universe. We have to interpret definitional equality in the syntax as propositional equality in the metatheory, but since the former is propositionally truncated, we can only eliminate it into propositions, and metatheoretic equality types are not generally propositions in the absence of UIP. Could we define a conversion relation which is propositional, but not truncated? For example, conversion could be defined in terms of a deterministic conversion checking algorithm. But then a complication is that we do not have a proof that conversion checking is \emph{total} and \emph{stable under substitution} while we are still in the process of defining the syntax. Alternatively, we could first define a normalization algorithm for an extrinsic syntax in a UIP-free metatheory, and then try to interpret \emph{only normal forms}. Abel, \"Ohman and Vezzosi demonstrated a UIP-free conversion checking algorithm in type theory \cite{abel2017decidability}, which suggests that UIP-free normalization may be possible as well. But since this normal form interpretation is a major technical challenge, and it has not been carried out yet, we cannot use it to justify constructions in the current paper. \subsection{Syntactic Translations} We can circumvent the coherence problem in the following way: \begin{enumerate} \item Define a \emph{source} and a \emph{target} syntax in any suitable metatheory, where the target theory does not have UIP. The source and target theories need not differ. \item Interpret the source syntax into the target syntax. \end{enumerate} For extrinsic syntaxes, it is generally understood that a syntactic translation has to preserve definitional equalities in the source syntax. For intrinsic syntaxes, preservation of definitional equality is automatically enforced by the equality constructors. See Boulier et al.\ \cite{next700} for a showcase of syntactic translations. Now, we can take the source syntax to be the theory of signatures from Section \ref{sec:tos}, and the target syntax to be the external syntax from Section \ref{sec:external}. Truncation in the source syntax is not an issue here, because the target syntax is likewise truncated. However, using syntactic translations is also a significant restriction. As always, we must map equal inputs to equal outputs, but now the notion of equality for outputs coincides with definitional equality in the syntax of the target theory, which is far more restrictive than propositional equality.
Recall the weak $\beta$-rule for $\J$ in the theory of signatures in Section \ref{sec:tos}: if we instead used a strict equality, then the translations in Sections \ref{sec:morphisms} and \ref{sec:sections} would not work, because they map $\J_{a\,t\,\,(x.z.p)}\,pr\,_t\,\mathsf{refl}$ and $pr$ to terms which are equal propositionally, but not definitionally. This restriction also prevents us from defining more translations which cover other parts of the categorical semantics, e.g.\ composition of homomorphisms. We return to this topic in Section \ref{sec:categorical}. In the following four sections we present syntactic translations yielding algebras, homomorphisms, displayed algebras and their sections. The presentation here, like in Section \ref{sec:tos}, is informal and focuses on readability. In particular, we omit interpretations for substitutions and preservation proofs for definitional equalities. \section{Algebras} \label{sec:algebras} We use $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{A}$ to denote the translation which computes algebras. It is specified as follows, for contexts, types and terms in the theory of signatures. \[ \infer{\hat{\Gamma}\mathbin{\hat\vdash}\Delta^\mathsf{A}\in\hat{\mathsf{Type}}_{\hat{1}}}{\hat{\Gamma}\vdash\Delta} \hspace{2em} \infer{\hat{\Gamma}\mathbin{\hat\vdash} A^\mathsf{A} \in \Delta^\mathsf{A}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{1}}}{\hat{\Gamma}\hat;\,\Delta\vdash A} \hspace{2em} \infer{\hat{\Gamma}\mathbin{\hat\vdash} t^\mathsf{A} \in \hat{(}\hat{\gamma}\in\Delta^\mathsf{A}\hat{)}\mathbin{\hat\ra} A^\mathsf{A}\,\hat{\gamma}}{\hat{\Gamma}\hat;\,\Delta\vdash t : A} \] The $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{A}$ translation is essentially the standard interpretation of signatures, where every construction in the source syntax is interpreted with a corresponding {\color{BrickRed}brick red} construction in the target syntax. The only notable change is in the interpretation of contexts: a source context is interpreted as an iterated $\Sigma$-type.
\begin{alignat*}{5} & (1)\hspace{1em} && \boldsymbol{\cdot}^\mathsf{A} && :\equiv \hat{\top} \\ & && (\Delta,x:A)^\mathsf{A} && :\equiv \hat{(}\hat{\gamma}\in\Delta^\mathsf{A}\hat{)}\mathbin{\hat\times} A^\mathsf{A}\,\hat{\gamma} \\ & && x^\mathsf{A}\,\hat{\gamma} && :\equiv x^{\text{th}}\text{ component in } \hat{\gamma} \\ & (2) && \mathsf{U}^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{\mathsf{Type}}_{\hat{0}} \\ & && (\underline{a})^\mathsf{A}\,\hat{\gamma} && :\equiv a^\mathsf{A}\,\hat{\gamma} \\ & (3) && ((x:a)\rightarrow B)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{(}\hat{x}\in a^\mathsf{A}\,\hat{\gamma}\hat{)}\mathbin{\hat\ra} B^\mathsf{A}\,\hat{(}\hat{\gamma}\hat,\,\hat{x}\hat{)} \\ & && (t\,u)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{(}t^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{(}u^\mathsf{A}\,\hat{\gamma}\hat{)} \\ & (4) && (t=_a u)^\mathsf{A}\,\hat{\gamma} && :\equiv t^\mathsf{A}\,\hat{\gamma} \mathbin{\hat=} u^\mathsf{A}\,\hat{\gamma} \\ & && (\mathsf{refl}_t)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{\mathsf{refl}} \\ & && (\J_{a\,t\,(x.z.p)}\,pr\,_u\,eq)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{\J}_{\hat{(}a^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{(}t^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{(}\hat{\lambda} \hat{x}\,\hat{z}.p^\mathsf{A}\,\hat{(}\hat{\gamma}\hat,\,\hat{x}\hat,\,\hat{z}\hat{)}\hat{)}}\,\hat{(}pr^\mathsf{A}\,\hat{\gamma}\hat{)}\,_{\hat{(}u^\mathsf{A}\,\hat{\gamma}\hat{)}}\,\hat{(}eq^\mathsf{A}\,\hat{\gamma}\hat{)} \\ & && (\J\beta_{a\,t\,(x.z.p)}\,pr)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{\mathsf{refl}} \\ & (5) && (a=_\mathsf{U} b)^\mathsf{A}\,\hat{\gamma} && :\equiv a^\mathsf{A}\,\hat{\gamma} \mathbin{\hat=} b^\mathsf{A}\,\hat{\gamma} \\ & && (\mathsf{refl}_a)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{\mathsf{refl}} \\ & && (\J_{a\,(x.z.p)}\,pr\,_b\,eq)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{\J}_{\hat{\mathsf{Type}}_{\hat{0}}\,\hat{(}a^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{(}\hat{\lambda} \hat{x}\,\hat{z}.p^\mathsf{A}\,\hat{(}\hat{\gamma}\hat,\,\hat{x}\hat,\,\hat{z}\hat{)}\hat{)}}\,\hat{(}pr^\mathsf{A}\,\hat{\gamma}\hat{)}\,_{\hat{(}b^\mathsf{A}\,\hat{\gamma}\hat{)}}\,\hat{(}eq^\mathsf{A}\,\hat{\gamma}\hat{)} \\ & && (\J\beta_{a\,(x.z.p)}\,pr)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{\mathsf{refl}} \\ & (6) && ((\hat{x}\in \hat{A})\rightarrow B)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{(}\hat{x}\in \hat{A}\hat{)}\mathbin{\hat\ra} B^\mathsf{A}\,\hat{\gamma} \\ & && (t\,\hat{u})^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{(}t^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{u} \\ & (7) && ((\hat{x}\in \hat{A})\rightarrow b)^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{(}\hat{x}\in \hat{A}\hat{)}\mathbin{\hat\ra} b^\mathsf{A}\,\hat{\gamma} \\ & && (t\,\hat{u})^\mathsf{A}\,\hat{\gamma} && :\equiv \hat{(}t^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{u} \end{alignat*} For example, $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{A}$ acts as follows on the signature for the circle: \begin{alignat*}{5} & && (\boldsymbol{\cdot},\,S^1 : \mathsf{U},\,b:\underline{S^1},\,loop: \underline{b=b})^\mathsf{A} \equiv \hat{\hat{\top}\mathbin{\hat\times}(\hat{S^1}\in\hat{\mathsf{Type}}_{\hat{0}})\mathbin{\hat\times}(\hat{b}\in \hat{S^1})\mathbin{\hat\times}(\hat{loop\in b=b})} \end{alignat*} Note that the resulting $\Sigma$-type is left-nested, and we use the reassociated notation for readability.
The result could be written without syntactic sugar in the following way: \begin{alignat*}{5} & \hat{\Big(\hat{x''}\in\big(\hat{x'}\in(\hat{x}\in\hat{\top})\mathbin{\hat\times}\hat{\mathsf{Type}}_{\hat{0}}\big)\mathbin{\hat\times}\hat{\mathsf{proj}_2}\,\hat{x'}\Big)\mathbin{\hat\times}(\hat{\mathsf{proj}_2}\,\hat{x''}\mathbin{\hat=} \hat{\mathsf{proj}_2}\,\hat{x''})} \end{alignat*} We shall keep to the short notation from now on. \section{Displayed Algebras} \label{sec:displayed} The $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$ translation computes displayed algebras for a signature, which can be viewed as bundles of induction motives and methods. $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$ is a unary logical predicate translation over the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{A}$ translation. It is related to the logical predicate translation of Bernardy et al.\ \cite{bernardy12parametricity}, but our implementation differs by interpreting contexts as $\Sigma$-types instead of extended contexts. We fix a universe level $\hat{i}$ for the translation. For each context $\Delta$, $\Delta^\mathsf{D}$ is a predicate over the standard interpretation $\Delta^\mathsf{A}$. For a type $\Delta\vdash A$, $A^\mathsf{D}$ is a predicate over $A^\mathsf{A}$, which also depends on $\hat{\gamma}\in\Delta^\mathsf{A}$ and a witness of $\Delta^\mathsf{D}\,\hat{\gamma}$. All of these may refer to a target theory context $\hat{\Gamma}$. \[ \infer{\hat{\Gamma}\mathbin{\hat\vdash}\Delta^\mathsf{D} \in \Delta^\mathsf{A}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i+1}}}{\hat{\Gamma}\vdash\Delta} \hspace{2em} \infer{ \hat{\Gamma}\mathbin{\hat\vdash} A^\mathsf{D} \in \hat{(}\hat{\gamma}\in\Delta^\mathsf{A}\hat{)}\mathbin{\hat\ra} \Delta^\mathsf{D}\,\hat{\gamma}\mathbin{\hat\ra} A^\mathsf{A}\,\hat{\gamma}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i+1}}} {\hat{\Gamma}\hat;\,\Delta\vdash A} \] For a term $t$, $t^\mathsf{D}$ witnesses that the predicate corresponding to its type holds for $t^\mathsf{A}$; this can be viewed as a \emph{fundamental theorem} for the predicate interpretation. \[ \infer{\hat{\Gamma}\mathbin{\hat\vdash} t^\mathsf{D} \in \hat{(}\hat{\gamma}\in\Delta^\mathsf{A}\hat{)}\hat{(}\hat{\gamma^D}\in\Delta^\mathsf{D}\,\hat{\gamma}\hat{)}\mathbin{\hat\ra} A^\mathsf{D}\,\hat{\gamma}\,\hat{\gamma^D}\,\hat{(}t^\mathsf{A}\,\hat{\gamma}\hat{)}}{\hat{\Gamma}\hat;\,\Delta\vdash t : A} \] The implementation of $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$ is given below. We leave $\hat{\gamma}$-s mostly implicit, marking only $\hat{\gamma^D}$ witnesses.
\begingroup \allowdisplaybreaks \begin{alignat*}{5} & (1)\hspace{1em} && \boldsymbol{\cdot}^\mathsf{D}\,\hat{\gamma} && :\equiv \hat{\top} \\ & && (\Delta,\,x:A)^\mathsf{D}\,(\hat{\gamma}\hat,\,\,\hat{t}) && :\equiv \hat{(}\hat{\gamma^D}\in\Delta^\mathsf{D}\,\hat{\gamma}\hat{)}\mathbin{\hat\times} A^\mathsf{D}\,\hat{\gamma^D}\,\hat{t} \\ & && x^\mathsf{D}\,\hat{\gamma^D} && :\equiv x^{\text{th}}\text{ component in } \hat{\gamma^D} \\ & (2) && \mathsf{U}^\mathsf{D}\,\hat{\gamma^D}\,\hat{A} && :\equiv \hat{A} \mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i}} \\ & && (\underline{a})^\mathsf{D}\,\hat{\gamma^D}\,\hat{t} && :\equiv a^\mathsf{D}\,\hat{\gamma^D}\,\hat{t} \\ & (3) && ((x:a)\rightarrow B)^\mathsf{D}\,\hat{\gamma^D}\,\hat{f} && :\equiv \hat{(}\hat{x}\in a^\mathsf{A}\,\hat{\gamma}\hat{)}\hat{(}\hat{x^D}\in a^\mathsf{D}\,\hat{\gamma^D}\,\hat{x}\hat{)} \mathbin{\hat\ra} B^\mathsf{D}\,\hat{(}\hat{\gamma}\hat,\,\hat{x}\hat{)}\,\hat{(}\hat{\gamma^D}\hat,\,\hat{x^D}\hat{)}\,\hat{(}\hat{f}\,\hat{x}\hat{)} \\ & && (t\,u)^\mathsf{D}\,\hat{\gamma^D} && :\equiv \hat{(}t^\mathsf{D}\,\hat{\gamma^D}\hat{)}\,\hat{(} u^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{(}u^\mathsf{D}\,\hat{\gamma^D}\hat{)} \\ & (4) && (t=_a u)^\mathsf{D}\,\hat{\gamma^D}\,\hat{e} && :\equiv \hat{\mathsf{tr}}_{\hat{(}a^\mathsf{D}\,\hat{\gamma^D}\hat{)}}\,\hat{e}\,\hat{(}t^\mathsf{D}\,\hat{\gamma^D}\hat{)} \mathbin{\hat=} u^\mathsf{D}\,\hat{\gamma^D} \\ & && (\mathsf{refl}_t)^\mathsf{D}\,\hat{\gamma^D} && :\equiv \hat{\mathsf{refl}}_{\hat{(}t^\mathsf{D}\,\hat{\gamma^D}\hat{)}} \\ & && (\J_{a\,t\,(x.z.p)}\,pr\,_u\,eq)^\mathsf{D}\,\hat{\gamma^D} && :\equiv \hat{\J}\,\hat{\big(}\hat{\J}\,\hat{(}pr^\mathsf{D}\,\hat{\gamma^D}\hat{)}\,\hat{(}eq^\mathsf{A}\,\hat{\gamma}\hat{)}\hat{\big)}\,\hat{(}eq^\mathsf{D}\,\hat{\gamma^D}\hat{)} \\ & && (\J\beta_{a\,t\,(x.z.p)}\,pr)^\mathsf{D}\,\hat{\gamma^D} && :\equiv \hat{\mathsf{refl}} \\ & (5) && (a=_\mathsf{U} b)^\mathsf{D}\,\hat{\gamma^D}\,\hat{e} && :\equiv \hat{\mathsf{tr}}_{\hat{(\lambda A.A\rightarrow\hat{\mathsf{Type}}_0)}}\,\hat{e}\,\hat{(}a^\mathsf{D}\,\hat{\gamma^D}\hat{)} \mathbin{\hat=} b^\mathsf{D}\,\hat{\gamma^D} \\ & && (\mathsf{refl}_a)^\mathsf{D}\,\hat{\gamma^D} && :\equiv \hat{\mathsf{refl}}_{\hat{(}a^\mathsf{D}\,\hat{\gamma^D}\hat{)}} \\ & && (\J_{a\,(x.z.p)}\,pr\,_b\,eq)^\mathsf{D}\,\hat{\gamma^D} && :\equiv \hat{\J}\,\hat{\big(}\hat{\J}\,\hat{(}pr^\mathsf{D}\,\hat{\gamma^D}\hat{)}\,\hat{(}eq^\mathsf{A}\,\hat{\gamma}\hat{)}\hat{\big)}\,\hat{(}eq^\mathsf{D}\,\hat{\gamma^D}\hat{)} \\ & && (\J\beta_{a\,(x.z.p)}\,pr)^\mathsf{D}\,\hat{\gamma^D} && :\equiv \hat{\mathsf{refl}} \\ & (6) && ((\hat{x}\in \hat{A})\rightarrow B)^\mathsf{D}\,\hat{\gamma^D}\,\hat{f} && :\equiv \hat{(}\hat{x}\in \hat{A}\hat{)}\mathbin{\hat\ra} B^\mathsf{D}\,\hat{\gamma^D}\,\hat{(}\hat{f}\,\hat{x}\hat{)} \\ & && (t\,\hat{u})^\mathsf{D}\,\hat{\gamma^D} && :\equiv t^\mathsf{D}\,\hat{\gamma^D}\,\hat{u} \\ & (7) && ((\hat{x}\in \hat{A})\rightarrow b)^\mathsf{D}\,\hat{\gamma^D}\,\hat{f} && :\equiv \hat{(}\hat{x}\in \hat{A}\hat{)}\mathbin{\hat\ra} b^\mathsf{D}\,\hat{\gamma^D}\,\hat{(}\hat{f}\,\hat{x}\hat{)} \\ & && (t\,\hat{u})^\mathsf{D}\,\hat{\gamma^D} && :\equiv t^\mathsf{D}\,\hat{\gamma^D}\,\hat{u} \end{alignat*} \endgroup The predicate for a context is given by iterating $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$ for its constituent types. For a variable, the corresponding witness is looked up from $\hat{\gamma^D}$.
The translation of the universe, given an element $\hat{A}\in\mathsf{U}^\mathsf{A}\,\hat{\gamma}$ (with $\mathsf{U}^\mathsf{A}\,\hat{\gamma}\equiv\hat{\mathsf{Type}}_{\hat{0}}$), returns the predicate space over $\hat{A}$. For $\underline{a}$ types, we just return the translation of $a$. The predicate for a function type with inductive parameters expresses preservation of predicates. Witnesses for applications are given by recursive application of $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$. The definitions for the other (non-inductive) function spaces are similar, except that there is no predicate for the domain types, and thus no witnesses are required. The translation of paths $t=_a u$ between elements of $a : \mathsf{U}$ witnesses, for each $\hat{e}\in(t=_a u)^\mathsf{A}\,\hat{\gamma}$, i.e.\ $\hat{e}\in t^\mathsf{A}\,\hat{\gamma}\mathbin{\hat=} u^\mathsf{A}\,\hat{\gamma}$, that $t^\mathsf{D}$ and $u^\mathsf{D}$ are equal. As these have different types, we have to transport over the original equality $\hat{e}$. Hence, induction methods for path constructors will be \emph{paths over paths} in the sense of homotopy type theory. $\mathsf{refl}$ is interpreted as reflexivity in the target syntax. The interpretation of $\J$ is given by a double $\hat{\J}$ application, borrowing the definition from Lasson \cite{lasson}. Here, we use a shortened $\hat{\J}$ notation; see the formalization (Section \ref{sec:formalization}) for details. For $a =_\mathsf{U} b$ paths, the interpretation is essentially the same as with the other path type. Again, let us consider the circle example: \begin{alignat*}{5} & && (\boldsymbol{\cdot},\,S^1 : \mathsf{U},\,b:\underline{S^1},\,loop: \underline{b=b})^\mathsf{D}\,\,\hat{(}\hat{\tt,\,S^1,\,b,\,loop}\hat{)}\\ & \equiv \,\, && \hat{\top}\mathbin{\hat\times}\hat{(}\hat{S^{1D}}\in \hat{S^1}\mathbin{\hat\ra}\hat{\mathsf{Type}}_{\hat{i}}\hat{)}\mathbin{\hat\times}\hat{(}\hat{b^D}\in \hat{S^{1D}\,b}\hat{)}\mathbin{\hat\times}\hat{(}\hat{loop^D\in \hat{\mathsf{tr}}_{\,S^{1D}}\,loop\,b^D = b^D}\hat{)} \end{alignat*} The inputs of $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$ here are the signature for the circle (the context in black) and an $\hat{S^{1}}$-algebra consisting of three non-$\top$ components. It returns a family over the type $\hat{S^1}$, an element of this family $\hat{b^D}$ at index $\hat{b}$, and a path between $\hat{b^D}$ and $\hat{b^D}$ which lies over $\hat{loop}$. These are the same as the usual induction motives and methods for the circle, e.g.\ as described in \cite{HoTTbook}. \section{From Logical Relations to Homomorphisms} \label{sec:morphisms} In this section, we specify the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{M}$ translation, which computes homomorphisms of algebras. We do so by first considering a logical relation interpretation, and then refining it towards homomorphisms. For dependent type theories, logical relation models are well-known (see e.g.\ \cite{atkey}), so they are certainly applicable to our restricted syntax as well. Below, we list induction motives for $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{M}$; these remain the same as we move from logical relations to homomorphisms. The universe level $\hat{i}$ is the one chosen previously for the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$ translation.
\[ \infer{\hat{\Gamma}\mathbin{\hat\vdash}\Delta^\mathsf{M} \in \Delta^\mathsf{A}\mathbin{\hat\ra}\Delta^\mathsf{A}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i}}} {\hat{\Gamma}\vdash\Delta} \hspace{1em} \infer{ \hat{\Gamma}\mathbin{\hat\vdash} A^\mathsf{M} \in \hat{(}\hat{\gamma_0\,\gamma_1}\in\Delta^\mathsf{A}\hat{)}\mathbin{\hat\ra} \Delta^\mathsf{M}\,\hat{\gamma_0\,\gamma_1}\mathbin{\hat\ra} A^\mathsf{A}\,\hat{\gamma_0}\mathbin{\hat\ra} A^\mathsf{A}\,\hat{\gamma_1}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i}}} {\hat{\Gamma}\hat;\,\Delta\vdash A} \] \[ \infer{ \hat{\Gamma}\mathbin{\hat\vdash} t^\mathsf{M}\, \hat{ : (\gamma_0\,\gamma_1 : \blc{\Delta^\mathsf{A}})(\gamma^M : \blc{\Delta^\mathsf{M}}\,\gamma_0\,\gamma_1)\rightarrow \blc{A^\mathsf{M}}\,\gamma_0\,\gamma_1\,\gamma^M\,(\blc{t^\mathsf{A}}\,\gamma_0)\,(\blc{t^\mathsf{A}}\,\gamma_1) }} {\hat{\Gamma}\hat;\,\Delta\vdash t : A} \] Contexts are mapped to (proof-relevant) relations and types to families of relations depending on interpreted contexts. For terms, we again get a fundamental theorem: every term has related standard interpretations in related semantic contexts. We present below the logical relation interpretation only for contexts, variables, the universe and the inductive function space. \begingroup \allowdisplaybreaks \begin{alignat*}{5} & (1)\hspace{1em} && \boldsymbol{\cdot}^\mathsf{M}\hat{\gamma_0\,\gamma_1} && :\equiv \hat{\top} \\ & && (\Delta,x:A)^\mathsf{M}\,\hat{(\gamma_0,\alpha_0)\,(\gamma_1,\alpha_1)} && :\equiv \hat{(\gamma^M : \blc{\Delta^\mathsf{M}}\,\gamma_0\,\gamma_1)\times \blc{A^\mathsf{M}}\,\gamma_0\,\gamma_1\,\gamma^M\,\alpha_0\,\alpha_1} \\ & && x^\mathsf{M}\,\hat{\gamma_0\,\gamma_1\,\gamma^M} && :\equiv x^{\text{th}}\text{ component in } \hat{\gamma^M} \\ & (2) && \mathsf{U}^\mathsf{M}\,\hat{\gamma_0\,\gamma_1\,\gamma^M\,A_0\,A_1} && :\equiv \hat{A_0\rightarrow A_1 \rightarrow \hat{\mathsf{Type}}_0} \\ & && (\underline{a})^\mathsf{M}\,\hat{\gamma_0\,\gamma_1\,\gamma^M\,t_0\,\,t_1} && :\equiv \hat{ \blc{a^\mathsf{M}}\,\gamma_0\,\gamma_1\,\gamma^M\,t_0\,\,t_1}\\ & (3) && ((x:a)\rightarrow B)^\mathsf{M}\,\hat{\gamma_0\,\gamma_1\,\gamma^M\,f_0\,f_1} && :\equiv \hat{(x_0 : \blc{(\underline{a})^\mathsf{A}}\,\gamma_0)(x_1 : \blc{(\underline{a})^\mathsf{A}}\,\gamma_1)(x^M : \blc{(\underline{a})^\mathsf{M}}\,\gamma_0\,\gamma_1\,\gamma^M)}\\ & && && \hat{\hspace{1.7em}\rightarrow \blc{B^\mathsf{M}}\,(\gamma_0,\,x_0)\,(\gamma_1,\,x_1)\,(\gamma^M,\,x^M)\,(f_0\,x_0)\,(f_1\,x_1)} \end{alignat*} \endgroup We interpret the universe as a relation space, and function types as relations expressing pointwise relatedness of functions. This interpretation would work the same way for unrestricted (non-strictly positive) function types as well. As an example, this yields the following definition of logical relations between natural number algebras: \begin{alignat*}{5} & && (\boldsymbol{\cdot},Nat : \mathsf{U},zero:\underline{Nat},suc:Nat\rightarrow\underline{Nat})^\mathsf{M} \,\hat{(\tt,\,N_0,\,z_0,\,s_0)\,(\tt,\,N_1,\,z_1,\,s_1)} \\ & \equiv\,\, && \hat{\top\times(N^M : N_0\rightarrow N_1 \rightarrow \hat{\mathsf{Type}}_0)}\\ & && \hat{\hspace{1em}\times\,\,(z^M : N^M\,z_0\,z_1)}\\ & && \hat{\hspace{1em}\times\,\,(s^M : (x_0 : N_0)(x_1 : N_1)(x^M : N^M\,x_0\,x_1)\rightarrow N^M\,(s_0\,x_0)\,(s_1\,x_1))} \end{alignat*} However, we would like to have underlying functions instead of relations in homomorphisms.
We take a hint from the fact that for classical (simply-typed and single-sorted) algebraic theories, a logical relation is equivalent to a homomorphism if and only if the underlying relation is the graph of a function \cite[pg. 5]{udayReynolds}. Thus we make the following change: \begin{alignat*}{5} & \mathsf{U}^\mathsf{M}\,\hat{\gamma_0\,\gamma_1\,\gamma^M\,A_0\,A_1} && :\equiv \hat{A_0\rightarrow A_1} \end{alignat*} This requires us to change $(\underline{a})^\mathsf{M}$ as well, since we need to produce a type as a result, but $a^\mathsf{M}$ now yields a function. We can view the result of $a^\mathsf{M}$ as a functional relation, and use its graph to relate $t_0$ and $t_1$: \begin{alignat*}{5} & (\underline{a})^\mathsf{M}\,\hat{\gamma_0\,\gamma_1\,\gamma^M\,t_0\,\,t_1} && :\equiv \hat{( \blc{a^\mathsf{M}}\,\gamma_0\,\gamma_1\,\gamma^M\,t_0 = t_1)} \end{alignat*} At this point we have merely restricted relations to functions, and left the rest of the interpretation unchanged. However, this is not quite yet the desired notion of homomorphism. Consider now again the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{M}$ interpretation for natural numbers: \begin{alignat*}{5} & && (\boldsymbol{\cdot},Nat : \mathsf{U},zero:\underline{Nat},suc:Nat\rightarrow\underline{Nat})^\mathsf{M} \,\hat{(\tt,\,N_0,\,z_0,\,s_0)\,(\tt,\,N_1,\,z_1,\,s_1)} \\ & \equiv\,\, && \hat{\top\times(N^M : N_0\rightarrow N_1)}\\ & && \hat{\hspace{1em}\times\,\,(z^M : N^M\,z_0 = z_1)}\\ & && \hat{\hspace{1em}\times\,\,(s^M : (x_0 : N_0)(x_1 : N_1)(x^M : N^M\,x_0 = x_1)\rightarrow N^M\,(s_0\,x_0)=s_1\,x_1)} \end{alignat*} In $\hat{s^M}$, there is a superfluous $\hat{x^M}$ equality proof. Fortunately, in the translation of the inductive function space, we can just singleton contract $\hat{x^M}$ away, yielding an equivalent, but stricter definition: \begin{alignat*}{5} & ((x:a)\rightarrow B)^\mathsf{M}\,\hat{\gamma_0\,\gamma_1\,\gamma^M\,f_0\,f_1} :\equiv \\ & \hat{\hspace{2em}(x_0 : \blc{a^\mathsf{A}}\,\gamma_0)\rightarrow \blc{B^\mathsf{M}}\,(\gamma_0,\,x_0)\,(\gamma_1,\,\blc{a^\mathsf{M}}\,\gamma_0\,\gamma_1\,\gamma^M\,x_0)\,(\gamma^M,\,\mathsf{refl})\, (f_0\,x_0)\,(f_1\,(\blc{a^\mathsf{M}}\,\gamma_0\,\gamma_1\,\gamma^M\,x_0))} \end{alignat*} Now, the $\beta$-rule for successors is as expected: \begin{alignat*}{5} & \hat{s^M : (x_0 : N_0)\rightarrow N^M\,(s_0\,x_0)=s_1\,(N^M\,x_0)} \end{alignat*} Note that this singleton contraction is not possible for a general non-strictly positive function space. We rely on the domain being small: $(\underline{a})^\mathsf{M}$ yields an equation, but for general $\hat{\Gamma;\,}\Delta\vdash A$ types, $A^\mathsf{M}$ only yields an unknown relation. $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{M}$ is similar to the translation to displayed algebra sections, which is discussed in the next section. We discuss the interpretations of equalities and of the other (external) function types in more detail there. A full listing for the homomorphism translation can be found in Appendix \ref{sec:morphismrules}. \section{Displayed Algebra Sections} \label{sec:sections} The operation $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\S$ yields displayed algebra sections. Sections can be viewed as dependent homomorphisms: while homomorphisms are structure-preserving families of functions, sections are structure-preserving families of dependent functions. Contexts are interpreted as dependent relations between algebras and displayed algebras. We again fix a universe level $\hat{i}$.
\[ \infer{\hat{\Gamma}\mathbin{\hat\vdash}\Delta^\S \in \hat{(}\hat{\gamma}\in\Delta^\mathsf{A}\hat{)}\mathbin{\hat\ra} \Delta^\mathsf{D}\,\hat{\gamma}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i}}}{\hat{\Gamma}\vdash\Delta} \] Types are interpreted as dependent relations which additionally depend on the $\hat{\gamma}$, $\hat{\gamma^D}$, $\hat{\gamma^S}$ interpretations of the context. \[ \infer{\hat{\Gamma}\mathbin{\hat\vdash} A^\S \in \hat{(}\hat{\gamma}\in\Delta^\mathsf{A}\hat{)}\hat{(}\hat{\gamma^D}\in\Delta^\mathsf{D}\,\hat{\gamma}\hat{)}\hat{(}\hat{\gamma^S}\in\Delta^\S\,\hat{\gamma}\,\hat{\gamma^D}\hat{)}\hat{(}\hat{x}\in A^\mathsf{A}\,\hat{\gamma}\hat{)}\mathbin{\hat\ra} A^\mathsf{D}\,\hat{\gamma}\,\hat{\gamma^D}\,\hat{x}\mathbin{\hat\ra} \hat{\mathsf{Type}}_{\hat{i}}}{\hat{\Gamma}\hat;\,\Delta\vdash A} \] For a term $t$, $t^\S$ again witnesses a fundamental theorem. \[ \infer{\hat{\Gamma}\mathbin{\hat\vdash} t^\S \in \hat{(}\hat{\gamma}\in\Delta^\mathsf{A}\hat{)}\hat{(}\hat{\gamma^D}\in\Delta^\mathsf{D}\,\hat{\gamma}\hat{)}\hat{(}\hat{\gamma^S}\in\Delta^\S\,\hat{\gamma}\,\hat{\gamma^D}\hat{)}\mathbin{\hat\ra} A^\S\,\hat{\gamma}\,\hat{\gamma^D}\,\hat{\gamma^S}\,\hat{(}t^\mathsf{A}\,\hat{\gamma}\hat{)}\,\hat{(}t^\mathsf{D}\,\hat{\gamma}\,\hat{\gamma^D}\hat{)}}{\hat{\Gamma}\hat;\,\Delta \vdash t : A} \] We present the implementation below. Here, we make $\hat{\gamma}$-s and $\hat{\gamma^D}$-s implicit and mostly notate only $\hat{\gamma^S}$ parameters and applications. \begingroup \allowdisplaybreaks \begin{alignat*}{5} & (1)\hspace{1em} && \boldsymbol{\cdot}^\S\,\hat{\gamma}\,\hat{\gamma^D} && :\equiv \hat{\top} \\ & && (\Delta,\,x:A)^\S\,(\hat{\gamma},\hat{t})\,(\hat{\gamma^D},\hat{t^D}) && :\equiv \hat{(}\hat{\gamma^S}\in\Delta^\S\hat{\gamma^2}\hat{)}\mathbin{\hat\times} A^\S\,\hat{\gamma^S}\,\hat{t}\,\hat{t^D} \\ & && x^\S\,\hat{\gamma^S} && :\equiv x^{\text{th}}\text{ component in } \hat{\gamma^S} \\ & (2) && \mathsf{U}^\S\,\hat{\gamma^S}\,\hat{A}\,\hat{A^D} && :\equiv \hat{(}\hat{x}\in \hat{A}\hat{)}\mathbin{\hat\ra} \hat{A^D}\,\hat{x} \\ & && (\underline{a})^\S\,\hat{\gamma^S}\,\hat{t}\,\hat{t^D} && :\equiv a^\S\,\hat{\gamma^S}\,\hat{t} \mathbin{\hat=} \hat{t^D} \\ & (3) && ((x:a)\rightarrow B)^\S\,\hat{\gamma^S}\,\hat{f}\,\hat{f^D} && :\equiv \hat{(}\hat{x}\in a^\mathsf{A}\,\hat{\gamma}\hat{)}\mathbin{\hat\ra} B^\S\,\hat{(}\hat{\gamma,x}\hat{)}\,\hat{(}\hat{\gamma^D,}\,a^\S\,\hat{\gamma^S}\,\hat{x}\hat{)}\,\hat{(}\hat{\gamma^S,}\,\hat{\mathsf{refl}}\hat{)} \\ & && && \hspace{8.4em} \hat{(}\hat{f}\,\hat{x}\hat{)}\,\hat{\big(}\hat{f^D}\,\hat{x}\,\hat{(}a^\S\,\hat{\gamma^S}\,\hat{x}\hat{)}\hat{\big)} \\ & && (t\,u)^\S\,\hat{\gamma^S} && :\equiv \hat{\J}\,\hat{(}t^\S\,\hat{\gamma^S}\,\hat{(} u^\mathsf{A}\,\hat{\gamma}\hat{)}\hat{)}\,\hat{(}u^\S\,\hat{\gamma^S}\hat{)} \\ & (4) && (t=_a u)^\S\,\hat{\gamma^S}\,\hat{e} && :\equiv \hat{\mathsf{tr}}\,\hat{(}t^\S\,\hat{\gamma^S}\hat{)}\hat{\big(}\hat{\mathsf{tr}}\,\hat{(}u^\S\,\hat{\gamma^S}\hat{)}\,\hat{(}\hat{\mathsf{apd}}\,\hat{(}a^\S\,\hat{\gamma^S}\hat{)}\,\hat{e}\hat{)}\hat{\big)} \\ & && (\mathsf{refl}_t)^\S\,\hat{\gamma^S} && :\equiv \hat{\J}\,\hat{\mathsf{refl}}\,\hat{(}t^\S\,\hat{\gamma^S}\hat{)} \\ & && (\J_{a\,t\,(x.z.p)}\,pr\,_u\,eq)^\S\,\hat{\gamma^S} && :\equiv \\ & && && \hspace{-10em}\hat{\J}\,\hat{\bigg(}\hat{\J}\,\hat{\Big(}\hat{\J}\,\hat{\big(}\hat{\J}\,\hat{(}\hat{\lambda}\,\hat{p^D}\,\hat{p^S}\,\hat{pr^D}\,\hat{pr^S}\hat{.}\,\hat{pr^S}\hat{)}\,\hat{(}t^\S\,\hat{\gamma^S}\hat{)}\,\\ & && && \hspace{-9em}\hat{(}\hat{\mathsf{uncurry}}\,p^\mathsf{D}\,\hat{\gamma^2}\hat{)}\,\hat{(}\hat{\mathsf{uncurry}}\,p^\S\,\hat{\gamma^S}\hat{)}\,\hat{(}pr^\mathsf{D}\,\hat{\gamma^2}\hat{)}\,\hat{(}pr^\S\,\hat{\gamma^S}\hat{)}\hat{\big)}\,\hat{(}eq^\mathsf{A}\,\hat{\gamma}\hat{)}\hat{\Big)}\,\hat{(}u^\S\,\hat{\gamma^S}\hat{)}\hat{\bigg)}\,\hat{(}eq^\S\,\hat{\gamma^S}\hat{)} \\ & && (\J\beta_{a\,t\,(x.z.p)}\,pr)^\S\,\hat{\gamma^S} && :\equiv \\ & && && \hspace{-10em}\hat{\J}\,\hat{\big(}\hat{\J}\,\hat{(}\hat{\lambda}\,\hat{p^D}\,\hat{p^S}\hat{.}\,\hat{\mathsf{refl}}\hat{)}\,\hat{(}t^\S\,\hat{\gamma^S}\hat{)}\,\hat{(}\hat{\mathsf{uncurry}}\,p^\mathsf{D}\,\hat{\gamma^2}\hat{)}\,\hat{(}\hat{\mathsf{uncurry}}\,p^\S\,\hat{\gamma^S}\hat{)}\hat{\big)}\,\hat{(}pr^\S\,\hat{\gamma^S}\hat{)}\\ & (5) && (a=_\mathsf{U} b)^\S\,\hat{\gamma^S} && :\equiv \hat{\lambda\,e\,e^D.\,(\lambda\,x.\, \blc{b^\S}\,\gamma^S\, (\mathsf{coe}\,e\,x)) = (\lambda\,x.\, \hat{\mathsf{tr}}\,e^D\,(\J\,(\blc{a^\S}\,\gamma^S\,x)\,e))}\\ & && (\mathsf{refl}_a)^\S\,\hat{\gamma^S} && :\equiv \hat{\mathsf{refl}} \\ & && (\J_{a\,(x.z.p)}\,pr\,_b\,eq)^\S\,\hat{\gamma^S} && :\equiv\\ & && && \hspace{-10em}\hat{\J\,\bigg(\lambda\,b^D\,\,b^S\,eq^D.\,\J\,\Big(\lambda\,b^S\,eq^S.\,\J\,(\lambda P^S\,pr^S.\,pr^S)\,eq^S\,(\mathsf{uncurry}\,\blc{p^\S}\,\gamma^S)\,(\blc{pr^\S}\,\gamma^S)\Big)} \\ & && && \hspace{-9em}\hat{\,(\blc{eq^\mathsf{D}}\,\gamma^D)\,b^S\bigg)\,(\blc{eq^\mathsf{A}}\,\gamma)\,(\blc{b^\mathsf{D}}\,\gamma^D)\,(\blc{b^\S}\,\gamma^S)\,(\blc{eq^\mathsf{D}}\,\gamma^D)\,(\blc{eq^\S}\,\gamma^S) } \\ & && (\J\beta_{a\,(x.z.p)}\,pr)^\S\,\hat{\gamma^S} && :\equiv \hat{\J\,\mathsf{refl}\,(\blc{pr^\S}\,\gamma^S)} \\ & (6) && ((\hat{x}\in \hat{A})\rightarrow B)^\S\,\hat{\gamma^S}\,\hat{f}\,\hat{f^D} && :\equiv \hat{(}\hat{x}\in \hat{A}\hat{)}\mathbin{\hat\ra} B^\S\,\hat{\gamma^S}\,\hat{(}\hat{f}\,\hat{x}\hat{)}\,\hat{(}\hat{f^D}\,\hat{x}\hat{)} \\ & && (t\,\hat{u})^\S\,\hat{\gamma^S} && :\equiv t^\S\,\hat{\gamma^S}\,\hat{u} \\ & (7) && ((\hat{x}\in \hat{A})\rightarrow b)^\S\,\hat{\gamma^S\,f\,t} && :\equiv b^\S\,\hat{\gamma^S}\,\hat{(}\hat{f}\,\hat{t}\hat{)} \\ & && (t\,\hat{u})^\S\,\hat{\gamma^S} && :\equiv \hat{\mathsf{ap}}\,\hat{(}\hat{\lambda} \hat{f.}\hat{f}\,\hat{u}\hat{)}\,\hat{(}t^\S\,\hat{\gamma^S}\hat{)} \end{alignat*} \endgroup The interpretations follow the same pattern as in the case of $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{M}$ up until $((x:a)\rightarrow B)^\S$, with $\mathsf{U}^\S$ defined as a dependent function instead of a non-dependent one. Also, $\mathsf{U}^\S\,\hat{\gamma^S}\,\hat{A}\,\hat{A^D}$ is precisely the type of sections of the $\hat{A^D}$ type family. Let us now consider the translation with the corresponding induction principles in mind, as defined by $\mathsf{Induction}$ in Section \ref{sec:general}. The $\mathsf{U}^\S$ rule yields the type of the eliminator function for a type constructor. For natural numbers, the non-indexed $Nat : \mathsf{U}$ is interpreted as $\hat{\hat{Nat^S} \in (\hat{x}\in \hat{Nat})\mathbin{\hat\ra} \hat{Nat^D}\,\hat{x}}$. For indexed types, the indices are first processed by the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\S$ cases for inductive and external function parameters, until the ultimate $\mathsf{U}$ return type is reached. Hence, we always get an eliminator function for a type constructor.
Analogously, the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\S$ result type for a point or path constructor is always a $\beta$-rule, i.e.\ a function type returning an equality. That is because $(\underline{a})^\S$ expresses that applications of $a^\S$ eliminators must be equal to the corresponding $\hat{t^D}$ induction methods. Hence, for path and point constructor types, $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\S$ works by first processing all inductive and external parameters, then finally returning an equality type. In the case of $t =_a u$ equalities, we only provide abbreviated definitions for the $t\,u$, $t=_a u$, $\mathsf{refl}$, $\J$ and $\J\beta$ cases. In the $\J$ case, we write $\hat{\mathsf{uncurry}}\,p^\mathsf{D}$ for $\hat{\lambda\,\gamma\,\gamma^D\,x\,x^D\,z\,z^D.}$ $\,p^\mathsf{D}\,\hat{(}\hat{\gamma,x,z}\hat{)}\,\hat{(}\hat{\gamma^D,x^D,z^D}\hat{)}$ and analogously elsewhere, to adjust for the fact that $p$ abstracts over additional $x$ and $z$ variables. The full definitions can be found in the Agda formalization. The definitions are highly constrained by the required types, and not particularly difficult to implement with the help of a proof assistant: they all involve doing successive path induction on all equalities available from induction hypotheses, with appropriately generalized induction motives. The full $(\J_{a\,t\,(x.z.p)}\,pr\,_u\,eq)^\S$ definition is quite large, and, for instance, yields a very large $\beta$-rule for the higher inductive torus definition (the reader can confirm this using the Haskell implementation). One could have an implementation with specialized cases for commonly used operations such as path compositions and inverses, in order to produce smaller translation output. The circle example is a bit more interesting here: \begin{alignat*}{5} & && (\boldsymbol{\cdot},\,S^1 : \mathsf{U},\,b:\underline{S^1},\,loop: \underline{b=b})^\S\,\,\hat{(}\hat{\tt,\,S^1,\,b,\,loop}\hat{)}\,\hat{(}\hat{\tt,\,S^{1D},\,b^D,\,loop^D}\hat{)} \\ & \equiv \,\, && \hat{\top}\mathbin{\hat\times}\hat{(}\hat{S^{1S}}\in \hat{(}\hat{x}\in\hat{S^1}\hat{)}\hat{\,\rightarrow\,}\hat{S^{1D}\,x}\hat{)}\mathbin{\hat\times}\hat{(}\hat{b^S}\in \hat{S^{1S}\,\,b = b^D}\hat{)}\\ & && \hspace{0.75em}\mathbin{\hat\times}\hat{(}\hat{loop^S\,\,\in\,\,} \hat{\mathsf{tr}}\,_{\hat{(}\hat{\lambda x. \hat{\mathsf{tr}}\,_{S^{1D}}\,loop\,x\,=\,b^D}\hat{)}}\,\hat{b^S}\,\hat{(}\hat{\mathsf{tr}}\,_{\hat{(}\hat{\lambda} \hat{x}. \hat{\mathsf{tr}}\,_{\hat{S^{1D}}}\,\hat{loop}\,\hat{(}\hat{S^{1S}\,b}\hat{)}\,\mathbin{\hat=}\,\hat{x}\hat{)}}\,\hat{b^S}\,\hat{(}\hat{\mathsf{apd}}\,\hat{S^{1S}\,loop}\hat{)}\hat{)}\,\\ & && \hspace{5.5em} \hat{=\,loop^D}\hat{)} \end{alignat*} In homotopy type theory, the $\beta$-rule for $loop$ is usually just $\hat{\mathsf{apd}}\,\hat{S^{1S}\,loop}\,\hat{\,=\,loop^D}$, but here all $\beta$-rules are propositional, so we need to transport with $\hat{b^S}$ to make the equation well-typed. When computing the type of $\hat{loop^S}$, we start with $(\underline{b=b})^\S\,\,\hat{\gamma^3}\,\,\hat{loop}\,\,\hat{loop^D}$. Next, this evaluates to $(b=b)^\S\,\hat{\gamma^3}\,\hat{loop}\mathbin{\hat=}\hat{loop^D}$, and then we unfold the left-hand side to get the doubly-transported expression in the result. For $a =_\mathsf{U} b$ equalities, let us examine here only the $(a =_\mathsf{U} b)^\S$ case. The interpretation is inspired by the univalence axiom, although the translation does not require it.
If we assume univalence in the external theory, then $\hat{a =_{\hat{\mathsf{Type}}_i} b}$ equalities are equivalent to equivalences, which contain $\hat{f : a \rightarrow b}$ functions together with $\hat{\mathsf{isEqv}\,f}$ proofs. Hence, we can view the $\hat{e : \blc{a^\mathsf{A}}\,\gamma = \blc{b^\mathsf{A}}\,\gamma}$ and $\hat{e^D : \hat{\mathsf{tr}}_{(\lambda\,A.\,A \rightarrow \mathsf{Type_0})}\,e\,(\blc{a^\mathsf{D}}\,\gamma^D) = \blc{b^\mathsf{D}}\,\gamma^D}$ proofs as functions bundled together with witnesses of equivalence. Since $\hat{\mathsf{isEqv}}$ is proof irrelevant, we only need to relate the underlying functions in the translation. For $\hat{e}$, the underlying function is given just by $\hat{\mathsf{coe}\,e}$. For $\hat{e^D}$, the underlying function is a bit more complicated, since $\hat{e^D}$ is a path over a path. We can observe this in the example for higher inductive integers: \begin{alignat*}{5} & && (\boldsymbol{\cdot},\,\,Int : U,\,\,zero : \underline{Int},\,p : Int =_\mathsf{U} Int)^\S\, \hat{(\tt,\,Int,\,z,\,p)\,(\tt,\,Int^D,\,z^D,\,p^D)} \\ & \equiv \,\, && \hat{ \top \times (Int^S : (x : Int)\rightarrow Int^D\,x) \times (z^S : Int^S\,z = z^D)}\\ & && \hat{\hspace{1em}\times\, (p^S : (\lambda\,x.\,Int^S\,(\mathsf{coe}\,p\,x)) = (\lambda\,x.\,\hat{\mathsf{tr}}\,p^D\,(\J\,(Int^S\,x)\,p)))} \end{alignat*} In Appendix \ref{sec:app}, we additionally show how the type of displayed algebra sections is computed for the two-dimensional sphere. In Appendix \ref{sec:wtypes}, we show the same for indexed W-types. \section{Possible Extensions to Categorical Semantics} \label{sec:categorical} So far, we were able to compute algebras, displayed algebras, morphisms and sections, and this allows us to state recursion and induction principles. Reiterating Section \ref{sec:general}, for a signature $\hat{\Gamma\vdash}\,\Delta$ and assuming $\hat{\Gamma\vdash \Delta^* : \blc{\Delta^\mathsf{A}}}$ as a candidate algebra for the HIIT, we have the following types for induction and recursion: \begin{alignat*}{5} & \hat{\Gamma\vdash \mathsf{Induction} : (\gamma^D : \blc{\Delta^\mathsf{D}}\,\Delta^*)\rightarrow \blc{\Delta^\S}\,\Delta^*\,\gamma^D}\\ & \hat{\Gamma\vdash \mathsf{Recursion} : (\gamma : \blc{\Delta^\mathsf{A}})\rightarrow \blc{\Delta^\mathsf{M}}\,\Delta^*\,\gamma} \end{alignat*} However, this is not the full picture. We would also like to have a \emph{category} of algebras, with homomorphisms as morphisms. This is not without difficulties. We are working in a UIP-free setting. In such a setting, standard definitions of categories feature set-truncated morphisms \cite{HoTTbook}. But we have non-truncated notions of algebras and homomorphisms, so we cannot use the standard definitions. One solution is to simply use non-truncated categories; these have previously been called ``precategories'' or ``wild categories'' \cite{semisegal}. Sojakova demonstrated \cite{sojakova} that working with wild categories internally to type theory is enough to prove the equivalence of induction and homotopy initiality for a class of higher inductive types, which suggests that the same might be possible for HIITs. Still, it would be desirable to build semantics of HIITs in a richer ($\omega$, 1)-categorical setting. However, our current approach does not scale to the point where we have to worry about higher categories. We explain this in the following.
The natural next step towards a categorical semantics would be defining a $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^{\mathsf{ID}}$ translation, which computes identity homomorphisms: \[ \infer{\hat{\Gamma}\mathbin{\hat\vdash}\Delta^{\mathsf{ID}}\,\hat{:\,(\gamma : \blc{\Delta^\mathsf{A}})\rightarrow \blc{\Delta^\mathsf{M}}\,\gamma\,\gamma}} {\hat{\Gamma}\vdash\Delta} \hspace{1em} \infer{ \hat{\Gamma}\mathbin{\hat\vdash} A^{\mathsf{ID}}\,\hat{:\,(\gamma : \blc{\Delta^\mathsf{A}})(t : \blc{A^\mathsf{A}}\,\gamma)\rightarrow \blc{A^\mathsf{M}}\,\gamma\,\gamma\,(\blc{\Delta^{\mathsf{ID}}}\,\gamma)\,t\,t }} {\hat{\Gamma}\hat;\,\Delta\vdash A} \] \[ \infer{ \hat{\Gamma}\mathbin{\hat\vdash} t^{\mathsf{ID}}\,\hat{:\, (\gamma : \blc{\Delta^\mathsf{A}})\rightarrow \blc{t^\mathsf{M}}\,\gamma\,\gamma\,(\blc{\Delta^{\mathsf{ID}}}\,\gamma) = \blc{A^{\mathsf{ID}}}\,\gamma\,(\blc{t^\mathsf{A}}\,\gamma) }} {\hat{\Gamma}\hat;\,\Delta\vdash t : A} \] Here, the interpretation of terms witnesses functoriality: $t^\mathsf{M}$ maps identity morphisms in $\Delta$ to displayed identity morphisms in $A$. We translate $\mathsf{U}$ to identity functions: \[ \mathsf{U}^{\mathsf{ID}}\,\hat{\gamma\,A} :\equiv \hat{\lambda\,(x : A).\, x} \] Assume that $\sigma$ is a parallel substitution, and note that $\mathsf{U}$ and $\mathsf{U}[\sigma]$ are definitionally equal in the theory of signatures. The translation of the latter would be the following (omitting many details, including the handling of substitutions in the translation): \[ (\mathsf{U}[\sigma])^{\mathsf{ID}}\,\hat{\gamma\,A} \equiv \hat{\hat{\mathsf{tr}}_{(\lambda\,x. A\rightarrow A)}\,(\blc{\sigma^{\mathsf{ID}}}\,\gamma)\,(\lambda\,x.\, x)} \] Here, $\hat{\sigma^{\mathsf{ID}}\,\gamma}$ yields an equation for functoriality of $\hat{\sigma}$. Since the transport is constant, the result is propositionally equal to $\hat{\lambda\,x.\, x}$. However, that is not enough, since we need to preserve $\mathsf{U} = \mathsf{U}[\sigma]$ up to definitional equality. Alternatively, we could try changing the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{ID}$ interpretation of terms and substitutions to get \emph{definitional} equalities, instead of propositional equalities internal to the target syntax. This would not be a purely syntactic translation anymore, since for each $\hat{\Gamma}\hat;\,\Delta\vdash t : A$ we would get a universally quantified metatheoretic statement expressing that for each $\hat{\gamma}$ term, $\hat{\blc{t^\mathsf{M}}\,\gamma\,\gamma\,(\blc{\Delta^{\mathsf{ID}}}\,\gamma)}$ is definitionally equal to $\hat{\blc{A^{\mathsf{ID}}}\,\gamma\,(\blc{t^\mathsf{A}}\,\gamma)}$. This would solve the strictness problem in the case of $\mathsf{U}[\sigma]$, since there would be no need to transport over definitional equalities. Unfortunately, while this approach repairs $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^{\mathsf{ID}}$ in some cases, other cases become unfeasible, because we are not able to produce definitional equations. 
For example, the interpretation of the identity type would be as follows: \[ (t =_a u)^{\mathsf{ID}}\,\hat{\gamma \,\,\blc{:}\,\, \blc{(t =_a u)^\mathsf{M}}\,\gamma\,\gamma\,(\blc{\Delta^{\mathsf{ID}}}\,\gamma)\,(\blc{(t =_a u)^\mathsf{A}}\,\gamma)} \equiv \hat{\blc{\mathsf{U}^{\mathsf{ID}}}\,\gamma\,(\blc{(t =_a u)^\mathsf{A}}\,\gamma)} \] The goal type can be reduced along definitions, and using $t^{\mathsf{ID}}$ and $u^{\mathsf{ID}}$, to \[ \hat{(\lambda\,e.\,\,\mathsf{refl}\,\sqcdot\,\hat{\mathsf{ap}}\,(\lambda\,x.\,x)\,e\,\sqcdot\,\mathsf{refl})\,\blc{\equiv}\,(\lambda\,e.\,e)} \] This definitional equality is clearly not provable, although $(t =_a u)^{\mathsf{ID}}$ does work when we interpret terms with weak internal equalities. Hence, there are strictness problems both with weak and with strict equations for functoriality. We consider three potential solutions: \begin{enumerate} \item Solving the coherence problem. This would allow us to interpret signatures directly into the metatheory, preserving definitional equality up to propositional metatheoretic equality. \item Reformulating the syntax of signatures so that definitional equalities become weak propositional equalities. We already use a weak $\beta$-rule for $\J$; can we do so elsewhere? However, this would result in an unusual and very inconvenient syntax of signatures, because we would need to weaken even basic substitution rules. In contrast, weak $\beta$ for $\J$ seems harmless, because we are not aware of any HIIT signature in the literature that involves any $\J$ computation. \item Instead of interpreting signatures into a UIP-free type theory, we interpret them into classical combinatorial structures for higher groupoids and categories, e.g.\ into simplicial sets. The drawback is that now we do not have the convenient synthetic notion of higher structures provided by the syntax of type theory, and instead we have to manually build up these structures. Needless to say, this makes machine-checked formalization much more difficult. We have found mechanized formalization invaluable for the current paper, and it would be painful to abandon it in further research. \end{enumerate} In summary, solutions for the strictness problems require significant deviation from the approach of the current paper, or significant further research. \section{Formalization and Implementation} \label{sec:formalization} There are several development artifacts accompanying the current work: a Haskell implementation, two Agda formalizations of the syntactic translations (a shallow and a deeper version) and an Agda formalization for deriving a Frobenius $\J$ rule from $\J$. All are available from \url{https://github.com/akaposi/hiit-signatures}. The Haskell implementation takes as input a file which contains a $\hat{\Gamma}\vdash\Delta$ signature. Then, it checks the input with respect to the rules in Figures \ref{sigrules1} and \ref{sigrules2}, and outputs an Agda-checkable file which contains algebras, homomorphisms, displayed algebras and sections for the input signature. It comes with examples, including the ones in this paper, the inductive-inductive dense completion \cite[Appendix A.1.3]{forsberg-phd} and several HITs from \cite{HoTTbook}, including the definition of the Cauchy reals. It can be checked that our implementation computes the expected elimination principles in these cases.
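To give an impression of the kind of computation the tool performs, the following toy Haskell fragment sketches the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{A}$ translation on a tiny fragment of the theory of signatures. It is emphatically not the actual implementation: sorts are referenced by name instead of typed variables, dependency between entries is ignored, and only the universe, underlined types and strictly positive function types are covered.
\begin{verbatim}
-- A toy sketch of the -^A translation on a tiny signature fragment.
data SigTy
  = U                     -- declare a new sort, a : U
  | El String             -- underline a: the type of points of sort a
  | Fun String SigTy      -- (x : a) -> B, strictly positive domain

data Tgt
  = Type                  -- target universe Type_0
  | Sort String           -- a previously declared sort
  | Arr Tgt Tgt           -- function type
  | Sigma Tgt Tgt         -- left-nested Sigma type
  | Top                   -- unit type
  deriving Show

-- The type part of -^A: U to Type_0, underline a to a, functions pointwise.
algA :: SigTy -> Tgt
algA U         = Type
algA (El a)    = Sort a
algA (Fun a b) = Arr (Sort a) (algA b)

-- A signature becomes a left-nested Sigma starting from the unit type.
algCon :: [(String, SigTy)] -> Tgt
algCon = foldl (\acc (_, t) -> Sigma acc (algA t)) Top

-- Natural numbers: computes the algebra (1 x Type) x N x (N -> N).
natAlg :: Tgt
natAlg = algCon [("Nat", U), ("zero", El "Nat"), ("suc", Fun "Nat" (El "Nat"))]
\end{verbatim}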
The shallow Agda formalization embeds both the source and target theories shallowly into Agda: it represents types as Agda types, functions as Agda functions, and so on. We also leave the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{A}$ operation implicit. We state each case of the translations as Agda functions from all induction hypotheses to the result type of the translation, which lets us ``typecheck'' the translation. We have found that this style of formalization is conveniently light, but remains detailed enough to be useful. We also generated some of the code of the Haskell implementation from this formalization. The deep Agda formalization deeply embeds the theory of signatures as a CwF with additional structure, in the style of \cite{ttintt}. However, it embeds the external type theory shallowly into Agda, and we model dependency on external $\hat{\Gamma}$ contexts with Agda functions. We formalize the $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{A}$, $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{D}$, $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\mathsf{M}$ and $\mathord{\hspace{1pt}\text{--}\hspace{1pt}}^\S$ translations as a single model construction in Agda. This setup greatly simplifies formalization, since we do not have to reason explicitly about definitional equality in the external syntax, but we can still reason directly about preservation of definitional equalities in the theory of signatures. This semi-deep formalization can in principle be converted into a fully formal syntactic translation, because we prove all preservations of definitional equalities by $\mathsf{refl}$. Due to technical and performance issues in Agda, this deeper formalization uses transport instead of $\J$ for $t =_a u$ equalities, and it does not cover elimination for $a =_\mathsf{U} b$ equalities. \section{Conclusions} \label{sec:summary} Higher inductive-inductive types are useful in defining the well-typed syntax of type theory in an abstract way \cite{ttintt}. From a universal algebraic point of view, they provide initial algebras for multi-sorted algebraic theories where the sorts can depend on each other. From the perspective of homotopy type theory, they provide synthetic versions of homotopy-theoretic constructions such as higher-dimensional spheres or cell complexes. So far, no general scheme for HIITs has been proposed. To quote Lumsdaine and Shulman \cite{lumsdaineShulman}: \begin{quotation} ``The constructors of an ordinary inductive type are unrelated to each other. But in a higher inductive type each constructor must be able to refer to the previous ones; specifically, the source and target of a path-constructor generally involve the previous point-constructors. No syntactic scheme has yet been proposed for this that covers all cases of interest while remaining meaningful and consistent.'' \end{quotation} In this paper we proposed such a syntactic scheme which also includes inductive-inductive types. We tackled the problem of complex dependencies on previous type formation rules and constructors by a well-known method of describing intricate dependencies: the syntax of type theory itself. We had to limit the type formers to only allow strictly positive definitions, but these restrictions are the only things that a type theorist has to learn to understand our signatures.
Our encoding is also direct in the sense that notions of induction and recursion are computed exactly as required and not merely up to equivalences or isomorphisms, and we also demonstrated that this computation is feasible to implement in computer programs. We developed an approach where syntactic translations are used to provide semantics for HIIT signatures. Our approach seems to be a sweet spot for computing notions of induction and recursion in a formally verifiable and relatively simple way. However, there is a coherence problem in the formal treatment of syntaxes and models of type theories internally to type theory. We sidestepped this by considering syntactic translations, but the problem remains, and it prevents extending the current approach to categorical semantics. Our impression is that the true solution will be the development of higher syntaxes and models of type theory in type theory. This may also require adding new features to the meta type theory. In particular, convenient formalization of higher categories seems elusive in conventional homotopy type theory, and we may need two-level type theories \cite{semisegal}, or directed type-theories with native notions of higher categories \cite{nuyts2015towards, riehl2017type}. \bibliographystyle{plain}
\section{Introduction} Upgrading existing fiber links with transponders operating at higher data rates typically requires an improved optical signal--to--noise ratio (OSNR). Codirectional Raman amplification is an attractive technology for increasing the performance of optical fiber links~\cite{Krummrich-ECOC-01}. The achievable performance improvement originates from a modified power profile in the transmission fiber, which allows higher powers at the fiber output with a comparable impact of nonlinear fiber effects. This finally leads to an improved OSNR~\cite{Murakami-ECOC-01}. With the trend to advanced modulation formats at higher data rates employing phase--shift keying, this technology is gaining significantly in importance in unrepeatered submarine links, but increasingly also in terrestrial systems. In many cases, using this kind of amplification scheme is the only remaining option for upgrading existing unrepeatered submarine links by employing transponders with higher data rates. In the past, mainly counterdirectional Raman amplifiers~\cite{Evans-OFC-01} or even remote optically pumped amplifiers (ROPAs)~\cite{Lucero-OFC-09} have been installed in order to enable communication over long distances without intermediate active components requiring electrical power supply. With the introduction of advanced modulation formats, the OSNR at the receivers needs to be further increased due to the larger electrical bandwidth or decreased Euclidian distance between the symbols, since sophisticated forward error correction (FEC) codes and the benefits of coherent detection can only partly mitigate the requirement for better signal quality. Higher--order pumping schemes can provide a further noticeable improvement of the system performance when using counterdirectional Raman amplification~\cite{Papernyi-OFC-02}\cite{Faralli-OFC-05}\cite{Schneiders-JLT-06} by moving the gain from the end of the fiber span deeper into the fiber. In contrast, there is almost no benefit in applying this pumping scheme to already installed links using ROPAs, since the required relocation of the ROPA cassette does not make economic sense~\cite{Pavlovic-SubOptic-13}. Commercial systems are available up to third order, but results for schemes making use of sixth--order pumping have already been published~\cite{Papernyi-OFC-05}. So far, this technique has successfully been used in submarine links, but it is also becoming attractive for terrestrial systems. The big advantage of codirectional Raman amplification is its compatibility with all the above--mentioned techniques and the fact that it can be installed in existing links in a cost--effective manner~\cite{Rapp-Suboptic-10}. Furthermore, higher orders are also applicable to codirectional pumping~\cite{Schulze-OFC-05}. However, a major drawback of higher--order pumping schemes is the decrease of efficiency with increasing order. Therefore, the large pump powers required for third--order pumping are difficult to produce by multiplexing laser diodes. As a consequence, Raman fiber lasers (RFLs) are employed in most cases. However, these sources are known for their significantly higher relative intensity noise (RIN) as compared with laser diodes. Unfortunately, this gives rise to additional effects deteriorating system performance, and the achieved performance improvement is typically significantly smaller than predicted by theoretical considerations that take only the modified power profile into account.
The transfer of relative intensity noise (RIN) from the pump to the signals via stimulated Raman scattering (SRS) has been investigated intensively, and detailed mathematical models have been elaborated for first--order~\cite{Fludger-JLT-01}\cite{Lakoba-JLT-04}\cite{Martinelli-PTL-05} and second--order pumping schemes~\cite{Mermelstein-JLT-03}. These investigations have a strong focus on the frequency characteristics and deal with intensity modulated signals. Even techniques for suppressing this effect have been proposed~\cite{Mermelstein-PTL-03}. However, third--order codirectional Raman amplification also introduces random phase variations. These phase variations likewise originate from intensity variations of the pump lasers, but the impact on the signal phase is caused by cross--phase modulation (XPM)~\cite{Agrawal-NLFO-07} and cross--polarization modulation (XPolM)~\cite{Louchet-ICTON-11}. Due to the large bandwidth of the Kerr effect, the pump affects the signals directly. In particular, XPM and XPolM induce phase variations that impact the detection of phase modulated signals and that are also converted into amplitude variations by group--velocity dispersion~\cite{Rapp-PTL-97}\cite{Rapp-JOC-99}. Since the distortions go back to random power fluctuations of the pump, they manifest themselves as additional noise and are therefore difficult to remove.

Since the signal phase is not taken into account when using direct detection of amplitude modulated signals, there is no noticeable effect on the transmission performance of on--off--keying (OOK) based signals. Thus, codirectional third--order Raman pumping is a suitable option for improving the performance of such signals. However, the phase variations have a strong impact on the detection of signals making use of advanced modulation formats such as quaternary phase shift keying (QPSK). As a consequence, higher--order codirectional pumping often does not provide a noticeable benefit compared with first--order pumping.

In this paper, a technique for reducing the impact of the described phase variations on symbol detection is presented. Reducing the impact of these disturbing phase variations allows higher--order pumping schemes to be used for codirectional Raman amplification in order to improve the OSNR of existing fiber links and thus to increase their capacity. For this purpose, the presented compensation technique makes use of the correlation of the phase variations that are induced via XPM in neighboring signals copropagating in an optical fiber and that result from intensity fluctuations of the fundamental pump.

\section{Distortions of the constellation diagram}
Higher--order pumping transfers optical power from a high--power pump to the signals via intermediate lightwaves called seeds. This is accomplished by taking advantage of the limited bandwidth of SRS, with a peak of the Raman gain curve at a frequency shift of around 13\THz~\cite{Agrawal-NLFO-07}. When doubling the frequency shift, the Raman gain coefficient drops to less than $6\%$ of its peak value. The wavelengths of the pump and the seeds are chosen such that the spacing between neighboring lightwaves is on the order of magnitude of the frequency shift of the peak. Thus, power transfer via SRS is only possible from one lightwave to its neighboring lightwave. Within the first kilometers of the transmission fiber, the power of the pump is transferred to the first seed without amplifying the signals significantly. In the next step, the power is transferred from the first seed to the second seed.
Finally, this second seed provides amplification to the signals, which reach their total power maximum after a propagation distance of approximately~$40\km$. Amplification of the signals occurs mainly within this region. However, this is also where significant fluctuations of the signal power levels can be induced due to the RIN transfer. The large power of the pump within the first kilometers of the transmission fiber is the root cause of the above mentioned phase variations deteriorating system performance. The effect of strong phase noise on symbol detection is illustrated in Fig.~\ref{figure_phase_distortions}, showing the constellation diagram including decision regions. On the left side, the constellation diagram is affected by additive noise only, whereas phase noise is present in addition on the right side. \graficeps{figure_phase_distortions}{0.8\figwidth}{Constellation diagram affected by additive noise only (left side) and by both additive noise and phase noise (right side)}{htb} The frequency characteristics of the conversion process are studied by assuming sinusoidal modulation of the power of the pump with varying modulation frequency. Neglecting signal distortions by linear and nonlinear effects, the efficiency of the process converting power variations of the pump into phase variations of a copropagating signal can mathematically be described by the equation \begin{equation} \eta \propto \left[\alphaNp^2 + \left(\dphaseconst^{(s1p)}\cdot\omega\right)^2\right]^{-1/2} \end{equation} wherein $\alphaNp$ stands for the attenuation coefficient of the pump, \begin{equation} \alphaNp = \ln(10)/10\cdot\alphadB, \end{equation} with $\alphadB$ being the attenuation coefficient in decibel units. Furthermore, $\omega$ stands for the angular frequency and $\dphaseconst^{(s1p)}$ represents the difference \begin{equation} \dphaseconst^{(s1p)} = \phaseconst^{(s1)} - \phaseconst^{(p)} \end{equation} between the inverses of the group velocities of the signal and the pump. The resulting frequency characteristics are illustrated in Fig.~\ref{figure_efficiency}. Please note that no phase estimation removing slow phase variations is assumed for this representation. Simulation results show that this equation describes the frequency dependence of the conversion effect quite well even when linear and nonlinear effects are included. \graficeps{figure_efficiency}{0.8\figwidth}{Efficiency of power to phase conversion via XPM versus frequency}{htb} In real systems, slow phase variations up to frequencies of around $1\MHz$ are already eliminated by the carrier phase estimation performed in the digital signal processor (DSP), so that random phase variations are observed at higher frequencies only. With increasing frequency, the magnitude then passes through a maximum and finally diminishes continuously. This decrease is due to the walk--off effect between the pump and the affected signals. Two signals propagating in the same direction in an optical fiber are affected in a very similar way by XPM induced by intensity fluctuations of a strong pump as long as the walk--off between the two channels is small enough. Thus, the phase variations of copropagating signals are correlated, which allows the impact on the bit error ratio (BER) to be reduced.
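For illustration, this frequency characteristic can be evaluated numerically. The following short Python sketch uses assumed, generic parameter values for a standard single--mode fiber (they are not taken from any particular link) and plots the normalized conversion efficiency:
\begin{verbatim}
import numpy as np
import matplotlib.pyplot as plt

# Illustrative fibre parameters (assumed values, not measured ones):
alpha_dB = 0.25                          # attenuation at the pump wavelength [dB/km]
alpha = np.log(10.0) / 10.0 * alpha_dB   # attenuation coefficient [1/km]
d = 1.7e-9                               # walk-off |beta1_s1 - beta1_p| [s/km]

f = np.logspace(4, 9, 400)               # pump modulation frequency [Hz]
omega = 2.0 * np.pi * f
eta = 1.0 / np.sqrt(alpha**2 + (d * omega)**2)  # conversion efficiency (arb. units)

plt.loglog(f, eta / eta.max())
plt.xlabel("modulation frequency [Hz]")
plt.ylabel("normalized conversion efficiency")
plt.show()
\end{verbatim}
For these assumed values, the corner frequency $\alphaNp/(2\pi\,\dphaseconst^{(s1p)})$ lies in the megahertz range, consistent with the qualitative behavior described above.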
Neglecting fiber attenuation and other linear fiber effects, the complex envelopes of the electrical fields of the two signals at the receivers, $\signalRx^{(1)}(t)$ and $\signalRx^{(2)}(t)$, are represented by the following equations, wherein $\signalTx^{(1)}(t)$ and $\signalTx^{(2)}(t)$ stand for the respective complex envelopes at the transmitters. \begin{eqnarray} \signalRx^{(1)}(t) & = & \signalTx^{(1)}(t)\cdot{}\underbrace{e^{\imath\phasecommon}}_{\parbox{1.8cm}{\centering Common phase shift}} + \underbrace{\noise_{I}^{(1)} + \imath \noise_{Q}^{(1)}}_{\mbox{Noise signal \rom1}} \\[1em] \signalRx^{(2)}(t) & = & \signalTx^{(2)}(t)\cdot{}\underbrace{e^{\imath\phasecommon}}_{\parbox{1.8cm}{\centering Common phase shift}} + \underbrace{\noise_{I}^{(2)} + \imath \noise_{Q}^{(2)}}_{\mbox{Noise signal \rom2}} \end{eqnarray} Both signals suffer from the same phase shift $\phasecommon$ induced by XPM due to intensity variations of the Raman pump. This phase shift is modeled as a Gaussian distributed random variable. Furthermore, additive noise represented by $\noise_{I}^{(1)} + \imath \noise_{Q}^{(1)}$ and $\noise_{I}^{(2)} + \imath \noise_{Q}^{(2)}$ is added to each of the signals. The variables $\noise_{I}^{(1)}$, $\noise_{Q}^{(1)}$, $\noise_{I}^{(2)}$ and $\noise_{Q}^{(2)}$ are stochastically independent random variables with Gaussian probability distributions. The key idea is to determine from the actual phases of both signals an estimate of the common phase shift. In a second step, this common phase shift is removed from the signals: \begin{eqnarray} \signaldet^{(1)}(t) & = & \signalRx^{(1)}(t)\cdot{}e^{-\imath\phasecommonest} \nonumber \\ & = & \left\{\signalTx^{(1)}(t)\cdot{}e^{\imath\phasecommon} + \noise_{I}^{(1)} + \imath \noise_{Q}^{(1)}\right\}\cdot{}e^{-\imath\phasecommonest} \\ & = & \signalTx^{(1)}(t)\cdot{}e^{\imath(\phasecommon-\phasecommonest)} + \left\{\noise_{I}^{(1)} + \imath \noise_{Q}^{(1)}\right\}\cdot{}e^{-\imath\phasecommonest}\\[1em] \signaldet^{(2)}(t) & = & \signalRx^{(2)}(t)\cdot{}e^{-\imath\phasecommonest} \nonumber \\ & = & \left\{\signalTx^{(2)}(t)\cdot{}e^{\imath\phasecommon} + \noise_{I}^{(2)} + \imath \noise_{Q}^{(2)}\right\}\cdot{}e^{-\imath\phasecommonest} \\ & = & \signalTx^{(2)}(t)\cdot{}e^{\imath(\phasecommon-\phasecommonest)} + \left\{\noise_{I}^{(2)} + \imath \noise_{Q}^{(2)}\right\}\cdot{}e^{-\imath\phasecommonest} \end{eqnarray} \section{Compensation of the direct impact of the fundamental pump} In the following, a very simple approach for determining the estimate of the common phase is presented; needless to say, this particular choice is in no way limiting. Determining the phases $\phaseactual^{(1)}$ and $\phaseactual^{(2)}$ of the two received signals by means of the Viterbi--Viterbi phase estimator, the estimate of the common phase is calculated using the equation \begin{equation} \phasecommonest = \frac{\weightfactor^{(1)}\cdot\phaseactual^{(1)} + \weightfactor^{(2)}\cdot\phaseactual^{(2)}}{\weightfactor^{(1)}+\weightfactor^{(2)}} \end{equation} with \begin{eqnarray} \weightfactor^{(1)} = \exp\left( -\probfactor{}\cdot\left|\phaseactual^{(1)}\right|\right) \\[1em] \weightfactor^{(2)} = \exp\left( -\probfactor{}\cdot\left|\phaseactual^{(2)}\right|\right) \end{eqnarray} and $\probfactor$ being a factor equal to or larger than zero. In the limiting case of very large values of the parameter $\probfactor$, the estimate of the common phase corresponds to the phase having minimum magnitude.
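A minimal numerical sketch of this estimator is given below. The channel model is deliberately simplified (one Gaussian common phase value per symbol, additive white Gaussian noise, no walk--off and no filtering of the Viterbi--Viterbi phase), and all parameter values are illustrative assumptions rather than system values:
\begin{verbatim}
import numpy as np

def vv_phase(s):
    """Viterbi & Viterbi: remove the QPSK data by raising to the 4th power."""
    return np.angle(s**4) / 4.0

def common_phase_estimate(phi1, phi2, kappa):
    """Weighted estimate of the common phase shift; for kappa -> infinity
    it selects the phase of minimum magnitude."""
    w1 = np.exp(-kappa * np.abs(phi1))
    w2 = np.exp(-kappa * np.abs(phi2))
    return (w1 * phi1 + w2 * phi2) / (w1 + w2)

rng = np.random.default_rng(1)
n = 100_000
s1 = np.exp(1j * np.pi / 2 * rng.integers(0, 4, n))  # data of signal I
s2 = np.exp(1j * np.pi / 2 * rng.integers(0, 4, n))  # data of signal II
phi_c = rng.normal(0.0, 0.20, n)                     # common XPM-induced phase
awgn = lambda: rng.normal(0, 0.08, n) + 1j * rng.normal(0, 0.08, n)
r1 = s1 * np.exp(1j * phi_c) + awgn()                # received signal I
r2 = s2 * np.exp(1j * phi_c) + awgn()                # received signal II

phi_hat = common_phase_estimate(vv_phase(r1), vv_phase(r2), kappa=20.0)
d1 = r1 * np.exp(-1j * phi_hat)                      # compensated signal I
d2 = r2 * np.exp(-1j * phi_hat)                      # compensated signal II

rms = lambda d, s: np.sqrt(np.mean(np.angle(d * s.conj())**2))
print("residual phase error:", rms(d1, s1),
      " without compensation:", rms(r1, s1))
\end{verbatim}
For the chosen parameters, the residual phase error of the compensated signals is markedly smaller than that of the uncompensated signals, illustrating the benefit of exploiting the correlation of the common phase shift.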
Depending on the symbol definition, $\sfrac{\pi}{2}$ needs to be subtracted from $\phasecommonest$. Several constellations are considered in the following in order to illustrate the functioning of the compensation algorithm. For illustration purposes, it is assumed that both signals transmit the symbol found in the light blue quadrant of the constellation diagram, and the above mentioned limiting case of large $\probfactor$ is assumed. In each of the plots, the common phase shift is illustrated by a red arc, whereas the signal individual additive noise is represented by a blue arrow. The positions of the samples after common phase compensation are indicated by green dots. \graficeps{figure_correction_2}{0.7\figwidth}{Illustration of the compensation algorithm for the case wherein the symbol of the first signal ${\rm S}_1$ is found in the original quadrant, whereas the symbol of the second signal ${\rm S}_2$ is moved to the neighboring quadrant due to additive noise.}{htb} \graficeps{figure_correction_4}{0.7\figwidth}{Illustration of the compensation algorithm for the case wherein the symbol of the second signal ${\rm S}_2$ is found in the original quadrant, whereas the symbol of the first signal ${\rm S}_1$ is moved to the neighboring quadrant due to additive noise.}{htb} In Figs.~\ref{figure_correction_2} and~\ref{figure_correction_4}, one of the symbols is moved to a neighboring quadrant, whereas the second symbol is found in the original quadrant. In both cases, the compensation algorithm is able to move the symbol found in the wrong quadrant back into the original quadrant, such that error free detection becomes possible, since the magnitude of the phase of the symbol moved into the neighboring quadrant is larger than that of the symbol in the original quadrant. \graficeps{figure_correction_1}{0.7\figwidth}{Illustration of the compensation algorithm for the case wherein both received symbols are found in the original quadrant.}{htb} As illustrated in Fig.~\ref{figure_correction_1}, the algorithm does not introduce additional errors in case both symbols are found in the original quadrant; they are therefore mapped to the correct symbols even without common phase compensation. Furthermore, Fig.~\ref{figure_correction_3} shows that the algorithm cannot reduce the bit error ratio in case both symbols are moved away from the original quadrant. \graficeps{figure_correction_3}{0.7\figwidth}{Illustration of the compensation algorithm for the case wherein the symbols of both signals are moved to the neighboring quadrant due to additive noise.}{htb} In some cases, the compensation algorithm might even induce additional bit errors, as illustrated in Fig.~\ref{figure_correction_5}. However, the probability of this constellation is quite small compared with the constellations wherein the compensation algorithm improves performance. Therefore, the compensation algorithm is able to provide an overall performance improvement.
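The relative weight of these cases can be estimated with a simple Monte Carlo experiment. The sketch below implements the limiting case $\probfactor\rightarrow\infty$ (the phase deviation of minimum magnitude is taken as the common phase estimate) with illustrative, assumed noise levels:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(7)
n = 500_000
quad = lambda z: 2 * (z.real > 0) + (z.imag > 0)     # quadrant label 0..3

tx1 = np.exp(1j * (np.pi / 4 + np.pi / 2 * rng.integers(0, 4, n)))
tx2 = np.exp(1j * (np.pi / 4 + np.pi / 2 * rng.integers(0, 4, n)))
phi_c = rng.normal(0.0, 0.25, n)                     # common pump-induced phase
awgn = lambda: rng.normal(0, 0.15, n) + 1j * rng.normal(0, 0.15, n)
r1 = tx1 * np.exp(1j * phi_c) + awgn()
r2 = tx2 * np.exp(1j * phi_c) + awgn()

# Phase deviation from the nearest ideal symbol (data removed via the 4th
# power); the extra minus sign centres the estimate for symbols at
# pi/4 + k*pi/2.
dev = lambda z: np.angle(-(z**4)) / 4.0
d1, d2 = dev(r1), dev(r2)
phi_hat = np.where(np.abs(d1) < np.abs(d2), d1, d2)  # border case

ser = lambda rx, tx: np.mean(quad(rx) != quad(tx))
print("SER without compensation:", ser(r1, tx1))
print("SER with    compensation:", ser(r1 * np.exp(-1j * phi_hat), tx1))
\end{verbatim}
For these assumed noise levels, the counted symbol error ratio with compensation falls well below the uncompensated one, in line with the qualitative discussion of the four cases.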
\graficeps{figure_correction_5}{0.7\figwidth}{Illustration of a constellation wherein the compensation algorithm induces additional bit errors.}{htb} In summary, the following constellations are relevant: \begin{center} \begin{tabular}{|l|l|l|} \hline \multicolumn{3}{|c|}{Relevant constellations}\\ \hline \hline (1) & No correction required & Fig.~\ref{figure_correction_1} \\ \hline (2) & Correction successful & Figs.~\ref{figure_correction_2} and~\ref{figure_correction_4} \\ \hline (3) & Additional errors & Fig.~\ref{figure_correction_5} \\ \hline (4) & No correction possible & Fig.~\ref{figure_correction_3} \\ \hline \end{tabular} \end{center} Although the algorithm may introduce additional errors in some constellations, an overall reduction of the bit error ratio is achieved, since constellations for which a successful correction is possible are more likely to occur. In other words, the probability that a symbol shifted to the neighboring quadrant has a smaller phase magnitude than the symbol located in the original quadrant is quite small. \clearpage \subsection{Processing steps} The steps required for performing the described compensation of common phase variations are listed in the following. Some of them are illustrated in Fig.~\ref{figure_explanation}. \begin{enumerate} \item Common processing of the samples from both signals for reducing common phase variations. \item At the transmitters, the clocks of the copropagating data signals need to be adjusted relative to each other in such a way that the phase variations at the later sampling points are affected by the same power fluctuations of the pump. Essentially, this will result in synchronous clock signals, but slight deviations might even provide better performance. The optimum time shift between the clock signals can be determined by means of a feedback control loop comprising the transmitter and the receiver. \item The two signals must be processed with the same delay at the receiver; thus, the data samples of the ``faster'' channel need to be buffered. Furthermore, compensation at higher frequencies requires removing some phase shift in the frequency domain. \item The parameter $\probfactor$ determining the weight of the different contributions is adjusted in another control loop. \end{enumerate} \graficeps{figure_explanation}{0.9\figwidth}{Processing steps required for compensating common phase variations}{htb} \clearpage \section{Implementation aspects} Using the Viterbi \& Viterbi phase recovery algorithm, the data dependency is removed from the samples by raising them to the fourth power and calculating the phase of the result. Afterwards, the influence of additive noise is minimized by filtering. The algorithm is illustrated in Fig.~\ref{figure_Viterbi_Viterbi}. \graficeps{figure_Viterbi_Viterbi}{0.8\figwidth}{Block diagram illustrating the functionality of the Viterbi \& Viterbi carrier phase estimation (CPE) algorithm}{htb} The symbols of a quaternary phase shift keying (QPSK) signal can be described by the equation \begin{eqnarray} s_k & = & \exp\left\{ \imath\left( k \cdot\frac{\pi}{2} + \varphi(t)\right)\right\} \\ & = & \exp\left\{ \imath\left( k \cdot\frac{\pi}{2} + \bar{\varphi} + \tilde{\varphi}(t)\right)\right\} \label{Eq:symbols} \end{eqnarray} with an arbitrary time dependent phase $\varphi(t)$ with average $\bar{\varphi}$ and zero--mean fluctuations $\tilde{\varphi}(t)$.
The characteristic mathematical operation of the Viterbi \& Viterbi phase recovery algorithm is given by the equation \begin{equation} \phi(t) = \frac{1}{4}\cdot\angle\left\{ \left(s_k^\star\right)^4 \right\} \end{equation} wherein $s_k^\star$ stands for the detected samples and $\angle$ denotes the phase (argument) of the expression in brackets. With equation~(\ref{Eq:symbols}), this leads to the expression \begin{equation} \phi(t) = \bar{\varphi} + \tilde{\varphi}(t). \end{equation} The average phase $\bar{\varphi}$ is canceled out by carrier phase estimation, e.\,g. by using the Viterbi \& Viterbi approach. What remains are the fast variations $\tilde{\varphi}(t)$ impairing symbol detection. The constellation diagram at the receiver is rotated against the original constellation diagram at the transmitter if the phase of the local oscillator does not match the carrier phase. This mismatch is canceled in the digital domain by carrier phase estimation (CPE). The fundamental functionality of the CPE is explained in the following, starting from constellations rotated by various angles against the original constellation diagram, as illustrated in Fig.~\ref{figure_start_phase}. \graficeps{figure_start_phase}{0.5\figwidth}{Constellation diagram rotated against the original constellation diagram due to carrier phase mismatch}{htb} Two possible implementations of the phase noise compensation algorithm in combination with Viterbi \& Viterbi phase recovery are illustrated in Fig.~\ref{figure_implementations}. \graficeps{figure_implementations}{\figwidth}{Two possible implementations of phase noise compensation in combination with Viterbi \& Viterbi phase recovery. In the upper diagram, carrier phase estimation and the proposed phase noise compensation are cascaded. In the lower diagram, both steps are combined in a single algorithm.}{htb} \section{Conclusions} Higher--order pumping schemes are suitable techniques for enhancing the performance of Raman amplifiers. Due to their complexity, their main application area is to be found in unrepeatered submarine links, where often no cost--efficient alternatives are available. However, the increased pump powers present new challenges and give rise to additional noise contributions. Currently, quaternary phase--shift keying combined with polarization multiplexing and coherent detection (CP--QPSK) is the preferred modulation format for leading edge systems, and many installed unrepeatered submarine links are upgraded by introducing transponders making use of this modulation format. Signal distortions of such phase--modulated signals induced directly by the high--power pump of a third--order codirectional Raman amplifier via the Kerr effect impair signal detection, such that higher--order codirectional pumping often does not provide any performance improvement compared with first--order pumping schemes. In contrast, there is a clear improvement for on--off--keying signals. In this paper, a technique reducing the impact of the detected phase distortions has been presented in order to make full use of the potential performance improvement expected from higher--order pumping.
\section{Introduction} The von Neumann entropy, given by $S(\rho)=-\mathrm{Tr}{\rho\log\rho}$ for a quantum state (density operator) $\rho$, is one of the cornerstones of quantum information theory. It plays an essential role in the expressions for the best achievable rates of virtually every coding theorem. In particular, when proving the optimality of these expressions, it is the inequalities governing the relative magnitudes of the entropies of different subsystems which are important. There are essentially two such inequalities known, the so-called basic inequalities: \begin{align} I(A:B|C) := -S(C)+S(AC)+S(BC)-S(ABC) &\geq 0, \tag{SSA}\label{SSA}\\ S(AB)+S(AC)-S(B)-S(C) &\geq 0. \tag{WMO}\label{weakmono} \end{align} Inequality \eqref{SSA} is known as \emph{strong subadditivity} and was proved by Lieb and Ruskai \cite{LR73}; the expression on the left hand side is known as the (quantum) conditional mutual information. Inequality \eqref{weakmono} is usually called \emph{weak monotonicity}, and it is in fact equivalent to (SSA) -- see section \ref{notation} below. To be precise, we will be considering only \emph{linear} inequalities involving the entropies of various reduced states of a multi-party quantum state, as we shall explain, and partly motivate, now. Given a multipartite state $\rho$ on a set of parties (quantum systems) $N=\{X_1,\ldots,X_n\}$, we can think of the entropy as a function which assigns a real number to each subset of $N$, i.e. $S(.)_\rho:\powset{N}\rightarrow\real$ with $S(J)_\rho:=S(\rho_J)$. (We will use the notation $S(J)_\rho$ and $S(\rho_J)$ interchangeably.) Further, with each function $f:\powset{N}\rightarrow\real$ which satisfies $f(\emptyset)=0$, we can associate a vector in $2^n-1$ dimensional real space: $(f(X_1),f(X_2),\ldots,f(X_1\ldots X_n))$. It is then natural to ask the question: which vectors can arise as the entropies of quantum states? For example, the vector $(1,1,2)$ is the entropy vector of the maximally mixed state on two spin-$\frac12$ systems. However, we know that the vector $(1,1,3)$ cannot represent the entropies of any quantum state, since in general the quantity $S(X_1)_\rho+S(X_2)_\rho-S(X_1X_2)_\rho$ is non-negative, whereas here it is equal to $-1$. Thus, the question of which vectors can be realised by quantum states is inextricably linked to the knowledge of entropy inequalities. Indeed, for $n=2$ and $n=3$ it has been shown \cite{Pip03} that the closure of the set of achievable vectors, which we will denote $\overline{\Sigma}_n^*$, is exactly the cone in $\real^{2^n-1}$ cut out by the basic inequalities, denoted $\Sigma_n$. In other words, a vector can be realised, with arbitrary accuracy, as the entropy vector of a quantum state if and only if it satisfies all the basic inequalities. For $n\geq 4$ it can again be shown that $\overline{\Sigma}_n^*$ forms a convex cone; however, it is unknown whether or not this cone is the same as that determined by the basic inequalities. In classical information theory, the Shannon entropy of a random variable, given by $H(X)=-\sum_{x\in\mathcal{X}}p_X(x)\log p_X(x)$, plays an analogous role to the von Neumann entropy. It satisfies the same basic inequalities as above, with \eqref{weakmono} replaced by the stronger condition of \emph{monotonicity}: $H(AB)\geq H(A)$. The classical problem analogous to the one we study here has been studied extensively for quite some time.
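As a concrete illustration of this membership test, the following short Python sketch (the helper functions are written out for completeness and are not tied to any particular library) computes the entropy vector of the maximally mixed two-qubit state and checks the subadditivity quantity used above:
\begin{verbatim}
import numpy as np

def vn_entropy(rho):
    """Von Neumann entropy in bits, S(rho) = -Tr(rho log2 rho)."""
    lam = np.linalg.eigvalsh(rho)
    lam = lam[lam > 1e-12]
    return float(-np.sum(lam * np.log2(lam)))

def partial_trace(rho, keep, dims):
    """Reduced density matrix on the subsystems listed in `keep`."""
    n = len(dims)
    rho = rho.reshape(dims + dims)
    traced = 0
    for ax in range(n):
        if ax in keep:
            continue
        k = ax - traced
        rho = np.trace(rho, axis1=k, axis2=k + (n - traced))
        traced += 1
    d = int(np.prod([dims[i] for i in keep]))
    return rho.reshape(d, d)

dims = (2, 2)
rho = np.eye(4) / 4.0   # maximally mixed state on two spin-1/2 systems
S_A = vn_entropy(partial_trace(rho, [0], dims))
S_B = vn_entropy(partial_trace(rho, [1], dims))
S_AB = vn_entropy(rho)
print((S_A, S_B, S_AB))                        # -> (1.0, 1.0, 2.0)
print("subadditivity:", S_A + S_B - S_AB >= -1e-12)
# The vector (1, 1, 3) fails the same linear test: 1 + 1 - 3 = -1 < 0.
\end{verbatim}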
First Zhang and Yeung \cite{YZ98}, and then Makarychev \emph{et al.} \cite{Mak02} and Dougherty \emph{et al.} \cite{Dou06}, found new inequalities which are not implied by the basic inequalities. Mat\'{u}\v{s} \cite{Mat07} even proved that for $n\geq 4$ the classical entropy cone is not polyhedral, i.e.~it cannot be described by any finite set of linear inequalities. In the quantum case, only one inequality is known which cannot be deduced from the basic inequalities \cite{LW05}, and it is a so-called constrained inequality -- an inequality which holds whenever certain conditional mutual informations are zero. This shows that parts of certain faces of the cone $\Sigma_n$ do not contain any entropy vectors of quantum states (noting that $\Sigma_n$, being defined by finitely many linear inequalities, is a polyhedral cone). This is not enough, however, to conclude that the entropy cone, $\overline{\Sigma}_n^*$, is strictly smaller, as we are concerned with the closure. In fact, it remains a major open problem to decide the existence of an unconstrained inequality for the von Neumann entropy that is not implied by the basic inequalities. Here we make progress in two different directions: we prove an infinite family of constrained inequalities, which are provably independent, and we do so with a strictly smaller set of constraints. The structure of the remainder of the paper is as follows: in section \ref{notation} we introduce notation and review the basic linear framework of entropy inequalities. In section \ref{result} we prove that a family of constrained inequalities are true for the von Neumann entropy of quantum states; in section \ref{independence} we show that this family is mutually independent; in section \ref{variations} we exhibit some alternate forms of the inequalities; and in section \ref{discussion} we conclude and mention some open problems. \section{Entropy Inequalities} \label{notation} In this section we explain precisely what it means for entropy inequalities to hold, and to be independent of one another. Consider the inequality of strong subadditivity \eqref{SSA}, \begin{equation} -S(C)+S(AC)+S(BC)-S(ABC) \geq 0, \end{equation} which holds for all quantum states on a Hilbert space $\hilb{A}\otimes\hilb{B}\otimes\hilb{C}$. By swapping the labels of $\hilb{A}$ and $\hilb{C}$ we obtain another inequality: $-S(A)+S(AC)+S(AB)-S(ABC)\geq0$. Alternatively, suppose we have a four party quantum state on $\{A,B,C,D\}$. Then we can think of $\rho_{ABCD}$ as a tripartite state on $\hilb{A}\otimes\hilb{BC}\otimes\hilb{D}$, and so we obtain the inequality $-S(D)+S(AD)+S(BCD)-S(ABCD)\geq0$. We could even think of a bipartite state $\rho_{AB}$ as a tripartite state on $\{A,B,C\}$ with $\hilb{C}$ trivial; in this case we obtain \begin{equation} I(A:B) := S(A)+S(B)-S(AB) \geq 0, \label{mutualinfo} \end{equation} which is sometimes considered another basic inequality because of the importance of the expression on the left, the (quantum) mutual information, although the above reasoning shows that this is not really necessary. Generally, if we have a state on a set of parties $N$, then for each disjoint triple $\alpha,\beta,\gamma\subseteq N$ we obtain a different \emph{instance} of SSA. A function which satisfies all instances of \eqref{SSA} is called \emph{submodular} \cite{Oxley}.
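For completeness, the enumeration of instances can be made explicit in a few lines of code. The following sketch checks every instance of \eqref{SSA} on three parties for a toy entropy function, namely a single uniform bit copied to all parties (the function and all names are illustrative):
\begin{verbatim}
from itertools import combinations

N = ("A", "B", "C")
subsets = [frozenset(c) for r in range(len(N) + 1)
           for c in combinations(N, r)]

# Toy entropy function: one uniform bit copied to every party, so
# S(J) = 1 for each non-empty J and S(empty set) = 0.
S = {J: (1 if J else 0) for J in subsets}

# Every instance of SSA on N: disjoint non-empty alpha, beta and a
# (possibly empty) gamma.
ok = all(-S[g] + S[a | g] + S[b | g] - S[a | b | g] >= 0
         for a in subsets if a
         for b in subsets if b and not (a & b)
         for g in subsets if not (g & (a | b)))
print("all instances of SSA hold:", ok)
\end{verbatim}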
Weak monotonicity similarly has as special instances \begin{align} S(AB)+S(A)-S(B) &\geq 0, \label{triangle}\\ S(A) &\geq 0, \label{pos} \end{align} the first known as the \emph{triangle inequality}; they are obtained from \eqref{weakmono} by making $\hilb{C}$ trivial, and both $\hilb{B}$ and $\hilb{C}$ trivial, respectively. More generally, an entropy inequality is an expression of the form \begin{equation}\label{ineqdef} L(X_1,\ldots,X_k) = \sum_{\omega\in\powset{K}}\chi_\omega S(X_\omega) \geq 0, \end{equation} for some $k\in \mathbb{N}$, where $K=\{1,\ldots,k\}$, $\chi_\omega\in\real$ and $X_\omega=\bigcup_{i\in\omega}X_i$. An \emph{instance} of the inequality is the expression obtained by fixing a ground set of parties, $N$, and substituting $X_1,\ldots,X_k$ for $k$ disjoint subsets of $N$ in \eqref{ineqdef}. We then say that the inequality $\tilde{L}\geq 0$ is \emph{independent} of the inequalities $L_1,\ldots,L_m \geq 0$ whenever some instance of $\tilde{L}$ cannot be written as a positive linear combination of instances of $L_1,\ldots,L_m$. In section \ref{independence} we will prove that a family of \emph{constrained} inequalities are independent of each other, and of the basic inequalities. A constrained inequality can be thought of in the same way as above, but it is required to hold only when the constraints, $C_i$, are equal to zero: \begin{equation} C_i(X_1,\ldots,X_k) = \sum_{\omega\in\powset{K}} \eta_\omega^{(i)} S(X_\omega) = 0, \end{equation} for all $i$. We say that the inequality $\tilde{L}\geq0$ (with constraints $\{C_i\}$) is independent of the inequalities $L_1,\ldots,L_m\geq0$ (each with some subset of $\{C_i\}$ as constraints) whenever some instance of $\tilde{L}$ cannot be written as a positive linear combination of instances of $L_1,\ldots,L_m$ plus an arbitrary linear combination of the $C_i$. (Here we only consider instances of $L_1,\ldots,L_m$ with constraints matching the particular instance of $\tilde{L}$.) A slight caveat to this definition of independence is provided by the following observation. Notice that one instance of \eqref{SSA} applied to a purification $\psi_{ABCD}$ of the state $\rho_{ABC}$ is \begin{equation} -S(B)+S(AB)+S(BD)-S(ABD) \geq 0. \end{equation} Using the property of pure states that $S(J)_\psi=S(J^c)_\psi$, where $J^c = N\setminus J$ is the complement of $J$ in $N$, we can eliminate $D$ from this inequality to obtain \begin{equation} -S(B)-S(C)+S(AB)+S(AC) \geq 0, \end{equation} and so we have deduced \eqref{weakmono}. However, in the sense defined above, the inequalities \eqref{SSA} and \eqref{weakmono} are independent. \section{Main Result}\label{result} Our main result is the following theorem, whose analogue was proved in \cite{Mak02} for the Shannon entropy: \begin{thm} \label{main} Let $\rho$ be a multipartite quantum state on parties $\{A,B,C,X_1,\ldots,X_n\}$ which satisfies the constraints: \begin{equation} I(A:C|B)_{\rho}=I(B:C|A)_{\rho}=0. \end{equation} Then the following inequality holds: \begin{equation} S(X_1\ldots X_n)_\rho+(n-1)I(AB:C)_\rho \leq \sum_{i=1}^n S(X_i)_\rho+\sum_{i=1}^n I(A:B|X_i)_\rho. \end{equation} \end{thm} Before commencing the proof, we state a result from \cite{HJPW04} which will be crucial.
\begin{prop} A state $\rho_{ABC}$ on $\hilb{A}\otimes\hilb{B}\otimes\hilb{C}$ satisfies $I(A:C|B)_\rho=0$ if and only if there is a decomposition of system $B$ as \begin{equation} \hilb{B}=\bigoplus_{j}\hilb{b_j^L}\otimes\hilb{b_j^R} \end{equation} into a direct sum of tensor products, such that \begin{equation} \rho_{ABC}=\bigoplus_j q_j\rho_{Ab_j^L}^{(j)}\otimes\rho_{b_j^RC}^{(j)}, \end{equation} with states $\rho_{Ab_j^L}^{(j)}$ on $\hilb{A}\otimes\hilb{b_j^L}$ and $\rho_{b_j^RC}^{(j)}$ on $\hilb{b_j^R}\otimes\hilb{C}$, and a probability distribution $\{q_j\}$. \end{prop} In \cite{LW05}, using this result, two of the present authors derived the general structure of a state $\rho_{ABC}$ which saturates two separate instances of \eqref{SSA} simultaneously, exactly the constraints of Theorem \ref{main}. They found that such a state must have the form \begin{equation} \rho_{ABC}=\bigoplus_{i,j} p_{ij} \sigma_{a_i^L}^{(i)}\otimes\sigma_{a_i^Rb_j^L}^{(ij)} \otimes\sigma_{b_j^R}^{(j)}\otimes\sigma_C^{(k)} \end{equation} where, importantly, $k$ is a function only of $i$ and only of $j$, in the sense that \begin{equation} k = k(i,j) = k_1(i) = k_2(j) \quad\text{ whenever }\quad p_{ij} > 0. \end{equation} In particular, $k$ need only be defined where $p_{ij}>0$, so that it is not necessarily constant. By collecting the terms with equal $k$ we can write \begin{equation} \label{structure} \rho_{ABC}=\bigoplus_k p_k \sigma_{AB}^{(k)}\otimes\sigma_C^{(k)}, \end{equation} where $p_k\sigma_{AB}^{(k)} = \sum_{i,j:k(i,j)=k}p_{ij}\sigma_{a_i^L}^{(i)}\otimes\sigma_{a_i^Rb_j^L}^{(ij)} \otimes\sigma_{b_j^R}^{(j)}$. We are now ready to proceed with the proof of the main theorem. \begin{proof}[Proof of Theorem 1] Since $I(A:C|B)_\rho=I(B:C|A)_\rho=0$, from the argument above we know that $\rho_{ABC}$ has the form \eqref{structure}. The key ideas of the proof are the following three steps: \begin{enumerate}[(1)] \item ``Measure the value of $k$'' locally at $A$, without disturbing the state $\rho_{ABC}$, storing the result of this measurement in a classical register, $R$. \item Observe that the entropies of our new $(n+4)$-party state, $\sigma$, satisfy many desirable properties, which allow us to derive new inequalities for $\sigma$ by methods analogous to \cite{Mak02}. \item Relate these inequalities for $\sigma$ back to inequalities for $\rho$, using the fact that the measurement left many of the entropies unchanged. \end{enumerate} More precisely, we define $\hilb{A}^{(k)}=\bigoplus_{i:k_1(i)=k}\hilb{a_i^L}\otimes\hilb{a_i^R}$ and $P_k$ to be the projection operator onto $\hilb{A}^{(k)}$. We then perform the local projective measurement at $A$ defined by the projectors $\{P_k\}$ and, conditional upon obtaining measurement outcome $k$, we prepare the state $\ket{k}\bra{k}$ in an ancilla, $R$, where $\{\ket{k}\}$ form an orthonormal basis of $\hilb{R}$. We then forget the value of $k$. Let $p_k$ be the probability that outcome $k$ is obtained, and write $\sigma^{(k)}$ for the state on $ABCX_1\ldots X_n$ in this event. Then we can express the overall state of the system as: \begin{equation}\label{sigmastructure} \sigma=\sum_{k=1}^K p_k\sigma^{(k)} \otimes \ket{k}\bra{k}_R.
\end{equation} From \eqref{structure}, and since the subspaces $\hilb{A}^{(k)}$ are orthogonal, it is clear that the reduced state $\sigma_{ABC}^{(k)}$ is equal to the state $\sigma_{AB}^{(k)}\otimes\sigma_C^{(k)}$ as defined previously, so that the $\sigma_{AB}^{(k)}$ and $\sigma_C^{(k)}$ of \eqref{structure} are indeed the appropriate reduced states of $\sigma^{(k)}$. It is also clear that the use of $p_k$ in \eqref{sigmastructure} is consistent with that in \eqref{structure}. This implies that $\sigma_{ABC}=\sum_{k=1}^Kp_k\sigma_{AB}^{(k)}\otimes\sigma_C^{(k)}=\rho_{ABC}$. We now write $N=\{A,B,C,X_1,\ldots,X_n\}$ and observe that $\sigma$ exhibits the following properties: \begin{enumerate}[(i)] \item $S(RA)_\sigma-S(A)_\sigma=:S(R|A)_\sigma=S(R|B)_\sigma=0$; \item For $J\subseteq N$ we have $S(JR)_\sigma\geq S(J)_\sigma$; \hfill(R-monotonicity) \item $S(R)_\sigma\geq I(AB:C)_\sigma$. \end{enumerate} To see (ii) notice that the structure of $\sigma$ given in equation \eqref{sigmastructure} implies, for any $J\subseteq N$, \begin{equation} S(RJ)_\sigma=H(\boldvec{p})+\sum_{k=1}^K p_kS(J)_{\sigma^{(k)}}\geq S(J)_\sigma, \end{equation} where $H$ is the Shannon entropy, $\boldvec{p}=(p_1,\ldots,p_K)$, and the inequality follows from \cite[Thm. 11.10]{NC00}. If $A\in J$ or $B\in J$ then the inequality becomes an equality since the $\sigma_J^{(k)}$ are supported on orthogonal subspaces. This proves (i). Finally, (iii) follows from \begin{equation} S(ABC)_\sigma-S(AB)_\sigma=\sum_{k=1}^Kp_kS(C)_{\sigma^{(k)}}\geq S(C)_\sigma-H(\boldvec{p}), \end{equation} and the fact that $S(R)_\sigma=H(\boldvec{p})$. Using these properties we follow an argument similar to that used in \cite{Mak02} for classical entropies. Notice the following chain of inequalities: \begin{equation}\begin{split} S(R|X_i)_\sigma&=S(RX_i)_\sigma-S(X_i)_\sigma+S(ABX_i)_\sigma-S(ABX_i)_\sigma \\ &\leq S(RX_i)_\sigma+S(RABX_i)_\sigma-S(X_i)_\sigma-S(ABX_i)_\sigma \\ &\leq S(RAX_i)_\sigma+S(RBX_i)_\sigma-S(X_i)_\sigma-S(ABX_i)_\sigma \\ &=S(R|AX_i)_\sigma+S(R|BX_i)_\sigma+I(A:B|X_i)_\sigma \\ &\leq S(R|A)_\sigma+S(R|B)_\sigma+I(A:B|X_i)_\sigma \\ &= I(A:B|X_i)_\sigma. \end{split}\end{equation} In the second line we used R-monotonicity, in the third and fifth lines we used strong subadditivity and for the final equality we used property (i). This implies \begin{equation}\begin{split} S(X_i|R)_\sigma+S(R)_\sigma &= S(X_i)_\sigma+S(R|X_i)_\sigma \\ &\leq S(X_i)_\sigma+I(A:B|X_i)_\sigma. \end{split}\end{equation} Summing over all $i$ we obtain \begin{equation} \label{dagger} \sum_{i=1}^n S(X_i|R)_\sigma + nS(R)_\sigma \leq \sum_{i=1}^n S(X_i)_\sigma + \sum_{i=1}^n I(A:B|X_i)_\sigma. \end{equation} Our aim here is to form inequalities for $\sigma$ which can be related back to $\rho$, and so we need to eliminate system $R$. To this end, observe that \begin{equation} S(X_1\ldots X_n)_\sigma\leq S(RX_1\ldots X_n)_\sigma\leq S(RX_1\ldots X_{n-1})_\sigma+S(X_n|R)_\sigma, \end{equation} by R-monotonicity and SSA. Applying the second inequality here inductively we obtain \begin{equation} S(X_1\ldots X_n)_\sigma \leq \sum_{i=1}^n S(X_i|R)_\sigma + S(R)_\sigma, \end{equation} which we can substitute into \eqref{dagger} to give \begin{equation} S(X_1\ldots X_n)_\sigma + (n-1)S(R)_\sigma \leq \sum_{i=1}^n S(X_i)_\sigma + \sum_{i=1}^n I(A:B|X_i)_\sigma. \end{equation} Finally, applying property (iii) yields \begin{equation} S(X_1\ldots X_n)_\sigma + (n-1)I(AB:C)_\sigma \leq \sum_{i=1}^n S(X_i)_\sigma + \sum_{i=1}^n I(A:B|X_i)_\sigma. 
\end{equation} We have shown that the new inequality holds for the state $\sigma$, the state after the measurement, but it remains to prove that it holds for the general state $\rho$. However, this is straightforward. Indeed, since the measurement was local at $A$, it did not alter the state on $X_1\ldots X_n$, and hence $S(X_1\ldots X_n)_\sigma=S(X_1\ldots X_n)_\rho$ and $S(X_i)_\sigma=S(X_i)_\rho$ for all $i$. Likewise, since the measurement did not affect the state on $ABC$, we must have $I(AB:C)_\sigma=I(AB:C)_\rho$. Finally, since the conditional mutual information is monotone decreasing under local maps, we must have $I(A:B|X_i)_\sigma\leq I(A:B|X_i)_\rho$. Putting all this together we obtain the result: \begin{equation} S(X_1\ldots X_n)_\rho+(n-1)I(AB:C)_\rho\leq \sum_{i=1}^n S(X_i)_\rho+\sum_{i=1}^n I(A:B|X_i)_\rho, \end{equation} as advertised. \end{proof} \section{Independence of the Inequalities}\label{independence} In the previous section we proved that certain constrained inequalities hold; however, we have not yet justified why this result is interesting. Let $C_n$ denote the constrained inequality of Theorem \ref{main} for a given value of $n$. If we simplify $C_1$ then most of the terms cancel and we are left with the inequality \begin{equation} I(A:B|X_1)\geq 0, \end{equation} which is simply one of the basic inequalities. With this in mind, one may suspect that we have not proved anything new. Let $n\geq 2$ be an arbitrary, but fixed, integer. We aim to show that $C_n$ is independent of the basic inequalities and of $\{C_p\}_{p\neq n}$. To do this we must show that $C_n$ cannot be written as a positive linear combination of instances of $\{C_p\}_{p\neq n}$ and basic inequalities, together with a negative linear combination of the constraints. Let $N=\{a,b,c,x_1,\ldots,x_n\}$. Our approach will be to find a function $g:\powset{N}\rightarrow\real$ which is submodular, monotonic, satisfies the constraints $g(a:c|b)=g(b:c|a)=0$ and satisfies all instances of $\{C_p\}_{p\neq n}$, but which violates $C_n$. Here we use the notation $h(\alpha:\beta|\gamma)=-h(\gamma)+h(\alpha\cup\gamma)+h(\beta\cup\gamma)-h(\alpha\cup\beta\cup\gamma)$ for any function $h:\powset{N}\rightarrow \real$, where $\alpha,\beta,\gamma$ are disjoint subsets of $N$. Notice that in section \ref{result} we considered $C_p$ only as an inequality on $p+3$ parties. However, following the argument of section \ref{notation} we see that, for any $p$, there are instances of $C_p$ on $N$. This is because we can always choose $p+3$ disjoint subsets of $N$, though, of course, for $p>n$ some of these are necessarily empty. We begin by introducing some notation. We keep $n\geq 2$ fixed, and let $N_1=\{x_1,\ldots,x_n\}$ and $N_2=\{a,b,c\}$, so that $N=N_1\ensuremath{\mathaccent\cdot\cup} N_2$. (In this section we will use lower case roman letters to represent singletons, and capital roman or greek letters to represent subsets of $N$.) It is clear that each subset $M\subseteq N$ has a unique decomposition $M=J\ensuremath{\mathaccent\cdot\cup} K$ (we will usually write $M=JK$) with $J\subseteq N_1$ and $K\subseteq N_2$. We can then define a function $f:\powset{N}\rightarrow \real$ by \begin{equation} f(JK)=\theta_K + |J|\lambda_K -\mu_{JK}, \end{equation} for some constants $\theta_K,\lambda_K,\mu_{JK}\in\real$, where $|J|$ is the size of the set $J$. The particular values of $\theta$ and $\lambda$ are as follows.
\begin{align} \theta &= \left(\begin{array}{ccc} & \theta_{abc} & \\ \theta_{ab} & \theta_{ac} & \theta_{bc} \\ \theta_a & \theta_b & \theta_c \\ &\theta_\emptyset& \end{array}\right) \nonumber\\ &:= (n+1)\left(\begin{array}{ccc} & 2n^3+8n^2+4n-1 & \\ 2n^3+8n^2+4n-1 & 2n^3+5n^2+2n & 6n^2+4n-1 \\ 2n^3+5n^2 & 4n^2+2n-1 & 3n^2+n \\ & 0 & \end{array}\right), \\ \lambda &:= - \left(\begin{array}{ccc} & 2n^3+8n^2+4n-1 & \\ 2n^3+8n^2+4n-1 & 2n^3+5n^2+2n & 6n^2+4n-1\\ 2n^3+5n^2+2n & 4n^2+2n-1 & 4n^2+2n-1\\ & n^2 & \end{array} \right), \label{lambdadef} \end{align} and the only non-zero values of $\mu_{JK}$ are \begin{align} \mu_a &= 2n^2(n+1), \\ \mu_{ab} &= 2n(n+1)^2. \end{align} \begin{prop} $f$ is a submodular function. \end{prop} \begin{proof} In \cite{Pip03} it is shown that a function $f$ is submodular if and only if all expressions of the form $f(i:j|\alpha)$ are nonnegative, where $i,j\in N$ are distinct elements and $\alpha\subseteq N\setminus\{i,j\}$. By considering whether $i$ and $j$ belong to $N_1$ or $N_2$, and setting $\alpha=JK$, we arrive at three different kinds of expression: \begin{enumerate}[(i)] \item $f(r:s|JK)=\theta(r:s|K)+|J|\lambda(r:s|K)-\mu(r:s|JK)$; \item $f(r:x_1|JK)=\lambda_K-\lambda_{rK}-\mu(r:x_1|JK)$; \item $f(x_1:x_2|JK)=-\mu(x_1:x_2|JK)$; \end{enumerate} where $x_1,x_2\in N_1$, $r,s\in N_2$ are distinct, but otherwise arbitrary, and, for example, $\theta(a:b|c)=-\theta_c+\theta_{ac}+\theta_{bc}-\theta_{abc}$. By direct computation we find \begin{align} \theta(a:b|\emptyset) &= n(n+1)(n-2) &\qquad \lambda(a:b|\emptyset) &= 0, \nonumber\\ \theta(a:c|\emptyset) &= n(n+1)(3n-1) & \lambda(a:c|\emptyset) &= -(n+1)(3n-1), \nonumber\\ \theta(b:c|\emptyset) &= n(n+1)(n-1) & \lambda(b:c|\emptyset) &= -(n+1)(n-1), \nonumber\\ \label{theta} \theta(a:b|c)& =n(n+1) & \lambda(a:b|c) &= (n+1)(n-1), \\ \theta(a:c|b)& =2n(n+1)^2 & \lambda(a:c|b) &= -2n(n+1), \nonumber\\ \theta(b:c|a)& =2n(n+1) & \lambda(b:c|a) &= 0, \nonumber \end{align} and that the only non-zero values of $\mu(i:j|\alpha)$ are \begin{align} \mu(b:c|a)=\mu(b:x_1|a)=-\mu(a:b|\emptyset) &=2n(n+1), \\ \mu(a:c|\emptyset)=\mu(a:x_1|\emptyset)=-\mu(c:x_1|a)=-\mu(x_1:x_2|a) &=2n^2(n+1), \\ \mu(a:c|b)=\mu(a:x_1|b)=-\mu(x_1:x_2|ab)=-\mu(c:x_1|ab) &=2n(n+1)^2. \end{align} We now demonstrate in turn the non-negativity of the three different types of expressions, (i), (ii) and (iii). \begin{enumerate}[(i)] \item From \eqref{theta} it follows that always $\theta(r:s|K)+|J|\lambda(r:s|K)\geq0$, so it only remains to check those cases with $\mu(r:s|JK)>0$. We find that $f(b:c|a)=f(a:c|b)=0$, and that $f(a:c|\emptyset)=n(n+1)(n-1)>0$ since $n\geq 2$. \item From \eqref{lambdadef} we can check that always $\lambda_K-\lambda_{rK}\geq0$, so that we only have to consider cases with $\mu(r:x_1|JK)>0$. We find that $f(b:x_1|a)=(n+1)(n-1)$, $f(a:x_1|b)=0$ and $f(a:x_1|\emptyset)=2n(n+1)$. \item Notice that all expressions of the form $\mu(x_1:x_2|JK)$ are non-positive. \end{enumerate} This concludes the proof. \end{proof} \begin{prop} For all integers $p\geq2$, $f$ satisfies (all instances of) $C_p$, except for $C_n$, which $f$ violates: to be precise, it violates the ``standard'' instance on $n+3$ parties. \end{prop} \begin{proof} First notice that $f$ satisfies the constraints $f(b:c|a)=f(a:c|b)=0$.
Since the $C_p$ are constrained inequalities, we now fix the constrained parties, and so consider instances of $C_p$ of the form \begin{equation} L := \sum_{i=1}^pf(a:b|\alpha_i)-(p-1)f(ab:c|\emptyset)+\sum_{i=1}^pf(\alpha_i)-f(\alpha_1\ldots\alpha_p), \end{equation} where $\alpha_1,\ldots,\alpha_p$ are disjoint subsets of $N_1$. Observe the following: \begin{align} f(ab:c|\emptyset) &= n(n+1)(n-1), \\ f(a:b|\emptyset) &= n^2(n+1), \\ f(a:b|\alpha) &= n(n+1)(n-2) \quad\text{ for } \alpha\neq\emptyset. \end{align} We must now check that for all choices of $p$ and $\alpha_1,\ldots,\alpha_p$, $C_p$ is satisfied, i.e.~$L\geq 0$, except sometimes when $p=n$. First notice that always $\sum_{i=1}^p f(\alpha_i) - f(\alpha_1\ldots\alpha_p)=0$, and hence \begin{equation} L = pn(n+1)(n-2)-(p-1)n(n+1)(n-1)+2\Delta n(n+1), \end{equation} where $\Delta$ denotes the number of empty sets among the $\alpha_i$. Rearranging, we obtain \begin{equation} L = n(n+1)(n-p-1+2\Delta), \end{equation} which is negative precisely when $p>n-1+2\Delta$. This certainly means that we must have $p\geq n$ in order for $f$ to violate $C_p$. Let $p=n+k$. For a violation we require that $k+1>2\Delta$; however, there must be at least $k$ empty $\alpha_i$. Therefore, our condition becomes $k+1>2k$, and the only violation of $C_p$ occurs when $k=0$, $p=n$ and $\Delta=0$. \end{proof} For any $n\geq2$ we have thus found a submodular function $f$ which violates $C_n$ and satisfies $C_p$ for all $p\neq n$. We do not yet know, however, that $f$ satisfies all the basic inequalities, as it could still violate weak monotonicity. Fortunately, all the inequalities we have dealt with so far satisfy the following property: for all $i\in N$, \begin{equation} \sum_{\omega\in\powset{N}:i\in\omega}\chi_\omega=0, \end{equation} where the constants $\chi_\omega$ are as defined in \eqref{ineqdef}. Inequalities with this property are called \emph{balanced} \cite{Cha03}. This allows us to define a new function $g:\powset{N}\rightarrow\real$ by \begin{equation} g(M)=f(M)+\sum_{i\in M} c_i, \end{equation} for some constants $c_i\in\real$. Notice that for any balanced expression the terms involving $c_i$ will cancel, and so $g$ will take the same value as $f$. This means that for all possible values of the $c_i$, $g$ will still be submodular, and will still violate $C_n$ and satisfy $C_p$ for $p\neq n$. In particular, if we choose $c_i=\max_{\alpha\subseteq\beta}\{f(\alpha)-f(\beta)\}$ for all $i$, then $g$ will be monotonic, and so certainly satisfy weak monotonicity. (Note that the $c_i$ are chosen such that if $f$ is already monotonic, then $f=g$.) The above reasoning in particular shows that the classical inequalities of Makarychev \emph{et al.} \cite{Mak02} are independent, a fact that was not shown in the original paper.
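The construction above can also be verified mechanically. The following Python sketch (the helper names are ours) evaluates $f$ for $n=2$, confirms its submodularity and the two constraints, and reproduces the violation $L=n(n+1)\cdot(-1)=-6$ of the standard instance of $C_n$:
\begin{verbatim}
from itertools import combinations

n = 2
N1 = ["x%d" % i for i in range(1, n + 1)]

theta = {                                # theta_K (factor n+1 applied in f)
    frozenset(): 0,
    frozenset("a"): 2*n**3 + 5*n**2,
    frozenset("b"): 4*n**2 + 2*n - 1,
    frozenset("c"): 3*n**2 + n,
    frozenset("ab"): 2*n**3 + 8*n**2 + 4*n - 1,
    frozenset("ac"): 2*n**3 + 5*n**2 + 2*n,
    frozenset("bc"): 6*n**2 + 4*n - 1,
    frozenset("abc"): 2*n**3 + 8*n**2 + 4*n - 1,
}
lam = {                                  # |lambda_K| (minus sign applied in f)
    frozenset(): n**2,
    frozenset("a"): 2*n**3 + 5*n**2 + 2*n,
    frozenset("b"): 4*n**2 + 2*n - 1,
    frozenset("c"): 4*n**2 + 2*n - 1,
    frozenset("ab"): 2*n**3 + 8*n**2 + 4*n - 1,
    frozenset("ac"): 2*n**3 + 5*n**2 + 2*n,
    frozenset("bc"): 6*n**2 + 4*n - 1,
    frozenset("abc"): 2*n**3 + 8*n**2 + 4*n - 1,
}

def f(M):
    """f(JK) = theta_K + |J| lambda_K - mu_JK, with M = J u K."""
    M = frozenset(M)
    J = frozenset(m for m in M if m in N1)
    K = M - J
    mu = 2*n**2*(n+1) if M == frozenset("a") else \
         2*n*(n+1)**2 if M == frozenset("ab") else 0
    return (n + 1) * theta[K] - len(J) * lam[K] - mu

def cmi(h, A, B, G):
    """h(A:B|G) = -h(G) + h(AG) + h(BG) - h(ABG)."""
    A, B, G = map(frozenset, (A, B, G))
    return -h(G) + h(A | G) + h(B | G) - h(A | B | G)

ground = list("abc") + N1
submod = all(cmi(f, {i}, {j}, al) >= 0
             for i, j in combinations(ground, 2)
             for r in range(len(ground) - 1)
             for al in combinations([g for g in ground
                                     if g not in (i, j)], r))
print("f submodular:", submod)
print("constraints:", cmi(f, "a", "c", "b"), cmi(f, "b", "c", "a"))

# Standard instance of C_n with alpha_i = {x_i}: negative, i.e. violated.
L = (sum(cmi(f, "a", "b", {x}) for x in N1)
     - (n - 1) * cmi(f, set("ab"), "c", set())
     + sum(f({x}) for x in N1) - f(N1))
print("C_n instance value:", L)          # -> -6 for n = 2
\end{verbatim}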
\section{Variants of the Inequalities} \label{variations} In this section we present some alternate forms of the inequalities proved in section \ref{result}. Firstly, we can consider Theorem \ref{main} when $\rho$ is a pure state on parties $A,B,C,X_1,\ldots,X_{n+1}$. Then, since $S(J)_\rho=S(J^c)_\rho$, we can eliminate system $X_{n+1}$ from the inequality, and obtain the following theorem. \begin{thm1'} Let $\rho$ be a multipartite quantum state on parties $\{A,B,C,X_1,\ldots,X_n\}$ which satisfies the constraints: \begin{equation} \label{constraints} I(A:C|B)_{\rho}=I(B:C|A)_{\rho}=0. \end{equation} Then, \begin{equation}\begin{split} \sum_{i=1}^nI(A:B|X_i)_\rho &+ \sum_{i=1}^nS(X_i)_\rho+I(A:B|CX_1\ldots X_n)_\rho \\ &+ S(ABCX_1\ldots X_n)_\rho-S(ABC)_\rho-nI(AB:C)_\rho \geq 0. \end{split}\end{equation} \end{thm1'} There is a further transformation we can make to Theorem \ref{main}. Looking through the proof, at all stages we can allow $C$ to play the role of one of the $X_i$. If we do this we obtain another family of constrained inequalities, and their purified versions. \begin{thm} Let $\rho$ be a multipartite quantum state on parties $\{A,B,C,X_1,\ldots,X_n\}$ which satisfies \eqref{constraints}. Then: \begin{equation}\begin{split} \sum_{i=1}^nI(A:B|X_i)_\rho+\sum_{i=1}^nS(X_i)_\rho &+ I(A:B|C)_\rho+S(C)_\rho\\ &- S(CX_1\ldots X_n)_\rho-nI(AB:C)_\rho \geq 0. \end{split}\end{equation} \end{thm} \begin{thm2'} Let $\rho$ be a multipartite quantum state on parties $\{A,B,C,X_1,\ldots,X_n\}$ which satisfies \eqref{constraints}. Then: \begin{equation}\begin{split} \sum_{i=1}^nI(A:B|X_i)_\rho+\sum_{i=1}^nS & (X_i)_\rho+I(A:B|CX_1\ldots X_n)_\rho+S(ABCX_1\ldots X_n)_\rho\\ &+ I(A:B|C)_\rho+S(C)_\rho-S(AB)_\rho-(n+1)I(AB:C)_\rho \geq 0. \end{split}\end{equation} \end{thm2'} Previously, the only known constrained inequality for the von Neumann entropy was found in \cite{LW05}. There it was shown that any $4$-party quantum state satisfying the constraints $I(A:C|B)=I(B:C|A)=I(A:B|D)=0$ also satisfies the inequality $I(C:D)\geq I(AB:C)$. Choosing $n=1$ in the forms of the theorem above, we obtain three new four-party constrained inequalities. Since our inequalities use two of the same constraints as \cite{LW05}, but not the other, we might expect them to be strictly stronger, meaning we could rederive the previous result. Curiously, however, this turns out not to be the case. Indeed, consider the function $e:\powset{ABCD}\rightarrow\real$ given in the following table, which satisfies the basic inequalities: \bigskip\noindent \begin{center} {\begin{tabular}{c||c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c} &\ $\emptyset$~&\ A~&\ B~&\ C~&\ D~&\ AB&\ AC&\ AD&\ BC&\ BD&\ CD&ABC&ABD&ACD&BCD&ABCD\\ \hline\hline $e$&0&5&5&2&4&6&5&5&5&5&6&6&6&5&5&4\\ \end{tabular}} \end{center} \bigskip\noindent We can easily check that $e$ satisfies all the constraints of the old inequality; however, $e(C:D|\emptyset)-e(AB:C|\emptyset)=-2<0$, so that $e$ violates the old inequality. On the other hand, each of our new four party constrained inequalities is satisfied by $e$. Hence the old inequality is independent of the new ones.
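This counterexample can be checked directly from the table; the following few lines of Python (the notation is ours) evaluate the three constraints of the old inequality and the violating combination:
\begin{verbatim}
e = {"": 0, "A": 5, "B": 5, "C": 2, "D": 4, "AB": 6, "AC": 5, "AD": 5,
     "BC": 5, "BD": 5, "CD": 6, "ABC": 6, "ABD": 6, "ACD": 5, "BCD": 5,
     "ABCD": 4}

E = lambda s: e["".join(sorted(set(s)))]        # order-independent lookup

def I(A, B, G=""):
    """Conditional mutual information e(A:B|G)."""
    return -E(G) + E(A + G) + E(B + G) - E(A + B + G)

# Constraints of the old inequality of [LW05]:
print(I("A", "C", "B"), I("B", "C", "A"), I("A", "B", "D"))  # -> 0 0 0
# ... which e nevertheless violates:
print(I("C", "D") - I("AB", "C"))                            # -> -2
\end{verbatim}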
In any case, we believe it possible that a deeper connection between classical and quantum entropy inequalities exists. We have tested many of the new non-Shannon type inequalities on quantum states, using a numerical optimisation program, and have not been able to find a single violation (although limits on processing power restrict us to Hilbert spaces with small local dimensions). We also note that all these new inequalities are balanced, and that the only entropy inequality known to be true in the classical but not in the quantum case is monotonicity -- which is unbalanced. We are therefore led to speculate whether, in fact, \emph{all} balanced inequalities that hold for the Shannon entropy also hold for the von Neumann entropy. \acknowledgments We thank Beth Ruskai and Franti\v{s}ek Mat\'{u}\v{s} for discussions on information inequalities. The authors acknowledge support by the U.K.~EPSRC, the European Commission (STREP project ``QCS''), the ERC (Advanced Grant ``IRQUAT''), the Royal Society and the Philip Leverhulme Trust. \bibliographystyle{unsrt}
\section{Introduction} Particles floating on water and stirred by turbulence tend to segregate into string-like clusters~\cite{bib:cressman2004_NJP, bib:larkin2009_PRE,bib:lovecchio2013_PRE,bib:lovecchio2014} as they disperse. Similarly, a passive scalar (say, the concentration field of a given substance) stirred on a turbulent interface is also found to exhibit pronounced persistent heterogeneities~\cite{bib:eckhardt2001_PRE}, never reaching a perfectly mixed state. Such \textit{turbulent interfacial unmixing} can have dramatic consequences in environmental, geophysical and industrial flows. It is for instance responsible for the accumulation of pollutants (such as micro-plastic debris) floating at the surface of the oceans \cite{bib:lebreton2012_MarinePollutionBull,bib:eriksen2014_PlosOne}, and is probably also associated with coastal mixing fronts~\cite{bib:carrillo2001_PhysChemEarth} as well as with the dynamics of phyto-plankton blooms and patchiness~\cite{bib:piontkovski1997,bib:abraham2000_Nature}. In this respect, the clustering of floaters on a turbulent interface contrasts with the usual intuition that turbulence always enhances the mixing of tracer particles and passive scalars, with an effective turbulent diffusivity orders of magnitude larger than the molecular diffusivity, classically leading to a rapid homogenization of stirred substances. This is all the more true given that such clustering is observed regardless of any attractive interactions between floaters, such as the so-called \textit{cheerios effect}~\cite{Vella:2005hi}. Indeed, such a counter-intuitive response to turbulent stirring is known to occur when there exist sources of compressibility in the system. This has long been recognized for the transport of inertial particles, where the so-called preferential concentration phenomenon~\cite{bib:monchaux2012_IJMF} is associated with the effective compressibility that arises from the inertia-induced departure of the particle velocities from the (incompressible) stirring flow velocity~\cite{bib:maxey1987}. In the case of floaters, the compressibility source responsible for the persistent clustering does not rely on additional physical effects as above, but is readily provided by the flow. Indeed, the relevant 2D interfacial turbulent field is a priori compressible due to the presence of upwelling regions (acting as divergent zones or sources) and downwelling regions (acting as convergent zones or sinks), even when the 3D stirring turbulence underneath the interface is itself incompressible~\cite{bib:eckhardt2001_PRE, bib:larkin2009_PRE,bib:afonso2009_JPA,bib:lovecchio2013_PRE}. This results in a final state which is statistically stationary (though out of equilibrium), where turbulent mixing tends to homogenize the particle spatial distribution while compressibility effects sustain the non-uniformities, with dense regions at the convergent zones and depleted regions at the divergent zones. For both the turbulent clustering of floaters and that of inertial particles, the effective compressibility is related to some underlying coupling to the overall turbulent mixing process. In recent studies~\cite{bib:Volketal2014,bib:Maugeretal2016,bib:shukla2017_NJP,bib:raynaletal2018,bib:raynal_volk2019} our groups have shown that similar compressible effects on the transport and mixing properties of particles can be induced by the particle response -- a so-called phoretic drift -- to environmental field gradients.
This \textit{environment sensing} strategy can be built from various phoretic phenomena: diffusiophoresis (drift induced by chemical concentration gradients), thermophoresis (drift induced by thermal gradients), electrophoresis (drift induced by electric field gradients), etc. Experiments, simulations and analytical models~\cite{bib:Volketal2014,bib:Maugeretal2016,bib:shukla2017_NJP,bib:raynaletal2018, bib:raynal_volk2019} show that such phoretic particles acquire an effectively compressible dynamics, even though the underlying flow is perfectly incompressible. Active (self-propelled) particles, which can be seen as an extreme case of phoretic particles (with self-generated gradients driving their drift), have also been reported to exhibit clustering when stirred by incompressible chaotic or turbulent flows~\cite{bib:khurana2011_PRL, bib:durham2011_PRL}. In this article, we propose to experimentally investigate a new, original configuration of interfacial mixing, where small floating particles are stirred by active interfacial particles, in the absence of any other underlying forced flow. More precisely, we consider the mixing of a patch of micrometric hollow glass spheres floating on water, stirred by millimetric interfacial active camphor disks~\cite{bib:bourgoin_etal2020}. The motivation for exploring this configuration is twofold: (i) in a recent study~\cite{bib:bourgoin_etal2020} we have shown that the dynamics of such active disks mimics very accurately the statistical multi-scale properties of homogeneous isotropic turbulence (in particular, a Kolmogorov-like spectrum has been found), although the underlying flow itself remains almost at rest; it is therefore tempting to investigate the mixing induced when such active disks are used as stirrers. (ii) The self-motility of these camphor disks is driven by self-generated surface tension gradients (related to a symmetry breaking of the dissolution of camphor in water) \cite{Nakata:2015ki}; one may therefore expect that the mixed micro-particles experience some additional phoretic drift (due to the surface tension gradients), eventually leading to an effectively compressible behaviour, as previously described. As a consequence, although no actual fluid turbulence (neither compressible nor incompressible) is present in the system, the proposed configuration shares qualitative properties with interfacial turbulent mixing: a stirring mechanism (active camphor particles) with turbulent-like statistical features and a source of possible compressible effects (related to the presence of surface tension gradients). We explore the global and multi-scale mixing properties when a small patch of glass bubbles is released at the free surface and stirred by the active camphor disks. We observe that, similarly to interfacial turbulent mixing, a final non-uniform steady state is reached for the concentration field of the micro-particles, with densely seeded regions and depleted regions (in the trail of the active camphor stirrers). A spectral analysis of the concentration field reveals striking quantitative analogies with the turbulent mixing of a passive scalar. Finally, a close investigation of the dynamics in the vicinity of individual stirrers confirms the role played by compressible effects induced by Marangoni flows driven by surface tension gradients in the chemical wake of the active camphor stirrers.
\section{Materials and Methods} \subsection{Experimental setup} \begin{figure} \centering \includegraphics{figure1.pdf} \caption{(a) Experimental setup: water tank filled with a \SI{1}{cm}-deep layer of Millipore water. At the surface, $N$ interfacial swimmers (camphor disks). At the beginning of the experiment, a patch of passive floaters (\SI[parse-numbers = false]{40 \pm 2}{mg} of glass bubbles) is introduced in the system near the center of the bath. An LED plate emits light from above and a camera acquires the light transmitted through the setup. (b) Typical image recorded after ten minutes for $N=15$ swimmers with radius $R=$\SI{2.5}{mm}.} \label{fig:setup} \end{figure} Camphor disk swimmers are made by punching an agarose sheet (\SI{0.5}{mm} in thickness) filled with solid camphor grains \cite{boniface2019self, Nakata:2015ki, bib:bourgoin_etal2020}. Depending on the size of the puncher, the radius $R$ of the camphor disks considered hereafter ranges from 1 to \SI{4}{mm}. A number $N$ (ranging from 7 to 45 \footnote{For small numbers of camphor disks (typically $N\le5$), swimmers have a tendency to follow the side of the container, and mixing is not efficient enough. In particular, it is not possible to access the averaged concentration field around a swimmer such as shown in figure \ref{fig:concentration}, because the glass bubbles do not cover enough of the domain. This is why we decided to begin with a minimum number of swimmers equal to 7.}) of freshly made swimmers is deposited at the surface of a \SI{1}{cm}-thick water subphase (Elga PureLab Flex ultra-pure water) in a circular glass cell (Fig. \ref{fig:setup}a). All around the cell, a thin floating plastic ring is placed at the edge; it cancels the capillary meniscus, avoiding the trapping of floaters or swimmers. The available remaining free surface has a diameter of $\SI{18}{\centi\metre}$. Prior to each new experiment, the subphase is renewed with fresh water. Once deposited on water, swimmers begin to release camphor into the fluid, which results in surface tension heterogeneities in their vicinity that drive so-called Marangoni flows \cite{bib:Nishimori_jpsj-2017,bib:leRouxetal2016}. Despite their circular shape, a spontaneous symmetry breaking occurs immediately, resulting in their propulsion in an arbitrary direction, at a typical velocity $U\sim \SI{6}{cm.s^{-1}}$ for a single swimmer of radius $R=\SI{2.5}{mm}$ \cite{boniface2019self}. This swimming velocity depends not only on the swimmer's radius and physico-chemical parameters, but also on the number of swimmers, as they all interact. As soon as enough swimmers are present, the system starts to exhibit spatio-temporal fluctuations \cite{bib:bourgoin_etal2020}, so that their velocity is better characterized using the root mean square $u_\mathit{rms}=\left(\langle u^2_x \rangle + \langle u^2_y\rangle\right)^{1/2}$. Table \ref{table1} summarizes the values of $u_\mathit{rms}$ together with the corresponding Reynolds numbers for the cases considered in our experiments; for the sake of comparison with other works, the percentage of surface fraction covered by the swimmers, $\varphi_s=100\,N\pi R^2/A_t$, where $A_t$ is the total available area, is also given. The Reynolds number is defined here as $\mathrm{Re}_p = u_\mathrm{rms} R/\nu$, where $\nu$ is the kinematic viscosity of water, and two series of experiments have been conducted. In the first case the radius is fixed ($R=\SI{2.5}{mm}$) and the number $N$ of Marangoni swimmers is increased, resulting in a decreasing velocity.
In the second case we consider a fixed number of swimmers $N=15$, with increasing radius: here the rms velocity does not vary much, although the Reynolds number increases with increasing radius. Given the range of Reynolds numbers considered here, $\mathrm{Re}_p \in [20,100]$, the flow of water remains perfectly laminar, although the dynamics of the swimmers fluctuates in space and time, as observed in our previous study \cite{bib:bourgoin_etal2020}. \begin{table} \caption{Root mean square velocity of the swimmers $u_\mathit{rms}=\sqrt{\langle u^2_x \rangle + \langle u^2_y\rangle}$ and corresponding Reynolds number, here defined as $\mathrm{Re}_p = u_\mathrm{rms} R/\nu$, where $\nu$ is the kinematic viscosity of water, for: (a) $R=\SI{2.5}{mm}$ and different numbers of swimmers $N$; (b) $N=15$ and increasing radius $R$. For all experiments we indicate $\varphi_s$, the percentage of surface fraction covered by the swimmers.} \centering \begin{tabular}{|@{\quad}l | c |c |c |c |c |c |} \multicolumn{7}{c}{$R = \SI{2.5}{\milli\meter}$}\\[.4mm] \hline $N$ & 7 & 11 & 15 & 20 & 30 & 45 \\[.4mm] \hline $\varphi_s$ & 0.54 & 0.85 & 1.16 & 1.54 & 2.32 & 3.47 \\[.4mm] \hline $u_\mathit{rms}$ (mm/s)\ \ &\ 37.1\ &\ 32.8\ &\ 26.6\ &\ 22.1\ &\ 15.8 \ &\ 11.2 \\[.4mm] \hline $\mathrm{Re}_p$ & 93 & 82 & 66 & 55 & 40 & 28 \\ \hline \end{tabular} \hfil \begin{tabular}{|@{\quad}l | c |c |c |c |c |c |c |} \multicolumn{8}{c}{$N = 15$}\\[.4mm] \hline $R$ (mm) & 1 & 1.5 & 2 & 2.5 & 3 & 3.5 & 4 \\[.4mm] \hline $\varphi_s$ & 0.19 & 0.42 & 0.74 & 1.16 & 1.67 & 2.27 & 2.96 \\[.4mm] \hline $u_\mathit{rms}$ (mm/s)\ \ &\ 19.4\ \ &\ 26.6\ \ &\ 26.7\ \ &\ 26.6\ \ &\ 25.1\ \ &\ 24\ \ &\ 22.8 \\[.4mm] \hline $\mathrm{Re}_p$ & 19 & 40 & 53 & 66 & 75 & 84 & 91\\ \hline \end{tabular}\\ \vspace{0.1cm} \quad(a)\hskip7.4cm(b) \label{table1} \end{table} At the beginning of an experiment, a patch of passive floaters constituted of glass bubbles (see properties in \cite{glassbubbles}) is introduced in the system of interfacial swimmers near the center of the bath. The subsequent dynamics of the whole system, backlit with an LED panel, is recorded from the top using an HXC Flare camera equipped with a Nikon 24-85mm f/2.8-4D IF AF NIKKOR objective, yielding images with resolution 2048$\times$2048 px$^2$ at a rate of \SI{35}{Hz} (see fig. \ref{fig:setup}a). A typical image is shown in figure \ref{fig:setup}b, where interfacial swimmers appear as dark disks and glass bubbles as grey shades on the surface. \subsection{Concentration field} In the following, $\langle Q\rangle$ denotes the average of a given quantity $Q$ over the whole circular surface. To access the local surface concentration of floaters, we quantify the light absorption at location $(x, y)$ and time $t$ by comparing the light intensity field $I(x,y,t)$ in the presence of floaters to the intensity field $I_0(x,y)$ of a reference image without floaters. Assuming that such an absorption is linear with the local concentration $C(x,y,t)$ of the floaters --- a natural choice for a single layer of individual scatterers --- we define a non-dimensional intensity field $\widetilde I(x, y,t)$ through the relation \begin{equation} \widetilde I(x, y,t) = \frac{I_0(x,y)-I(x,y,t)}{I_0(x,y)}.
\label{calib} \end{equation} To confirm that this linear approximation gives a good indication of the local concentration of floaters, \textit{i.e.} \begin{equation} \widetilde I(x, y,t) \propto C(x, y,t)\, , \label{I_propto_C} \end{equation} we must check that $\langle\widetilde I(x,y,t)\rangle$ estimated from Eq.~(\ref{calib}) satisfies mass conservation and follows linearly the total number of glass bubbles. For a given amount of glass bubbles poured onto the surface, the total number of particles must indeed be conserved over time as the mixing process goes on (as long as the floaters do not leave the measurement area); hence the space-averaged concentration $\langle C\rangle$ must remain time-independent and proportional to the initial amount of particles poured on the surface, and so must $\langle \widetilde I\rangle$. We verified that this property is satisfied for a series of experiments performed at varying mass of glass bubbles poured on the surface with $N=15$ swimmers, for which we computed the spatial average $\langle \widetilde I \rangle(t)$ at each time step. The inset of figure \ref{fig:concentration}a shows the resulting spatial average for an initial mass $m=\SI{40}{mg}$ of glass bubbles. As expected for $\widetilde I(x, y,t) \propto C(x, y,t)$, the spatial average is indeed time-invariant. The remaining fluctuations have been quantified at large times, long after any memory of the initial condition is lost (measurement window in the inset of figure \ref{fig:concentration}a). The standard deviation is of order $\sigma_{\widetilde I}=10^{-3}$ (this corresponds to a relative standard deviation of around 5\,\% for the signal in the inset), which serves as an estimate of the error bar of the measurement (smaller than the size of the symbols in figure \ref{fig:concentration}a). \begin{figure} \includegraphics{figure2.pdf} \caption{(a) Mean measured intensity, $\overline{\langle \widetilde I (x,y,t)\rangle}$, averaged over space and time, as a function of the poured glass bubble mass $m$ for $N=15$ swimmers. $(+)$ measurements, $(-)$ linear fit $\overline{\langle \widetilde I \rangle} = a\times m$, with $a=5.51\times 10^{-4}\,\mathrm{mg}^{-1}$. The error bar is estimated as $\sigma_{\widetilde I}=10^{-3}$ by computing the standard deviation of the time series $\langle \widetilde I\rangle(t)$. The inset is a time series of the non-dimensional instantaneous quantity $\langle \widetilde I\rangle(t)/\, \overline{\langle \widetilde I \rangle}$ as a function of time; the vertical dotted lines indicate our measurement window for time averaging. (b) Instantaneous non-dimensional glass bubble concentration field deduced from the image of Fig. \ref{fig:setup}b.} \label{fig:concentration} \end{figure} In the next section, we will show that the glass-bubble system reaches a statistically stationary state after an initial stage of a few tens of minutes. In the following, we thus denote by $\overline{Q}$ the time average of a given quantity, with $t\in [55, \,65]\,\mathrm{min}$ corresponding to our measurement window within this late-stage stationary regime. Figure \ref{fig:concentration}a displays the evolution of the global average concentration $\overline{\langle \widetilde I\rangle(t)}$: this quantity evolves linearly with the poured glass bubble mass, validating the assumed proportionality between luminosity and floater concentration, so that equation (\ref{I_propto_C}) can be used to convert recorded images into a concentration field.
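In practice, this conversion reduces to a per-pixel operation. The following minimal Python sketch (purely illustrative, assuming hypothetical NumPy arrays \texttt{images} and \texttt{I0} standing for the recorded stack and the reference image) implements Eq.~(\ref{calib}) and the mass-conservation check described above:
\begin{verbatim}
import numpy as np

def concentration_proxy(I, I0):
    """Non-dimensional intensity (I0 - I)/I0, Eq. (calib),
    taken as proportional to the local floater concentration."""
    return (I0 - I) / I0

def conservation_check(images, I0):
    """Spatial average of the proxy at each time step; it should
    stay constant in time while the floaters are being mixed."""
    means = np.array([concentration_proxy(I, I0).mean()
                      for I in images])
    return means, means.std()
\end{verbatim}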
As all moments of the concentration field are expected to scale with the mass concentration, all experiments discussed in the sequel have been performed with the same mass $m=40\pm 2$ mg of glass bubbles, which was observed to be a good compromise, providing a good signal-to-noise ratio with no risk of forming multiple layers of bubbles on the surface. Note finally that, because the equations of mixing are linear, the results do not depend on the mean concentration, so that in the following we only consider non-dimensional concentration fields; in order to smooth out the very small time fluctuations shown in the inset of figure \ref{fig:concentration}a, the reference chosen is $\overline{\langle C\rangle}$. \section{Mixing properties} \label{sec:mixing} As already mentioned in the previous section, when a large-scale patch containing glass bubbles is poured on the surface, it starts to get stretched and folded by the action of the swimmers, so that the concentration field $C(x,y,t)$ becomes strongly inhomogeneous, as already observed in figure \ref{fig:concentration}b. As the spatial average of the concentration field is conserved, we characterize its heterogeneity by computing the standard deviation \begin{equation} C_\mathrm{std}(t) = \sqrt{\langle C^2 \rangle (t)-\langle C \rangle^2(t)} \, . \end{equation} \begin{figure} \begin{centering} \includegraphics{figure3.pdf} \end{centering} \caption{(a) Temporal evolution of the concentration field standard deviation $C_\mathrm{std}(t) = \sqrt{\langle C^2 \rangle-\langle C \rangle^2}$, normalized by the global mean $\overline{\langle C \rangle}$, in the case of $N=15$ swimmers with radius $R=\SI{2.5}{mm}$. (b) Short-term evolution of $(C_\mathrm{std}(t)-C^\mathit{int}_\mathrm{std})/\,\overline{\langle C \rangle}$ plotted in semilog representation, with $C ^\mathit{int}_\mathrm{std}/\,\overline{\langle C \rangle}=2.7$. For these figures, the points are obtained with a sliding-window average over a few seconds, so that the error is very low.} \label{fig:ectype} \end{figure} Figure \ref{fig:ectype}a shows the evolution of $C_\mathrm{std}(t)/\,\overline{\langle C \rangle}$ in the case of $N=15$ swimmers, which is typical of all explored parameters. Starting from a finite initial value corresponding to the initial patch, the standard deviation relaxes over a short time scale (less than one minute), as expected for a system being mixed by the random motions of stirrers --- here the swimmers. However, while the classical expectation would be a decay toward a stationary, fully homogeneous state with $C_\mathrm{std} = 0$, the system reaches a statistically stationary state of incomplete mixing, quantified by $C^\infty_\mathrm{std}$ being more than twice the global mean $\overline{\langle C \rangle}$. The mixing process then consists of three main stages: a rapid phase ($t \leq 4$ min) during which the standard deviation $C_\mathrm{std}(t)$ decreases exponentially toward an intermediate value $C^\mathit{int}_\mathrm{std} \simeq 2.7 \overline{\langle C \rangle}$ (figure \ref{fig:ectype}b); this exponential decay, typical of chaotic or turbulent mixing \cite{bib:boylandetal2000,bib:Gouillartetal2006,bib:tennekes_Lumley1972}, is followed by a slower relaxation ($t \in [4-40]$ min) and finally a long phase ($t \geq 40$ min) for which the statistical properties of the concentration field only weakly evolve, due to a slow loss of activity of the swimmers.
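The diagnostics of figure \ref{fig:ectype} reduce to a few lines of analysis; a minimal Python sketch (assuming a hypothetical stack \texttt{C\_t} of concentration fields sampled at the camera frame rate) would read:
\begin{verbatim}
import numpy as np

def c_std(C):
    """Spatial standard deviation of one concentration field."""
    return np.sqrt((C**2).mean() - C.mean()**2)

# sigma = np.array([c_std(C) for C in C_t]) / C_t.mean()
# The rate of the rapid phase (t <= 4 min) is the slope of
# np.log(sigma - 2.7) versus time, e.g. via np.polyfit.
\end{verbatim}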
All characterizations (including the calibration discussed in the previous section) have been performed in this third phase, using a $10$ min recording to ensure that the swimmers' activity remains the same when comparing experiments, with stationary statistical properties. Surprisingly, when investigating how this final mixing degree $C^\infty_\mathrm{std}$ depends on the mixer properties (number and size of camphor swimmers), it is found to be quite robust and independent of the conditions. Indeed, Fig. \ref{fig:spectres}(a,b) reports that, except for the smallest particle sizes ($R<\SI{2.5}{mm}$) or numbers ($N<15$), for which the final state is slightly closer to homogeneous, the final standard deviation of concentration remains unaffected. \begin{figure} \includegraphics{figure4.pdf} \caption{\textit{Top:} Evolution of the standard deviation of the concentration of glass bubbles $C_\mathrm{std}^\infty/\overline{\langle C\rangle}$ as a function of (a) the number of swimmers $N \in [7,\,45]$, with radius $R=2.5$ mm; (b) the radius $R \in [1,\,4]$ mm, with $N=15$ swimmers. In both figures, the fit (dash-dotted line) results from the model explained in section \ref{subsec:model_Cstd}, and the error bars were calculated using a logarithmic differentiation. (c) Power spectral density of the glass bubble concentration as a function of the wavenumber $k_x$ for an increasing number of swimmers with $R=2.5$ mm. $(\circ)$ $N=7$ swimmers, $(\diamond)$ $N=15$ swimmers, $(\triangleright)$ $N=30$ swimmers, $(\square)$ $N=45$ swimmers. Dashed lines correspond to power-law spectra with exponents $-5/3$ and $-1$. (d) Power spectral density of the glass bubble concentration as a function of the wavenumber $k_x$ for $N=15$ swimmers and different radii $R$. $(\circ)$ $R=1$ mm, $(\diamond)$ $R=3$ mm, $(\triangleright)$ $R=3$ mm, $(\square)$ $R=4$ mm. Dashed lines correspond to power-law spectra with exponents $-5/3$ and $-1$.} \label{fig:spectres} \end{figure} It should be noted that the standard deviation observable yields a global characterization that integrates concentration fluctuations over all length scales. A simple look at figures \ref{fig:setup}b or \ref{fig:concentration}b however unveils a rich underlying spatial organization of the floaters, with (i) large void features around each swimmer, typical of the present system, together with (ii) thin and complicated structures in between the voids, qualitatively reminiscent of classical mixing processes. Indeed, in a recent study the dynamics of the camphor swimmers was shown to exhibit multi-scale features following typical turbulent scaling laws~\cite{bib:bourgoin_etal2020}. This contribution, caused by the swimmers' relative velocities, is responsible for the floater mixing that shows up in between the void features. We now explore the multi-scale nature of this mixing process and shall come back in the next section to the mechanism responsible for the void generation around each swimmer. To characterize the multi-scale mixing, we compute one-dimensional power spectra of the concentration field along the $x$ axis \begin{equation} |\hat{C}|^2(k_x,y,t)=|\mathrm{DFT}_x[C(x,y,t)]|^2, \end{equation} where DFT stands for Discrete Fourier Transform. It is computed in a square box of size 1310$\times$1310 px$^2$ centered in the middle of the surface, and is a function of the wave number $k_x$, $y$, and $t$; we average it over space and time in the stationary regime to get better statistics.
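For reference, this spectral estimate can be sketched in a few lines of Python (a minimal version, assuming a hypothetical stack \texttt{C\_t} of shape (time, $n_y$, $n_x$) restricted to the central box, with pixel size \texttt{dx}):
\begin{verbatim}
import numpy as np

def concentration_spectrum(C_t, dx):
    """1D power spectrum |DFT_x C|^2 along x, then averaged
    over the y direction and over time (stationary regime)."""
    spec = np.abs(np.fft.rfft(C_t, axis=-1))**2
    kx = np.fft.rfftfreq(C_t.shape[-1], d=dx)  # wavenumbers
    return kx, spec.mean(axis=(0, 1))
\end{verbatim}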
Figures \ref{fig:spectres}(c,d) display the corresponding power spectra, $\overline{\langle |\mathrm{DFT}_x[C(x,y,t)]|^2\rangle_{y}}(k_x)$, obtained when increasing the number of swimmers at fixed radius $R=\SI{2.5}{mm}$, or for different radii at fixed $N=15$. It is visible in these two figures that the concentration field exhibits fluctuations at all spatial scales, whatever the number of swimmers or their radii. When increasing the number of swimmers at fixed $R$, the power spectra are attenuated in the low wave number range $k_x \leq 0.05$ mm$^{-1}$ while exhibiting higher and higher fluctuations in the high wave number range $k_x \geq 0.05$ mm$^{-1}$. This is in line with the fact that the global mixing efficiency does not vary much when increasing $N$, as $(C_\mathrm{std}^\infty)^2$ is proportional to the area under each curve. When $N$ is large enough for the turbulent-like behavior of the swimmers to develop \cite{bib:bourgoin_etal2020}, the concentration field is efficiently stretched and folded, which results in small spatial structures. In that regime ($N \geq 15$), we observe that concentration spectra tend to follow a power-law behavior with an exponent close to $-5/3$ in the intermediate spatial frequency range, as observed in hydrodynamic turbulence \cite{bib:Warhaft2000}. It is remarkable that a second regime emerges in the very high frequency range, where scalar spectra exhibit a second power-law behavior with an exponent close to $-1$. This Batchelor-type spectrum \cite{bib:batchelor1959} indicates that the flow is smooth at these spatial scales, so that small scales are created via random advection, as observed in experiments and numerical simulations \cite{bib:Williansetal1997,bib:pierrehumbert1994,bib:toussaintetal2000}. Figure \ref{fig:spectres}\,(d) shows that all the aforementioned results are modulated by the radius of the swimmers: (i) increasing the radius $R$ of the particles, which results in an increase of the swimmer Reynolds number, reinforces the power-law behavior at intermediate scales, which extends over more than one decade when $R=4$ mm; (ii) the exponent in the high frequency range gets closer to $-1$ when increasing the radii of the swimmers, although such a regime was not very well developed at $R=2.5$ mm. Because the system reaches an out-of-equilibrium steady state, a competing mechanism must balance the aforementioned mixing processes. Indeed, as already pointed out, each swimmer trails an area devoid of floaters, suggesting the existence of an \textit{unmixing} mechanism that constantly rejuvenates large-scale heterogeneities. Those empty wakes eventually feed the mixing process toward smaller scales, leading to the non-trivial spectra shown in fig.~\ref{fig:spectres}. The evolution of the wake of individual swimmers is therefore a key ingredient to understand the overall process, which we now investigate. \section{Around a swimmer} \label{sec:around_swimmer} \subsection{Averaged concentration field} As already pointed out, a striking feature of the present system is the wake of swept-clean surface, free of floaters, that follows each swimmer (see figures \ref{fig:setup}b or \ref{fig:concentration}b). To get a more quantitative insight into this phenomenon, we now define and consider the mean concentration field around a single swimmer.
To this aim, we perform a coherent mean whereby we average the concentration field in the neighborhood of a single swimmer after a set of geometrical transformations ensuring spatial registration (translation to the swimmer's position) and orientational registration (rotation to align the swimming directions). In addition, in order to take into account only isolated camphor disks, we discard those whose center lies within \SI{2.5}{\centi\metre} of the cell edges, and those for which at least one other swimmer lies in a rectangular exclusion zone. The size of the exclusion zone (\SI{9}{\milli\metre} in front of a swimmer, \SI{32}{\milli\metre} behind it and \SI{14}{\milli\metre} in the directions perpendicular to its trajectory) was chosen according to the typical extent of the depleted zone seen in figures \ref{fig:setup}b and \ref{fig:concentration}b. Note that extending it further did not significantly change the outcome, apart from a drastic reduction of the statistics. Overall, a typical set of 10\,000 images of swimmer neighborhoods was used for computing the averaged concentration field of floaters. \begin{figure} \centering \includegraphics{figure5.pdf} \caption{Averaged glass bubble concentration field around a swimmer (in white), going from left to right, obtained by a coherent mean over 10\,000 concentration fields. The configuration considered here is $N=15$ and $R=$\SI{2.5}{\milli\metre}. \label{fig:immoy} } \end{figure} Figure \ref{fig:immoy} shows such an averaged field around a swimmer, going from left to right in the figure, here in a configuration with $N=15$ camphor swimmers of radius $R=\SI{2.5}{mm}$. Two remarkable features are noticeable: (i) as expected from figure \ref{fig:concentration}b, a depleted wake trailing behind the swimmer, where all floaters have been swept away, and (ii) an accumulation front immediately ahead of the swimmer, which was far less visible on instantaneous images. This type of pattern is typical of all conditions; see figures \ref{fig:meanTrailvarN} and \ref{fig:meanTrailvarR} in the appendix for different numbers of swimmers $N$ and radii $R$. From the averaged concentration fields obtained for all configurations (Fig. \ref{fig:immoy} and appendix figures \ref{fig:meanTrailvarN} and \ref{fig:meanTrailvarR}), we extract the total wake area by fitting it with an elliptical shape. Note that additional information on the depleted surface can be found in appendix \ref{appC}. This measured depleted area $A_d$ is shown in figure \ref{fig:deplarea} as a function of the number of swimmers $N$ (Fig. \ref{fig:deplarea}a, with $R=\SI{2.5}{mm}$), and as a function of the radius $R$ of the swimmer (Fig. \ref{fig:deplarea}b, with $N=15$ swimmers in the bath). As clearly seen in figure \ref{fig:deplarea}a, the depleted area around each swimmer decreases with the number $N$ of swimmers. This decrease exhibits a well-defined power-law behavior (see the figure inset for the log-log scale) that yields a fitted exponent close to $-3/4$, \textit{i.e.} $A_d(N,R=\SI{2.5}{mm})=A_d^{(1)} \, N^{-0.76}$, with $A_d^{(1)} = \SI{24}{\centi\meter\squared}$ the fitted wake area of a single isolated swimmer. On the contrary, the depleted area is found to increase with the radius of the swimmers $R$ at fixed density ($N=15$). In practice, the increase may be viewed as close to linear at the smallest sizes, before saturating at larger radii. For practical purposes, a fit by a hyperbolic tangent function reasonably approximates the overall behavior (figure \ref{fig:deplarea}b).
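Both empirical laws are straightforward to fit; a minimal Python sketch (with hypothetical arrays \texttt{N}, \texttt{AdN} and \texttt{R}, \texttt{AdR} holding the measured wake areas of figure \ref{fig:deplarea}) would be:
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

power_law = lambda N, A1, p: A1 * N**p          # A_d = A1 N^p
tanh_law  = lambda R, a, b: a * np.tanh(b * R)  # A_d = a tanh(bR)

# (A1, p), _ = curve_fit(power_law, N, AdN)  # expect p ~ -0.76
# (a, b), _  = curve_fit(tanh_law, R, AdR)   # expect a~3.52, b~0.47
\end{verbatim}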
\begin{figure} \centering \includegraphics{figure6.pdf} \caption{(a) \textcolor{blue}{\textbf{$+$}}: Measured depletion area $A_d$ as a function of the number of swimmers $N$ (fixed radius $R=\SI{2.5}{mm}$). Inset: same in log-log scale; the point \textcolor{red}{$\ast$} for $N=1$ is measured in different conditions, see later in figure \ref{fig:TestOrigin}b. Solid line: fit by a power-law decay $A_d(N,R=\SI{2.5}{mm})=A_d^{(1)} \, N^{-0.76}$, with $A_d^{(1)} = \SI{24}{\centi\meter\squared}$. (b) \textcolor{blue}{$\bullet$}: Measured depleted area $A_d$ as a function of the radius $R$ of the swimmer, for $N=15$ swimmers in the bath. Solid line: linear behavior at small radii; dashed line: guide-line fit by a hyperbolic tangent function, yielding $A_d=3.52\,\tanh{(0.47R)}$ in \si{\centi\meter\squared}. } \label{fig:deplarea} \end{figure} \subsection{A first order model for the standard deviation of concentration} \label{subsec:model_Cstd} Besides the depleted area, another striking feature of figure \ref{fig:immoy} is the fact that the concentration relaxes rapidly towards $\overline{\langle C\rangle}$ when moving away from the camphor swimmer. This is further evidence that mixing by the moving particles is efficient, in line with the spectra shown in figure \ref{fig:spectres}. One can therefore wonder whether, at first order, the large-scale inhomogeneity measured through $C_\mathrm{std}$, of the order of $1.5$--$3\,\overline{\langle C\rangle}$ in figure \ref{fig:spectres}, could primarily reflect the patchiness resulting from the superposition of the wakes and accumulation fronts of all swimmers, and therefore be related to their respective areas. Following this idea, a simple estimate for $C_\mathrm{std}$ can be developed as follows. Let us consider $N$ identical swimmers of radius $R$, each carrying a depleted wake of area $A_d$ and an accumulation front ahead of the swimmer of area $\alpha\,A_d$, and let $A_t$ be the total area of the system. As before, we neglect the area of the swimmers. Let us assume, for the sake of simplicity, that the wakes and accumulation fronts have identical respective extents for each swimmer and that they do not overlap. We therefore model the surface and concentration distributions as follows: \begin{itemize}[label=$-$,leftmargin=1cm ,parsep=0cm,itemsep=0cm,topsep=0cm] \item $N$ depleted areas of total surface $NA_d$, with zero concentration; \item $N$ accumulation fronts ahead of the swimmers, of total area $N\alpha A_d$, in which the over-concentration comes from the glass bubbles that are not in the wakes, that is a concentration $(1+\alpha)A_d/(\alpha A_d)\,\langle C\rangle$; \item a uniform concentration equal to $\langle C\rangle$ everywhere else, on an area equal to $A_t-N(1+\alpha) A_d$. \end{itemize} We have: \begin{eqnarray} \left(C_\mathit{std}^\infty\right)^2&=& \langle C^2\rangle-\langle C\rangle^2\\ &=& \frac{1}{A_t}\left[0+N\alpha A_d\left(\frac{1+\alpha}{\alpha}\right)^2\,\langle C\rangle^2+\bigl(A_t-N(1+\alpha)A_d\bigr) \langle C\rangle^2 \right]-\langle C\rangle^2\\ &=& \frac{N A_d}{A_t}\left[\frac{(1+\alpha)^2}{\alpha}-(1+\alpha) \right]\langle C\rangle^2\\ &=& \frac{N A_d}{A_t}\, \frac{1+\alpha}{\alpha}\, \langle C\rangle^2 \end{eqnarray} and finally \begin{equation} C_\mathit{std}^\infty=\sqrt{\frac{N A_d}{A_t}\, \frac{1+\alpha}{\alpha}}\, \langle C\rangle\,. \label{eq:Cstd} \end{equation}
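A quick numeric reading of Eq.~(\ref{eq:Cstd}) is instructive; the minimal sketch below uses values reported in this article ($A_t$, the power-law fit of $A_d$, and $\alpha=0.2$, anticipating the choice justified in the next paragraphs):
\begin{verbatim}
import numpy as np

A_t   = 254.0               # total free surface (cm^2)
A_d   = 24.0 * 15**(-0.76)  # wake area at N = 15 (power-law fit)
alpha = 0.2                 # accumulation-front / wake area ratio

c_std = np.sqrt(15 * A_d / A_t * (1 + alpha) / alpha)
print(c_std)   # ~1.0 <C>: the bare model underestimates the
               # measured 2-3 <C>, hence the correcting factors
               # introduced below.
\end{verbatim}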
We now confront this first order model with the experimental measurements. To do so, let us note that measured standard deviations are obtained from instantaneous images and then averaged over time. Because the tails of the depleted wakes are often curved in a random direction, the procedure for calculating the averaged concentration field around a swimmer smears out the wakes and eventually leads to an underestimation of their extent. Compared with instantaneous images, this underestimation amounts to about 50\%, depending on the swimmer considered and the given time. This requires a correcting factor in Eq.~(\ref{eq:Cstd}) for direct comparison with the data. Likewise, the concentration averaging smooths the maximum concentration ahead of the swimmer: from about $6\,\langle C\rangle$ on raw images, depending on the instant $t$ chosen (Fig. \ref{fig:concentration}b), down to $2.5\,\langle C\rangle$ (Fig. \ref{fig:immoy}) in averaged concentration fields. Accordingly, the parameter $\alpha$ setting the size of the accumulation area in the model is chosen to be $\alpha=0.2$, so as to yield the proper maximum concentration value ($6 \langle C\rangle$). Such an extent of 20\,\% of the depleted area is consistent with direct observations (Fig. \ref{fig:concentration}b). Qualitatively, using the $N$-dependency of the averaged depleted area found in Fig.~\ref{fig:deplarea}, our simple model Eq.~(\ref{eq:Cstd}) predicts a measured standard deviation for the glass bubbles behaving as $C_\mathrm{std}\propto N^{0.12}$. This very weak power-law prediction is consistent with the very slow evolution reported in Fig. \ref{fig:spectres}a for $N\ge7$. As for the $R$-dependency, the initially linear evolution of the single-swimmer averaged depleted area (Fig. \ref{fig:deplarea}b), followed by a saturation, should lead to $C_\mathrm{std}\propto\sqrt{R}$ before reaching a plateau. Again, this is fairly consistent with the observed trend in figure \ref{fig:spectres}b. In a more quantitative way, we can fit the measured data of figures \ref{fig:spectres}a,b with our model expression including the previously mentioned correcting factors \begin{equation} C_\mathit{std}^\infty/\overline{\langle C\rangle}=\beta\,\sqrt{\frac{N \,A_d^\mathrm{eff.}}{A_t}\, \frac{1+\alpha}{\alpha}}\,, \label{eq:Cstd_fit} \end{equation} with $\beta$ a free scaling parameter, $\alpha=0.2$ and $A_d^\mathrm{eff.} = 1.5 A_d$, where $A_d(N, R)$ is the averaged depleted area around a single swimmer. We used this expression, combined with the equations given in figure \ref{fig:deplarea} for the behavior of $A_d$ versus $N$ or $R$, to plot the fits shown as dash-dotted lines in figures \ref{fig:spectres}a and b. As can be seen, it captures in both cases the overall experimental behavior, with a scaling factor $\beta=1.95$. This is a very reasonable order of magnitude for such a simple model, where only three regions (each with a given value of concentration) are considered. Because the evolution of $C_\mathrm{std}$ with $R$ and $N$ reflects that of the wakes of individual swimmers, $C_\mathrm{std}$ is phenomenologically dominated by demixing at large scale; it is thus reasonable to associate the demixing properties of this flow with the strongly inhomogeneous concentration field around the swimmers. \section{Discussion: Marangoni effects} \label{sec:Marangoni} Up to now we have mainly focused on the mixing/demixing properties of the flow, associated with the depleted areas around the swimmers, regardless of the physical process that produces them.
We now discuss the different possible origins of this phenomenon, and show how the description of Marangoni effects accounts consistently for the observed behaviors. \subsection{Origin of the depleted area} \label{sec:origin_depleted} Physically, it is tempting to link the existence of the depleted area to the chemical cloud released by a single swimmer and to the associated Marangoni flows driving the self-propulsion. This, however, requires that we discard other plausible origins, among which is the hydrodynamic wake around a disk moving at finite Reynolds number along the interface. \begin{figure} \centering \includegraphics{figure7.pdf} \caption{(a) An agarose disk of radius \SI{2.5}{mm} is pulled at \SI{6}{\centi\metre\per\second} by a motor. No depleted area is observed, proving that mechanical effects cannot explain what we observe in Fig. \ref{fig:immoy}. Scale bar is \SI{1}{cm}. (b) An interfacial swimmer is fixed in the bath and glass bubbles are poured around it. In less than a second, a large depleted area is observed, pointing to a chemical effect. Scale bar is \SI{1}{cm}.} \label{fig:TestOrigin} \end{figure} To discriminate between these scenarios, we performed two complementary experiments. In a first configuration, an agarose gel disk \textit{without camphor loading} is moved along the air-water interface at a constant velocity imposed by a motorized translation stage. At a velocity typical of the swimmers, $U = \SI{6}{\centi\metre\per\second}$, the simple motion of the disk at an interface covered with glass-bubble floaters does not generate any significant pattern around the moving disk (Fig. \ref{fig:TestOrigin}a). In particular, no signature of a swept wake is observed, the only feature being a thin concentrated filament released at the rear, due to floaters trapped at the disk edge by capillary effects. Note also that no accumulation front is visible ahead of the moving disk in this configuration. In the second configuration, we cancel the disk motion by pinning it at a fixed position at the interface but restore the camphor loading of the disk, so that chemical release and the associated Marangoni flows do occur: a clear pattern develops around the disk, where all floaters in its vicinity are swept away and leave a depleted area reminiscent of the one observed with swimmers. Such an observation is consistent with closely related experiments on Marangoni flows driven by surfactant spreading \cite{bib:roche_etal_PRL2014,bib:leRouxetal2016}, where a similar cleaned-up surface area is observed. The area obtained here (around \SI{26}{\centi\meter\squared}), although of larger extent than in Fig. \ref{fig:immoy}, is in very good agreement with the value given by the fit in figure \ref{fig:deplarea}a for $N=1$ (\SI{24}{\centi\meter\squared}). Overall, the camphor spreading from each disk triggers two different effects, which act oppositely on the mixing of floaters: first, it generates the net motion of each disk, all the disks operating as an assembly of stirrers moving randomly in the system; second, the complex Marangoni flow pattern attached to each swimmer induces a local unmixing mechanism that constantly rejuvenates gradients by sweeping floaters from a finite area, so as to form a concentrated rim ahead of the swimmer, together with an empty wake. In the next two subsections, we examine how the description of Marangoni effects can indeed rationalize some of the observations reported earlier.
\subsection{Single Swimmer} We first discuss how the generation of the depleted area around each swimmer can be captured by a description of the Marangoni flow. As a first step, we begin with an estimate of the camphor concentration field generated by a single swimmer of radius $R$ and typical velocity $U$. The viscous friction is $F_v = \pi R^2 \eta U / \delta_v$ \cite{bib:ockendon1995}, where \begin{equation} \delta_v = R/\sqrt{\mathrm{Re}} \label{eq:delta_v} \end{equation} is the viscous boundary layer thickness underneath the swimmer, with $\mathrm{Re}=UR/\nu$ the Reynolds number. Balancing $F_v$ with the driving capillary force contribution $F_c = \pi R \Delta\gamma$, where $\Delta\gamma$ is the fore-aft surface tension difference, we obtain \begin{equation} \Delta\gamma \sim \eta\, U \sqrt{\mathrm{Re}}\,. \label{eq:dGamma} \end{equation} For our typical situation with $R=\SI{2.5}{\milli\meter}$ and $U\approx\SI{6}{\centi\meter\per\second}$ for $N=1$ swimmer \cite{boniface2019self}, corresponding to a Reynolds number $\mathrm{Re}=150$, this leads to a typical surface tension imbalance of $\Delta\gamma\simeq\SI{0.7}{\milli\newton\per\meter}$, in good agreement with previous experimental estimates on camphor boats \cite{Karasawa:2014gq}. To proceed, we now assume, in agreement with the literature, a linear relationship $\Delta\gamma = - \alpha C$ between surface tension and local camphor concentration, with $\alpha = \SI{3e-3}{\newton\square\meter\per\mol}$ \cite{Soh_jpcb-2008}. We obtain a characteristic concentration behind the swimmer $C^*\simeq \SI{0.2}{\mol\per\cubic\meter}$, far below the solubility limit $C_\mathrm{sat.} = \SI{8}{\mol\per\cubic\meter}$. Qualitatively, this camphor release in the vicinity of the swimmer induces, on top of the capillary force, a Marangoni stress at the free surface, from the low to the high surface tension region, that drives fluid outward, away from the swimmer. This flow sweeps away surface floaters, generating a clean depleted area around each swimmer (see Fig. \ref{fig:TestOrigin}b). To make this argument more quantitative, it is natural to identify the depleted area with the camphor-contaminated area over which the Marangoni stress occurs. We also simplify the problem by considering a fixed camphor particle (as in figure \ref{fig:TestOrigin}b), yielding a depleted disc (rather than an ellipse) of area $A_d$ and radius $R_d$. In that case the typical velocity of the Marangoni flows produced matches the velocity $U$ of freely moving swimmers, in agreement with experimental measurements \cite{Sur:2019bj}; therefore, except for the simplified geometry, all physical scalings remain identical. On the one hand, the swimmer releases camphor at a rate proportional to its area; this production term reads $Q_p = \beta R^2$, with $\beta =\SI{3.1e-4}{\mol\per\second\per\square\meter}$ as measured for this system \cite{boniface2019self}. On the other hand, camphor is removed from the surface by dissolution \footnote{Note that for camphor, the evaporation/sublimation flux towards the upper atmosphere can act as a competing removal mechanism.
Such an alternative route is discussed in appendix \ref{ap_sec:camphor} and does not change the overall picture described here.}, at a rate \begin{equation} Q_d = D \frac{C^*}{\delta_D} A_d, \label{eq:CamphorConserv} \end{equation} with $\delta_D = (R_d/R)^{1/2}\;\delta_v / \sqrt{\mathrm{Sc}}$ the thickness of the diffusion boundary layer \cite{bib:levich1962}, $\mathrm{Sc} = \nu/D$ the Schmidt number, and $D=\SI{7e-10}{\square\meter\per\second}$ the camphor diffusivity \cite{boniface2019self}. In line with recent treatments of other Marangoni spreading problems \cite{Mandre_jfm-2017, bib:roche_etal_PRL2014}, we suppose that production is balanced by dissolution according to \begin{equation} Q_p = Q_d\,. \label{eq:produc_dissip} \end{equation} This yields an extension of the depleted area \begin{equation} R_d = \left[ \frac{\alpha\beta}{\pi\rho D \;\mathrm{Sc}^{1/2}} \right]^{2/3} \;\frac{R}{U^{4/3}}. \label{eq:Ldiff_iso} \end{equation} With the typical values given above, this predicts a contaminated area of extent $R_d\simeq 3R$, to be compared with the measured extent $R_d\simeq10R$ (Fig.~\ref{fig:TestOrigin}b). Considering the rough scaling approach used, this is fair agreement, although the size of the depleted area is underestimated. Finally, noticing that the swimming velocity of individual swimmers was shown to follow a scaling law of the form $U\propto R^{1/3}$ \cite{boniface2019self}, it is possible to gather all radius dependencies in Eq. \eqref{eq:Ldiff_iso} to reach the theoretical expectation $R_d\propto R^{5/9}$, that is, a depleted area $A_d\propto R^{10/9}$. While this result has been derived for a -- fixed -- isolated swimmer, experimental measurements always involve multiple swimmers. For a single swimmer, it is indeed not possible to properly define a wake, as the camphor disk gets trapped at the tank edges, where it moves along the outer perimeter. This implies that measured wakes for multiple swimmers are expected to follow the above scaling only in the limit of small interaction and overlap among wakes. For a fixed number of swimmers $N=15$, this is best achieved for small wakes, corresponding to small swimmers. Indeed, figure \ref{fig:deplarea}b shows an almost linear trend for small $R$, consistent with the previous scaling law, before the depleted area eventually saturates at larger radii, for which the wakes overlap. Overall, simple estimates considering Marangoni effects successfully account for the dependence of the depletion area on the radius of the swimmer and predict its size with a rather good order of magnitude. \subsection{Multi Swimmers} While the previous arguments were developed for isolated swimmers, we now focus on the wake behavior with multiple swimmers. In a crowded environment, when the mean distance $d$ between two swimmers, scaling as $d \sim \sqrt{A_t/N}$, becomes comparable to the isolated wake extension, one naturally expects the typical radius of the depleted area to be limited by $d$ and thus to decrease with increasing $N$. The inset in figure \ref{fig:deplarea}a however suggests that the wake area is a \textit{strictly} decaying function of $N$, even for small values of $N$ for which this naive crowding effect should not contribute. Indeed, in the case of 7 swimmers of radius $R=\SI{2.5}{\milli\meter}$, the area of one wake amounts to \SI{5.4}{cm^2}.
This is significantly smaller than the anticipated isolated-swimmer wake (\SI{26}{cm^2} for $N=1$), despite corresponding to a hardly crowded configuration: the total accessible area $A_t = \SI{254}{cm^2}$ should allow hosting 7 swimmers with wake areas equal to \SI{26}{cm^2}, as for isolated swimmers. In order to test whether this decay could be attributed to Marangoni effects, we propose a very simple 1D analytic model that mimics this situation. Let us consider a 1D system of fixed, equidistant chemical sources that correspond to our swimmers. For a finite-size system of width $W$, the inter-source distance $d$ thus goes like $d\approx W/N$. In the following, we will neglect edge effects by considering an infinite system, keeping in mind that $1/d$ stands for the number $N$ of swimmers. We now assume that each single source --\textit{i.e.} camphor particle-- generates a camphor distribution around its location $x_0$ that is an even function of $x-x_0$, with standard deviation $\sigma$, decreasing with the distance to the origin $x_0$. This is due to camphor being spread and eventually lost from the surface by several effects (Marangoni effect, dissolution, sublimation, diffusion). The precise shape is not crucial; for simplicity we choose a Gaussian profile $C(x)=C_0 \exp(-x^2/2 \sigma^2)$. We further assume that when placing camphor particles on a line, the total camphor concentration $C_t$ is a linear superposition of the contributions from all sources. One then has: \begin{equation} C_t(x) = \sum_{n\in \mathbb{Z}} C(x-n d)\,. \end{equation} Once the camphor concentration is known, we turn to the distribution of glass bubbles on the surface, and denote by $G_b(x)$ the corresponding concentration. Glass bubbles are repelled by the camphor due to the Marangoni flow, through a compressible velocity of the type \begin{equation} v = - \alpha \partial_x C_t. \label{eq:v_alpha_partial_C} \end{equation} This repulsive contribution is balanced by a diffusive transport term with coefficient $D_b$ that tends to homogenize the glass bubbles, so that the distribution $G_b$ satisfies the conservation equation \begin{equation} \partial_x \bigl(v\, G_b - D_b\, \partial_x G_b\bigr) = 0\, , \end{equation} with $v$ given by equation (\ref{eq:v_alpha_partial_C}). One then gets \begin{equation} v(x)\, G_b(x) - D_b\, \partial_x G_b(x) = B\, . \label{eq:eq_with_B} \end{equation} The value of the constant can be obtained by a symmetry argument: indeed, the solution is periodic with period $d$, so that one can take a local average of the previous equation. Defining the average as \begin{equation} \langle f \rangle_d(x) = \frac{1}{d} \int_{x-d/2}^{x+d/2} f(x') dx'\, , \end{equation} one gets $\langle v\, G_b \rangle_d = B$. In the case of a single swimmer ($d\longrightarrow+\infty$), $C(x)$ is an even function, so that for reasons of symmetry $G_b(x)$ is also an even function; because $v$ is an odd function (equation \ref{eq:v_alpha_partial_C}), one gets $\langle v\, G_b \rangle_d (x=0)= 0 = B$. Note that this corresponds to a solution with zero mean flux of particles transported by the flow. Actually, this term has to vanish whatever the symmetries of $C(x)$ if the glass bubbles are confined in a box: they cannot leave the domain, so the total flux vanishes at the boundaries.
Equation (\ref{eq:eq_with_B}) now reads: \begin{equation} D_b\, \partial_x \log G_b(x) = -\alpha \partial_x C_t(x) \end{equation} with general solution \begin{equation} G_b(x) = G_0 \exp(-\alpha C_t(x)/D_b), \label{eq:G_b} \end{equation} a result that was checked numerically using Monte-Carlo simulations. \begin{figure} \includegraphics{figure8.pdf} \caption{a, b, c: Camphor concentration $C_t(x)$ (dashed line) and glass bubble concentration $G_b(x)/\max(G_b)$ (solid line) as a function of $x/d$ for $d/\sigma=7$, $5$, $3$. $G_b(x)/\max(G_b)$ is given by Eq. \ref{eq:G_b} with $\alpha C_0/D_b=10$. The depleted zone, represented by black symbols, corresponds to $G_b < 0.025 \max(G_b)$. d: Evolution of the dimensionless depletion length $\ell_d/\sigma$ as a function of $\sigma/d\propto N$.} \label{fig:1Dmodel_Marangoni} \end{figure} Figures \ref{fig:1Dmodel_Marangoni}a, b, c display $C_t$ and $G_b/\max(G_b)$ as a function of $x/d$ for a $d$-periodic distribution of Gaussian profiles and different ratios $d/\sigma$. On those profiles the ratio $\alpha C_0/D_b$ is set to 10, and the size of the depleted zone $\ell_d$ is defined as the region where the glass bubble concentration $G_b(x)$ is smaller than a threshold set to $2.5\%$ of its maximal value \footnote{Because of the exponential in equation \ref{eq:G_b}, changing the threshold does not change much the size of the depleted region, provided that it is not too small.}. The depleted region corresponds to the black symbols in figures \ref{fig:1Dmodel_Marangoni}a, b, c, and is represented in figure \ref{fig:1Dmodel_Marangoni}d as a function of $\sigma/d$ for $3\le d/\sigma\le 20$. For large enough values of $d$, the camphor clouds of two neighbouring particles should not interact much; this is illustrated in figure \ref{fig:1Dmodel_Marangoni}a for $d=7\sigma$ (corresponding to $\sigma/d\simeq 0.14$), where the total concentration of camphor nearly goes to zero at $x=d/2$. For larger values of $d$ we therefore expect the size $\ell_d$ of the depletion region to be essentially unchanged: this is exactly what is observed in figure \ref{fig:1Dmodel_Marangoni}d, with the plateau for $\sigma/d< 0.14$. For smaller gaps between swimmers, the wakes interact, the camphor distribution is less steep with an increasing minimum of $C_t$ (figures \ref{fig:1Dmodel_Marangoni}b and c), and the size of the depleted zone decreases (to eventually vanish for very small values of $d$) \footnote{This is not obvious from figures \ref{fig:1Dmodel_Marangoni}abc, where the size of the depleted region seems larger for the intermediate value $d/\sigma=5$; however, the $x$-axis represents $x/d$, and $d$ is not the same for those three figures. While $\ell_d$ increases with $d$ before saturating, $\ell_d/d$ presents a maximum.}. This is also visible in figure \ref{fig:1Dmodel_Marangoni}d for $\sigma/d>0.15$. Because $\sigma/d\propto N$, this shows that, as in our experiments, the size of the depleted region decreases with the number of swimmers. As a very naive application of this model, let us calculate the minimum experimental tank diameter that would allow hosting two camphor disks of radius $R=\SI{2.5}{mm}$ with wakes having the same extent as for isolated swimmers. From the previous 1D model, non-interacting camphor clouds require $d\ge7\sigma$, in which case the depleted zone extends over $\ell_d\approx2.8\,\sigma$ (figure \ref{fig:1Dmodel_Marangoni}d).
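These numbers are easy to reproduce: a minimal Python sketch of the 1D model (Gaussian sources, Eq.~(\ref{eq:G_b}), and the 2.5\,\% threshold; an illustration only, not the Monte-Carlo simulations mentioned above) recovers both the plateau value $\ell_d\approx2.8\,\sigma$ and its decay at smaller $d/\sigma$:
\begin{verbatim}
import numpy as np

def depletion_length(d_over_sigma, alphaC0_over_Db=10.0, n_src=20):
    """Depleted length (in units of sigma) over one period of the
    1D model: G_b ~ exp(-alpha C_t / D_b) with Gaussian sources."""
    x = np.linspace(-d_over_sigma/2, d_over_sigma/2, 2001)
    n = np.arange(-n_src, n_src + 1)           # neighbouring sources
    C_t = np.exp(-0.5*(x[:, None] - n*d_over_sigma)**2).sum(axis=1)
    G_b = np.exp(-alphaC0_over_Db * C_t)       # Eq. (G_b), G_0 = 1
    depleted = G_b < 0.025 * G_b.max()         # 2.5% threshold
    return depleted.mean() * d_over_sigma      # depleted fraction * d

print(depletion_length(7.0))   # ~2.8: plateau of non-interacting wakes
print(depletion_length(3.0))   # much smaller: interacting wakes
\end{verbatim}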
Overall, the inter-swimmer distance should thus exceed $d\ge2.5\,\ell_d$, which requires that the tank diameter verifies $D\ge d+2\,(\ell_d/2)=d+\ell_d\ge3.5\,\ell_d$. For $R=\SI{2.5}{mm}$, the isolated depleted zone extension is $\ell_d\sim\SI{5.75}{cm}$, from figure \ref{fig:TestOrigin}b or from $A_d^{(1)}$ in figure \ref{fig:deplarea}. Therefore the minimum diameter for having non-interacting wakes for only two swimmers is $D\sim\SI{20}{cm}$, that is, larger than what we actually have. This naive application of the model thus predicts that, even for $N=2$, the wake of each individual swimmer would be smaller than that of a single camphor disk; for a tank of the size of our experiment, we would therefore always be in the decaying region, away from the plateau: this is indeed what we observe in figure \ref{fig:deplarea}a. Finally, within this model, the physical reason why the depleted region shrinks when increasing the number of swimmers is that the Marangoni flow $v=-\alpha \partial_x C_t$ becomes less compressible; such an explanation should also hold in our experiment, although the scalings would be different in the 2D case. \section{Summary and conclusion} In this article, we have proposed an original experiment of mixing at the free surface of a water tank, with multiple stirrers. The stirrers are self-propelled camphor disks that move at the interface of the fluid; the particles to be mixed consist of a patch of passive floaters (glass bubbles) initially released at the center of the tank. Mixing is achieved thanks to the random motions of the $N$ camphor disks, for various $N$ and radii: in a first stage, the decrease of the standard deviation of the concentration of glass bubbles is exponential, as for chaotic or turbulent mixing, in line with the power spectra of concentration, which exhibit a power-law behavior with an exponent close to $-5/3$ in the intermediate spatial frequency range, followed by a second power law with an exponent close to $-1$ at higher frequencies. However, the system reaches a stationary state of incomplete mixing, with a final standard deviation of concentration more than twice the mean concentration. By averaging the concentration field around a swimmer, we have shown that, in addition to the depleted wake around the swimmer, there is an accumulation front immediately ahead. We have thus proposed a very simple model of the concentration distribution, with three different values of concentration (an empty wake and an over-concentrated accumulation front around each swimmer, surrounded by a perfectly mixed fluid), that reproduces the levels of unmixing observed through the standard deviation, showing that $C_\mathrm{std}$ is dominated by demixing at large scale. In the last section, we have shown experimentally that the depleted area is related to Marangoni effects; then, using rough estimates based on those effects, we have recovered a good order of magnitude for the size of the depleted area, with the correct scaling for its dependence on the radius of the swimmer. Finally, we have proposed a 1D model of Marangoni effects that explains the tendency of the depleted area to decrease when increasing the number of swimmers, even when no crowding effect comes into play. Overall, the system reaches a stationary (although out-of-equilibrium) state of mixing/demixing, where demixing is linked to Marangoni flows and the associated compressible effects.
A striking feature of this study is that, besides demixing, the system develops a ``turbulent-like'' concentration spectrum, with a large-scale region, an inertial regime at intermediate scales, and a Batchelor regime at small scales: while this is in accordance with the idea that the stirrers are characterized by a ``turbulent-like'' motion for a large enough number of swimmers \cite{bib:bourgoin_etal2020}, it may seem intriguing since the glass bubbles themselves do not develop such dynamics; this raises the open question of a possible relationship between the concentration spectrum of a scalar mixed by $N$ moving stirrers and the spatial correlation of the dynamics of those stirrers. \begin{acknowledgments} This work was supported by the French research programs ANR-16-CE30-0028 and IDEXLYON of the University of Lyon, in the framework of the French program ``Programme Investissements d'Avenir'' (ANR-16-IDEX-0005). \end{acknowledgments}
\section{Glossary} \paragraph{Measure-theoretic dynamical system} Let $(Z,\mathscr{C},\kappa)$ be a standard Borel probability space. Assume $R\colon Z\to Z$ is invertible (a.e.), bi-measurable and $\kappa$-preserving. Then $R$ is called an {\em automorphism} or an (invertible) {\em measure-preserving transformation}, and the quadruple $(R,Z,\mathscr{C},\kappa)$ is a {\em measure-theoretic dynamical system}. \paragraph{Topological dynamical system} Let $X$ be a compact metric space and let $T$ be a homeomorphism of $X$. Then $(T,X)$ is called a {\em topological dynamical system}. \paragraph{Subshift} For any closed subset $\mathbb{A}\subset \mathbb{U}:=\{z \in \mathbb{C} : |z|\leq 1\}$ (most often $\mathbb{A}$ will be finite), let $S\colon \mathbb{A}^\mathbb{Z}\to \mathbb{A}^\mathbb{Z}$ be the left shift, i.e. for each $x=(x_n)_{n\in\mathbb{Z}}\in \mathbb{A}^\mathbb{Z}$, $Sx=y$, where $y_n=x_{n+1}$ for $n\in\mathbb{Z}$. Each closed and $S$-invariant subset $X\subset \mathbb{A}^\mathbb{Z}$ is called a {\em subshift} and the corresponding dynamical system is $(S,X)$. For each $u\in \mathbb{A}^\mathbb{Z}$, we define a subshift $X_u\subset \mathbb{A}^{\mathbb{Z}}$ as the orbit closure of $u$ under $S$. Similarly, when $u\in \mathbb{A}^\mathbb{N}$ ($\mathbb{N}=\{1,2,\ldots\}$), we can extend $u$ symmetrically, by setting $u(-n):=u(n)$ (and $u(0)=0$) for each $n\geq 1$, and again define the corresponding subshift $X_u$. Finally, we will denote by $F$ the continuous function on $X\subset\mathbb{A}^\mathbb{Z}$ defined by $F(x)=x_0$. \paragraph{Invariant measure} Given a topological dynamical system $(T,X)$, the set of probability Borel $T$-invariant measures is denoted by $M(T,X)$. The subset of ergodic measures (which is always non-empty) is denoted by $M^e(T,X)$. Each $\nu\in M(T,X)$ gives rise to a measure-theoretic dynamical system $(T,X,\mathscr{B}(X),\nu)$, where $\mathscr{B}(X)$ stands for the $\sigma$-algebra of Borel subsets of $X$. With the weak-$*$-topology, $M(T,X)$ becomes a compact metrizable space. $(T,X)$ is called {\em uniquely ergodic} if $|M(T,X)|=1$. \paragraph{Uniquely ergodic model of a measure-theoretic dynamical system} Given a measure-theoretic (ergodic) dynamical system $(R,Z,\mathscr{C},\kappa)$ and a uniquely ergodic topological system $(T,X)$ with $M(T,X)=\{\nu\}$ ($\nu$ is necessarily ergodic), one says that $(T,X)$ is a {\em uniquely ergodic model} of the automorphism $R$ if the measure-theoretic systems $(R,Z,\mathscr{C},\kappa)$ and $(T,X,\mathscr{B}(X),\nu)$ are measure-theoretically isomorphic. Each ergodic automorphism $R$ has a uniquely ergodic model (this is the Jewett--Krieger theorem). \paragraph{Generic point} Assume that $(T,X)$ is a topological dynamical system. We say that $x\in X$ is a {\em generic point} for a Borel measure $\nu$ on $X$ whenever the ergodic theorem holds for $T$ at $x$ for any continuous function $f\in C(X)$, i.e.\ $\frac{1}{N}\sum_{n\leq N}f(T^nx)\to \int f\, d\nu$. In other words, the empirical measures $\frac{1}{N}\sum_{n\leq N}\delta_{T^nx}$ converge to $\nu$ in the weak-$*$-topology. If this convergence holds along a subsequence, we say that $x$ is {\em quasi-generic} for $\nu$. We denote by $V(x)$ the set of all $\nu\in M(T,X)$ for which $x$ is quasi-generic ($\emptyset\neq V(x)\subset M(T,X)$ by compactness). We say that $x\in X$ is {\em logarithmically generic} for $\nu$ whenever $\frac{1}{{L_N}}\sum_{n\leq N}\frac{1}{n}\delta_{T^{n}x}$, with $L_N=\sum_{n\leq N}1/n$, converges to $\nu$.
No harm arises if in what follows we replace $L_N$ by $\log N$. Finally, we say that $x\in X$ is {\em logarithmically quasi-generic} for $\nu$ whenever this convergence holds along a subsequence, and we denote the set of all measures for which $x$ is logarithmically quasi-generic by $V^{\log}(x)$ ($\emptyset\neq V^{\log}(x)\subset M(T,X)$). \paragraph{Quasi-genericity versus logarithmic quasi-genericity} Obviously, if $|V(x)|=1$ then $V(x)=V^{\log}(x)$, but in general the sets $V(x)$ and $V^{\log}(x)$ can even be disjoint. However, as shown in~\cite{MR3821718}, \begin{equation}\label{gkl1} V^{\log}(x)\cap M^e(T,X)\subset V(x). \end{equation} Moreover, using an idea from Tao~\cite{Ta5}, it has been proved in \cite{Gomilko:ab} that \begin{multline}\label{glr1} \mbox{if $V^{\log}(x)=\{\nu\}$ and $\nu$ is ergodic, then $\lim_{k\to\infty}\frac1{N_k}\sum_{n\leq N_k}\delta_{T^nx}=\nu$},\\ \mbox{for a subset $\{N_k:k\geq1\}$ whose logarithmic density is~1.} \end{multline} \paragraph{Entropy} There are two basic notions of {\em entropy}: topological and measure-theoretic. We skip the definitions and refer the reader, e.g., to \cite{MR2809170}. For a topological dynamical system $(T,X)$ the topological entropy will be denoted by $h_{top}(T,X)$, and for a measure-theoretic dynamical system $(T,X,\mathscr{B},\nu)$ the corresponding measure-theoretic entropy will be denoted by $h(T,X,\mathscr{B},\nu)$. The basic connection between them is the variational principle: $$h_{top}(T,X)=\sup_{\nu\in M(T,X)}h(T,X,\mathscr{B}(X),\nu)=\sup_{\nu\in M^e(T,X)}h(T,X,\mathscr{B}(X),\nu).$$ \paragraph{Completely deterministic point} We say that a point $x\in X$ is {\em completely deterministic}~\cite{We9} (see also~\cite{Kam}) if for any $\nu\in V(x)$, we have $h(T,X,\mathscr{B}(X),\nu)=0$. By the variational principle, $h_{top}(T,X)=0$ if and only if all points of $X$ are completely deterministic. \paragraph{Furstenberg system} Let $u\in \mathbb{A}^\mathbb{Z}$. For each $\nu\in V(u)$, the system $(S,X_u,\mathscr{B}(X_u),\nu)$ is called a {\em Furstenberg system of $u$}. For each $\nu\in V^{\log}(u)$, the system $(S,X_u,\mathscr{B}(X_u),\nu)$ is called a {\em logarithmic Furstenberg system of $u$}. For each $u\in \mathbb{A}^\mathbb{Z}$, one can consider $|u|\in [0,1]^\mathbb{Z}$. Then $(S,X_{|u|})$ is a topological factor of $(S,X_u)$ (the map $\pi\colon X_{u}\to X_{|u|}$ given by $\pi(x)=|x|$, understood coordinatewise, is equivariant with $S$). For $\nu\in V(u)$, we have $\pi_\ast(\nu)\in V(|u|)$, where $\pi_\ast(\nu)$ stands for the image of $\nu$ via $\pi$. Moreover, the Furstenberg system $(S,X_u,\mathscr{B}(X_u),\nu)$ is an extension of $(S,X_{|u|},\mathscr{B}(X_{|u|}),\pi_\ast(\nu))$. In particular, if $V(|u|)$ is a singleton ($|u|$ is a generic point), then all Furstenberg systems of $u$ have $(S,X_{|u|},\mathscr{B}(X_{|u|}),\nu)$ (where $\nu$ is the unique member of $V(|u|)$) as their factor. \paragraph{Arithmetic function} A sequence of complex numbers is usually denoted by $u=(u_n)$. But if such a sequence is, in some sense, important from the number-theoretic point of view, one speaks of an {\em arithmetic function} and rather writes $\boldsymbol{u}\colon \mathbb{N}\to \mathbb{C}$, $\boldsymbol{u}=(\boldsymbol{u}(n))$. An arithmetic function $\boldsymbol{u}$ is said to be {\em multiplicative} whenever $\boldsymbol{u}(1)=1$ and $\boldsymbol{u}(m\cdot n)=\boldsymbol{u}(m)\cdot \boldsymbol{u}(n)$ for any choice of coprime $m,n\in \mathbb{N}$.
The prominent examples of multiplicative functions are the M\"obius function $\boldsymbol{\mu}$ and the Liouville function $\boldsymbol{\lambda}$. The M\"obius function $\boldsymbol{\mu}\colon\mathbb{N}\to\{-1,0,1\}$ is defined by $\boldsymbol{\mu}(p_1\ldots p_k)=(-1)^k$ for distinct prime numbers $p_1,\ldots,p_k$ (in what follows, the set of primes is denoted by $\mathbb{P}$), $\boldsymbol{\mu}(1)=1$ and $\boldsymbol{\mu}(n)=0$ for all non-square-free numbers. The Liouville function $\boldsymbol{\lambda}\colon\mathbb{N}\to\{-1,1\}$ is given by $\boldsymbol{\lambda}(n)=(-1)^{i_1+\ldots+i_k}$ for $n=p_1^{i_1}\ldots p_k^{i_k}$ with $p_1,\ldots,p_k\in\mathbb{P}$ and $i_1,\ldots,i_k\in\mathbb{N}$. Clearly $\boldsymbol{\mu}=\boldsymbol{\lambda}\cdot\boldsymbol{\mu}^2$, where $\boldsymbol{\mu}^2$ is nothing but the characteristic function of the set $\mathscr{S}$ of square-free numbers. In fact, $\boldsymbol{\lambda}$ is completely multiplicative, i.e.\ $\boldsymbol{\lambda}(m\cdot n)=\boldsymbol{\lambda}(m)\cdot \boldsymbol{\lambda}(n)$ for any choice of $m,n\in \mathbb{N}$. We extend both $\boldsymbol{\mu}$ and $\boldsymbol{\lambda}$ to negative coordinates symmetrically. \paragraph{Aperiodicity} We say that $\boldsymbol{u}\colon \mathbb{N}\to \mathbb{C}$ is {\em aperiodic} whenever $\boldsymbol{u}$ has mean zero along each arithmetic progression: $\lim_{N\to \infty}\frac{1}{N}\sum_{n\leq N}\boldsymbol{u}(an+b)=0$ for all $a\geq1$ and $b\geq0$. Many classical multiplicative functions are aperiodic, including $\boldsymbol{\mu}$ and $\boldsymbol{\lambda}$. A distance between $\boldsymbol{u},\boldsymbol{v}\colon \mathbb{N}\to\mathbb{U}$ is defined as $$\mathbb{D}(\boldsymbol{u},\boldsymbol{v};N):=\left(\sum_{p\in\mathbb{P},p\leq N}\frac{1-\Re(\boldsymbol{u}(p)\overline{\boldsymbol{v}(p)})}{p} \right)^{1/2}.$$ We say that $\boldsymbol{u}\colon \mathbb{N}\to \mathbb{U}$ is {\em strongly aperiodic}~\cite{MR3435814}, whenever $M(\boldsymbol{u}\cdot \chi; N):=\min_{|t|\leq N}\mathbb{D}(\boldsymbol{u}\cdot \chi,n^{it};N)^2\to \infty$ as $N\to \infty$ for every Dirichlet character $\chi$ (i.e.\ for every periodic, completely multiplicative function). Strong aperiodicity implies aperiodicity. The converse is not true in general (see Theorem B.1 in~\cite{MR3435814}), but it is true for (bounded) real valued multiplicative functions (see Appendix C in~\cite{MR3435814}). In particular, $\boldsymbol{\mu}$ and $\boldsymbol{\lambda}$ are strongly aperiodic. \paragraph{Orthogonality of sequences} Suppose that one of the sequences $(u_n),(v_n)\subset\mathbb{C}$ has zero mean. We say that $(u_n),(v_n)$ are {\em orthogonal}, whenever $\lim_{N\to\infty}\frac{1}{N}\sum_{n\leq N}u_n \overline{v}_n=0$. We say that sequences $(u_n),(v_n)\subset\mathbb{C}$ are {\em orthogonal on short intervals}, whenever $\lim_{K\to\infty}\frac{1}{b_K}\sum_{k\leq K}\left| \sum_{b_k\leq n<b_{k+1}}u_n\overline{v}_n\right|= 0$ for any sequence $(b_k)$ of natural numbers with $b_{k+1}-b_k\to \infty$. Clearly, orthogonality on short intervals implies orthogonality (see below). We can also consider $u_n$ to be elements of a Banach space, while in the dynamical context one often takes $u_n=f(T^nx_k)$, whenever $n\in [b_k, b_{k+1})$ (with ${x_k}\in X$ and $f\in C(X)$). Moreover, $(v_n)$ will often in fact be a multiplicative function (and then we rather write $(\boldsymbol{v}(n))$).
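To justify the last implication for sequences bounded by $1$ (the case relevant in what follows), choose, e.g., $b_k=\lfloor k^{3/2}\rfloor$, so that $b_{k+1}-b_k\to\infty$ while $(b_{K+1}-b_K)/b_K\to0$. Given $N$, let $K$ be the largest index with $b_K\leq N$; then $$\frac1N\left|\sum_{n\leq N}u_n\overline{v}_n\right|\leq \frac{1}{b_K}\sum_{k<K}\left|\sum_{b_k\leq n<b_{k+1}}u_n\overline{v}_n\right|+\frac{b_{K+1}-b_K}{b_K}\to0.$$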
\paragraph{M\"obius orthogonality} We say that a (topological) dynamical system $(T,X)$ is \emph{M\"obius orthogonal} if \begin{equation}\label{defmo} \lim_{N\to\infty}\frac{1}{N}\sum_{n\leq N}f(T^nx)\boldsymbol{\mu}(n)=0 \end{equation} for each $f\in C(X)$ and $x\in X$. \paragraph{Joinings of measure-theoretic dynamical systems} Assume that $(R_i,Z_i,\mathscr{C}_i,\kappa_i)$ is a measure-theoretic dynamical system, $i=1,2$. Each $R_1\times R_2$-invariant measure $\rho$ on $\mathscr{C}_1\otimes\mathscr{C}_2$ projecting on $\kappa_1$ and $\kappa_2$, respectively, is called a {\em joining} of the automorphisms $R_1$ and $R_2$. The set of joinings between $R_1$ and $R_2$ is denoted by $J(R_1,R_2)$ and each $\rho\in J(R_1,R_2)$ yields a (new) measure-theoretic dynamical system $(R_1\times R_2, Z_1\times Z_2,\mathscr{C}_1\otimes\mathscr{C}_2,\rho)$. When $R_1,R_2$ are both ergodic then the set $J^e(R_1,R_2)$ of ergodic joinings between $R_1$ and $R_2$ is non-empty. If $J(R_1,R_2)=\{\kappa_1\otimes\kappa_2\}$ then $R_1$ and $R_2$ are called {\em disjoint} (in the sense of Furstenberg). Let $(T,X)$ be a topological dynamical system. Fix $x\in X$ and let $\boldsymbol{u}$ be an arithmetic function bounded by $1$, i.e.\ $\boldsymbol{u}\colon \mathbb{N}\to \mathbb{U}$. Each accumulation point $\rho$ of $\frac{1}{N}\sum_{n\leq N} \delta_{(T^nx,S^n\boldsymbol{u})}$ is a $(T\times S)$-invariant measure on $X\times \mathbb{U}^\mathbb{Z}$. Obviously, it is a {\em joining} of its projections onto both coordinates. Hence, $\rho$ is a joining of $(T,X,\mathscr{B}(X),\rho|_X)$ with a Furstenberg system of $\boldsymbol{u}$. \paragraph{Nilrotation} Let $G$ be a connected, simply connected nilpotent Lie group and $\Gamma\subset G$ a lattice (a discrete, cocompact subgroup). For any $g_0\in G$ we define $T_{g_0}(g\Gamma):=g_0g\Gamma$. Then the topological system $(T_{g_0},G/\Gamma)$ is called a {\em nilrotation}. \section{Definition of the Subject} Sarnak in 2010 formulated a now celebrated conjecture on the M\"obius orthogonality. It states that each topological dynamical system $(T,X)$ of zero entropy is M\"obius orthogonal, i.e.\ whenever $h_{top}(T,X)=0$, \begin{equation}\label{e1} \lim_{N\to\infty}\frac{1}{N}\sum_{n\leq N}f(T^nx)\boldsymbol{\mu}(n)=0\text{ for each } f\in C(X) \text{ and }x\in X. \end{equation} It has become a bridge between analytic number theory and dynamics. In particular, it keeps stimulating a quick development of disjointness theory in dynamics originated by Furstenberg in 1967, with potential applications, for example in number theory. In this article we will focus on ergodic-theoretic aspects of recent progress in the area. {Unless we need this for a historical reason, we rather avoid describing numerous particular classes of topological systems in which M\"obius disjointness has been shown (an extended bibliography is provided at the end of the article). We concentrate on main ideas, and describe some general results.} \section{Introduction} The article is organized as follows. In Section~\ref{se:chowla} and Section~\ref{se:sarnak} respectively, we discuss Chowla and Sarnak's conjecture from the ergodic-theoretic viewpoint, including dynamical interpretations of purely number-theoretic statements and the main strategies used to attack Sarnak's conjecture. Section~\ref{se:arithm} is a survey of results on Sarnak's conjecture, arranged with respect to the properties of the M\"obius function that come into play in the proof. 
Finally, in Section~\ref{se:future}, we state some open problems. \section{Chowla Conjecture}\label{se:chowla} \subsection{\eqref{chsfor}} The Chowla conjecture deals with higher order correlations of the M\"obius function and asserts that \begin{equation}\label{chsfor}\tag{{\bf C}} \lim_{N\to \infty}\frac1N\sum_{n\leq N}\boldsymbol{\mu}^{j_0}(n+k_0)\boldsymbol{\mu}^{j_1}(n+k_1)\ldots\boldsymbol{\mu}^{j_r}(n+k_r)=0 \end{equation} whenever $0\leq k_0<\ldots<k_r$, $j_s\in\{1,2\}$ not all equal to 2, $r\geq0$.\footnote{In \cite{Ch}, the Chowla conjecture is formulated for the Liouville function. We follow \cite{Sa}. For a discussion of the equivalence of the Chowla conjecture for $\boldsymbol{\mu}$ and for $\boldsymbol{\lambda}$, see~\cite{MR3821722}. } The above statement can be translated into dynamical language. To see this, consider first $|\boldsymbol{\mu}|=\boldsymbol{\mu}^2$, i.e.\ the characteristic function of the set $\mathscr{S}$ of square-free numbers. As a member of $\{0,1\}^\mathbb{Z}$, it is well-known to be a generic point for {an ergodic measure, namely for} the so-called Mirsky measure $\nu_{\mathscr{S}}$ {(also denoted by $\nu_{\boldsymbol{\mu}^2}$)}, given by the ordinary average frequencies of blocks, see e.g.\ \cite{Ab-Ku-Le-Ru}. \eqref{chsfor} is equivalent to the following: \begin{equation} \begin{minipage}{.8\textwidth} \centering the point $\boldsymbol{\mu}\in\{-1,0,1\}^{\mathbb{Z}}$ is generic for the relatively independent extension $\widehat{\nu}_{\mathscr{S}}$ of $\nu_{\mathscr{S}}$, via the natural map $\{-1,0,1\}^{\mathbb{Z}}\ni(x_n)_{n\in\mathbb{Z}}{\mapsto}(x^2_n)_{n\in\mathbb{Z}}\in\{0,1\}^{\mathbb{Z}}$. \end{minipage} \end{equation} The measure $\widehat{\nu}_{\mathscr{S}}$ is given by the following condition: for each block $C$ over the alphabet $\{-1,0,1\}$, we have $\widehat{\nu}_{\mathscr{S}}(C)=2^{-k}\nu_{\mathscr{S}}(C^2)$, where $C^2$ is obtained from $C$ by taking the square (or, equivalently, the {absolute value}) at each coordinate and $k$ is the cardinality of the support of $C$. To see this, we use the following: \begin{Remark} The {span of the} family of continuous functions $\{F^{j_0}\circ S^{k_0} \cdot F^{j_1}\circ S^{k_1}\cdot \ldots\cdot F^{j_\ell}\circ S^{k_\ell} : j_i \geq 0, k_i\in \mathbb{Z}\}$ forms an algebra that distinguishes points. It follows directly from the Stone-Weierstrass theorem that the values of the integrals $\int F^{k_1}\circ S^{r_1} \cdot F^{k_2}\circ S^{r_2}\cdot \ldots\cdot F^{k_\ell}\circ S^{r_\ell} \, d\kappa$ determine the measure $\kappa$. \end{Remark} Now, it suffices to look at measures $\kappa\in V(\boldsymbol{\mu})$ and compare the value of the integrals $\int F^{k_1}\circ S^{r_1} \cdot F^{k_2}\circ S^{r_2}\cdot \ldots\cdot F^{k_\ell}\circ S^{r_\ell} \, d\kappa$ with the corresponding values of $\int F^{k_1}\circ S^{r_1} \cdot F^{k_2}\circ S^{r_2}\cdot \ldots\cdot F^{k_\ell}\circ S^{r_\ell} \, d\widehat{\nu}_{\mathscr{S}}$. We will also use the fact that \begin{equation}\label{ergmu} \widehat\nu_{\mathscr{S}}\in M^e(S,X_{\boldsymbol{\mu}}). \end{equation} The simplest instance of the Chowla conjecture, i.e.\ $\lim_{N\to\infty}\frac{1}{N}\sum_{n\leq N}\boldsymbol{\mu}(n)=0$, is known to hold and, as shown by Landau, it is equivalent to the Prime Number Theorem. Moreover, if exactly one of the exponents $j_i$ is odd, then~\eqref{chsfor} also holds (in~\cite{MR3810678} a quantitative version of this fact has been proved).
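For instance, taking $r=1$, $j_0=1$, $j_1=2$, $k_0=0$, $k_1=1$, we obtain $$\lim_{N\to\infty}\frac1N\sum_{n\leq N}\boldsymbol{\mu}(n)\boldsymbol{\mu}^2(n+1)=0,$$ i.e.\ $\boldsymbol{\mu}$ has mean zero along those $n$ for which $n+1$ is square-free. On the other hand, already the case $r=1$, $j_0=j_1=1$, i.e.\ $\frac1N\sum_{n\leq N}\boldsymbol{\mu}(n)\boldsymbol{\mu}(n+1)\to0$, remains open (see however Corollary~\ref{mob2} for its logarithmic counterpart).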
We also have the following conditional result of Frantzikinakis on~\eqref{chsfor}: \begin{Th}[\cite{MR3742396}]\label{t:fr1} Assume that $V(\boldsymbol{\lambda})=\{\kappa\}$ and $\kappa$ is ergodic. Then~\eqref{chsfor} holds for $\boldsymbol{\lambda}$.\footnote{\label{f:nikos} That is, $\kappa$ equals the $\frac12$-Bernoulli measure on $\{-1,1\}^{\mathbb{Z}}$. Cf.\ Theorem~\ref{t:fr1a}.} \end{Th} \subsection{\eqref{chsfor} vs. \eqref{clog}} Analogously, one can study the logarithmic Chowla conjecture~\cite{MR3676413}, asserting that \begin{equation}\tag{{\bf C$_{\log}$}}\label{clog} \lim_{N\to\infty}\frac1{\log N}\sum_{n\leq N}\frac1n\boldsymbol{\mu}^{j_0}(n+k_0)\boldsymbol{\mu}^{j_1}(n+k_1)\ldots\boldsymbol{\mu}^{j_r}(n+k_r)=0, \end{equation} with all parameters as before. Again, this can be translated into dynamical language, using the notion of a logarithmically generic point. Thus, using \eqref{ergmu},~\eqref{gkl1} and~\eqref{glr1}, we have the following: \begin{Cor}[cf.\ \cite{MR3821718,MR3676413}]\label{wnio} \eqref{clog} implies \eqref{chsfor} along a subsequence of full logarithmic density. \end{Cor} \subsection{More on \eqref{clog}} We begin this section with a conditional result of Frantzikinakis on~\eqref{clog}:\footnote{Cf.\ Footnote~\ref{f:nikos}.} \begin{Th}[\cite{MR3742396}]\label{t:fr1a} Assume that $\kappa\in V^{\log}(\boldsymbol{\lambda})$ is an ergodic measure. Then \eqref{clog} holds for $\boldsymbol{\lambda}$ along the same subsequence for which $\boldsymbol{\lambda}$ is logarithmically quasi-generic for $\kappa$. \end{Th} The above result is already formulated in the language of (logarithmically) quasi-generic points. We pass now to number-theoretic results, with their ergodic-theoretic consequences. \begin{Th}[\cite{MR3569059}]\label{ch2} \eqref{clog} holds for $r=1$ (for $\boldsymbol{\lambda}$ in place of $\boldsymbol{\mu}$), i.e.\ for each $0\neq h\in\mathbb{Z}$, we have $$ \lim_{N\to\infty}\frac1{\log N}\sum_{n\leq N}\frac{\boldsymbol{\lambda}(n)\boldsymbol{\lambda}(n+h)}n=0. $$ \end{Th} In fact, Tao in~\cite{MR3569059} proves a stronger result, an instance of the logarithmically averaged Elliott conjecture. One of the consequences is that the analogue of Theorem~\ref{ch2} holds for $\boldsymbol{\mu}$: \begin{Cor}[\cite{MR3569059}]\label{mob2} \eqref{clog} holds for $r=1$, i.e.\ for each $0\neq h\in\mathbb{Z}$, we have $$ \lim_{N\to\infty}\frac1{\log N}\sum_{n\leq N}\frac{\boldsymbol{\mu}(n)\boldsymbol{\mu}(n+h)}n=0. $$ \end{Cor} This, in turn, has the following interpretation in terms of spectral measures: \begin{Cor}[\cite{MR3821717}]\label{lebesgue} For each logarithmic Furstenberg system $(S,X_{\boldsymbol{\mu}},\mathscr{B}(X_{\boldsymbol{\mu}}),\kappa)$ of $\boldsymbol{\mu}$, the spectral measure $\sigma_F$ of $F$ is Lebesgue. The same holds for $\kappa\in V^{\log}(\boldsymbol{\lambda})$. \end{Cor} Moreover, we have the following: \begin{Th}[\cite{MR3992031,MR3938639}]\label{odd corr} \eqref{clog} holds for odd order correlations, i.e.\ for all even values of $r$. \end{Th} Again, we want to interpret this from the dynamical viewpoint: \begin{Cor} For each logarithmic Furstenberg system $(S,X_{\boldsymbol{\mu}},\mathscr{B}(X_{\boldsymbol{\mu}}),\kappa)$, the map $R$ given by $Ry:=-y$ preserves the measure $\kappa$ and commutes with the shift $S$, hence $R$ belongs to the centralizer of $S$. \end{Cor} To see that the above statement is true, it suffices to take $\kappa\in V^{\log}(\boldsymbol{\mu})$ and check that we also have $\kappa\in V^{\log}(-\boldsymbol{\mu})$.
For this, one uses the family of continuous functions $\{F^{j_0}\circ S^{k_0} \cdot F^{j_1}\circ S^{k_1}\cdot \ldots\cdot F^{j_\ell}\circ S^{k_\ell} : j_i \geq 0, k_i\in \mathbb{Z}\}$. Then, for even correlations the desired equalities are obvious, and for odd correlations one applies Theorem~\ref{odd corr}. Finally, as a consequence {of Corollary~\ref{wnio}}, we have the following: \begin{Cor}[\cite{MR3821718}] Suppose that \eqref{clog} holds. Then $\widehat{\nu}_{\boldsymbol{\mu}^2}\in V(\boldsymbol{\mu})$. \end{Cor} \subsection{Averaged~\eqref{chsfor}} An averaged version of the Chowla conjecture is also present in the literature. As we will see later, this form of averaging will play a special role from the point of view of Sarnak's program, under the name of convergence on short intervals. Matom\"aki, Radziwi\l\l \ and Tao showed the following result on the order two correlations of $\boldsymbol{\mu}$: \begin{Th}[\cite{MR3435814}]\label{mrt} We have $$ \lim_{\substack{M,H\to \infty\\ \text{ with }H=o(M)}}\frac{1}{HM}\sum_{h\leq H}\left|\sum_{m\leq M}\boldsymbol{\mu}(m)\boldsymbol{\mu}(m+h)\right|=0. $$ \end{Th} As a consequence of the above, we have (cf.\ Corollary~\ref{lebesgue}): \begin{Cor}[\cite{MR3821717}]\label{con1} For each Furstenberg system $(S,X_{\boldsymbol{\mu}},\mathscr{B}(X_{\boldsymbol{\mu}}),\kappa)$, the spectral measure $\sigma_F$ of $F$ is continuous. \end{Cor} \subsection{\eqref{chsfor} vs. other conjectures}\label{ell} The Chowla conjecture is thought of as a multiplicative analogue of the twin primes problem. Indeed, the Twin Primes Conjecture in its quantitative form predicts that $\sum_{n\leq N}\boldsymbol\Lambda(n)\boldsymbol\Lambda(n+2)=(2\prod_{\mathbb{P}\ni p\geq 3}(1-\frac1{(p-1)^2}))N+{\rm o}(N)$, where $\boldsymbol\Lambda$ is the von Mangoldt function.\footnote{We have $\boldsymbol\Lambda(p^k)=\log p$ for each prime $p$ and $k\geq1$, and $\boldsymbol\Lambda$ vanishes at all other values of $n$. This function is a good approximation of $\mathbbm{1}_{\mathbb{P}}$ and it is {\bf not} a multiplicative function.} Moreover, the Chowla conjecture is a special instance of the Elliott conjecture on correlations of multiplicative functions. Let $\boldsymbol{u}_0,\dots, \boldsymbol{u}_r\colon \mathbb{N}\to \mathbb{U}$ be multiplicative. The Elliott conjecture, in the corrected form given in~\cite{MR3435814}, asserts that $$ \lim_{N\to\infty}\frac{1}{N}\sum_{n\leq N}\boldsymbol{u}_0(n+k_0)\boldsymbol{u}_1(n+k_1)\dots \boldsymbol{u}_r(n+k_r)=0 $$ whenever $0\leq k_0<\ldots<k_r$ and, for some $0\leq j\leq r$, $\boldsymbol{u}_j$ is strongly aperiodic. This conjecture was first stated in~\cite{MR1222182,MR1292619} and in its original version turned out to be false, see \cite{MR3435814} for details. A logarithmically averaged version of the Elliott conjecture also appears in the literature. For the details, see~\cite{MR4039498,MR3992031} and references therein. \section{Sarnak's Conjecture}\label{se:sarnak} \subsection{\eqref{sar}} Sarnak's Conjecture from 2010 states that \begin{equation}\tag{{\bf S}}\label{sar} \begin{minipage}{.9\textwidth} \centering each (topological) zero entropy system $(T,X)$ is M\"obius orthogonal, i.e.\ satisfies~\eqref{defmo} for all $f\in C(X)$ and $x\in X$. \end{minipage} \end{equation} As zero entropy expresses the fact that the system is ``deterministic'' (or of ``low complexity''), Sarnak's conjecture captures our expectation that the prime numbers behave globally as a random sequence, or, more precisely, that they cannot be predicted by a low-complexity object.
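Some restriction on the complexity of the system is necessary here. Indeed, in the full shift $(S,\{-1,0,1\}^{\mathbb{Z}})$ (a positive entropy system), take $x=\boldsymbol{\mu}$ and $f=F$; then $$\frac1N\sum_{n\leq N}F(S^n\boldsymbol{\mu})\boldsymbol{\mu}(n)=\frac1N\sum_{n\leq N}\boldsymbol{\mu}(n)^2\to\frac6{\pi^2}\neq0,$$ since the square-free numbers have density $1/\zeta(2)=6/\pi^2$. We return to M\"obius orthogonality of positive entropy systems below.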
One can relax the entropy assumptions on~$T$ in Sarnak's conjecture in the following way: \begin{Th}[\cite{Ab-Ku-Le-Ru}] \eqref{sar} is equivalent to M\"obius orthogonality for each topological dynamical system $(T,X)$, each continuous function $f\in C(X)$ and each completely deterministic point $x\in X$ (i.e.\ to the requirement that \eqref{defmo} holds at each completely deterministic $x$ for all $f\in C(X)$). \end{Th} \begin{Remark} The difficulty in Sarnak's conjecture comes from the requirement ``for all $x\in X$''. An a.e.\ version of M\"obius orthogonality is true for {\bf all} dynamical systems. The proof makes use of Davenport's estimate~\eqref{daven} (below), see~\cite{Sa,Ab-Ku-Le-Ru}. \end{Remark} Even though Sarnak's Conjecture is defined in terms of topological dynamics, it can be translated into ergodic-theoretic language. Namely, $$ \frac{1}{N}\sum_{n\leq N}f(T^nx)\boldsymbol{\mu}(n)=\int_{X\times X_{\boldsymbol{\mu}}} f\otimes F\, d\left(\frac{1}{N}\sum_{n\leq N}\delta_{(T^nx,S^n\boldsymbol{\mu})}\right). $$ Thus, we need to study the properties of joinings given by the limit points of $\frac1N\sum_{n\leq N}\delta_{(T^nx,S^n\boldsymbol{\mu})}$. The simplest case where \eqref{sar} is known to hold is the one-point dynamical system: $\lim_{N\to \infty}\frac{1}{N}\sum_{n\leq N}\boldsymbol{\mu}(n)=0$ is equivalent to the Prime Number Theorem. \eqref{sar} for rotations on finite groups is equivalent to Dirichlet's Prime Number Theorem. For irrational rotations,~\eqref{sar} follows from an old (quantitative) result of Davenport~\cite{Da}: for an arbitrary $A>0$, \begin{equation}\label{daven} \max_{t \in \mathbb{T}}\left|\displaystyle\sum_{n \leq N}e^{2\pi int}{\boldsymbol{\mu}}(n)\right|\leq C_A\frac{N}{\log^{A}N}\text{ for some }C_A>0\text{ and all }N\geq 2. \end{equation} As we will see later, an important role in the research around Sarnak's conjecture is played by nil-systems. Green and Tao obtained the following quantitative version of \eqref{sar}: \begin{Th}[\cite{Gr-Ta}] Let $G$ be a simply-connected nilpotent Lie group with a discrete and cocompact subgroup $\Gamma$. Let $p \colon \mathbb{Z} \to G$ be any polynomial sequence in $G$\footnote{I.e.\ $p(n)=a_1^{p_1(n)}\ldots a_k^{p_k(n)}$, where $p_j\colon\mathbb{N}\to\mathbb{N}$ is a polynomial, $j=1,\ldots,k$.} and $f\colon G/\Gamma\to \mathbb{R}$ a Lipschitz function. Then $$\left|\sum_{n\leq N} f(p(n)\Gamma)\boldsymbol{\mu}(n)\right|={\rm O}_{f,G,\Gamma,A}\left(\frac N{\log^AN}\right)$$ for all $A > 0$. \end{Th} In particular, all nilrotations are M\"obius orthogonal. \subsection{\eqref{sar} vs. \eqref{chsfor}} Sarnak's Conjecture was originally mainly motivated by the Chowla conjecture; we have the following result: \begin{Th}\label{ChtS} \eqref{chsfor} implies \eqref{sar}. \end{Th} Theorem~\ref{ChtS} is already stated in~\cite{Sa}. In fact, it is a purely ergodic-theoretic claim: we have already noticed that both conjectures have ergodic-theoretic reformulations, and a joining proof of Theorem~\ref{ChtS} can be found in \cite{Ab-Ku-Le-Ru}. The main idea is the following: suppose that $\frac1{N_k}\sum_{n\leq N_k}\delta_{(T^nx,S^n\boldsymbol{\mu})}\to \rho$. The projection of this joining onto $X$ is a zero entropy measure $\kappa$, whereas the projection onto $X_{\boldsymbol{\mu}}$ equals $\widehat{\nu}_\mathscr{S}$ by the Chowla conjecture. Moreover, $(S,X_{\boldsymbol{\mu}},\widehat{\nu}_\mathscr{S})$ is relatively Kolmogorov with respect to its factor $(S,X_{\boldsymbol{\mu}^2},{\nu}_\mathscr{S})$.
On the other hand, the restriction of $\rho$ to $X\times X_{\boldsymbol{\mu}^2}$ is of relative zero entropy over $X_{\boldsymbol{\mu}^2}$. This yields relative disjointness of $(S,X_{\boldsymbol{\mu}},\widehat{\nu}_\mathscr{S})$ and $(T\times S, X\times {X_{\boldsymbol{\mu}^2}},\rho|_{X\times {X_{\boldsymbol{\mu}^2}}})$ over their common factor $(S,X_{\boldsymbol{\mu}^2},\nu_{\mathscr{S}})$. To complete the proof, we use the orthogonality of $F$ to $L^2(X_{\boldsymbol{\mu}^2},\nu_\mathscr{S})$. \begin{Remark} It still remains open whether \eqref{sar} implies \eqref{chsfor}, see however Remark~\ref{tauw}. \end{Remark} In~\cite{HUANG2019827}, M\"obius orthogonality for low complexity systems is discussed. Following~\cite{Ferenczi_1997}, we say that the measure-complexity of $\mu\in M(T,X)$ is weaker than $a=(a_n)_{n\geq 1}$ if $$ \liminf_{n\to\infty}\frac{\min\{m\geq1: \mu(\bigcup_{j=1}^mB_{d_n}(x_j,\varepsilon))>1-\varepsilon\text{ for some } x_1,\ldots,x_m\in X\}}{a_n}=0 $$ for each $\varepsilon>0$ (here $d_n(y,z)=\frac1n\sum_{j=1}^nd(T^jy,T^jz)$). \begin{Th}[\cite{HUANG2019827}] Suppose that \eqref{chsfor} holds for correlations of order 2 (i.e.\ for $r=1$). Then $(T,X)$ is M\"obius {orthogonal} whenever all invariant measures for $(T,X)$ are of complexity weaker than $(n)_{n\geq1}$. \end{Th} To obtain an unconditional result, Huang, Wang and Ye use a difficult estimate of Matom\"aki, Radziwi\l\l{} and Tao (namely, ``Truncated Elliott on the average'', applied to $\boldsymbol{\mu}$) from~\cite{MR3435814}. The cost to be paid is a further strengthening of the assumptions on the complexity of $(T,X)$. \begin{Th}[\cite{HUANG2019827}] Suppose that all invariant measures of $(T,X)$ are of sub-polynomial complexity, i.e.\ their complexity is weaker than $(n^\tau)_{n\geq 1}$ for each $\tau>0$. Then $(T,X)$ is M\"obius orthogonal. \end{Th} See~\cite{Huang:aa} for the most recent application of this result. Finally, let us point out a consequence of the result on correlations of $\boldsymbol{\mu}$ of order~2. Directly from Corollary~\ref{con1}, we have: \begin{Cor}\label{AA} All topological dynamical systems all of whose invariant measures yield systems with discrete spectrum are M\"obius orthogonal.\footnote{In the uniquely ergodic case, an earlier and independent proof of this fact was given by Huang, Wang and Zhang~\cite{MR3959363} (for the totally uniquely ergodic case, see~\cite{Ab-Le-Ru2}). The result also follows from~\cite{HUANG2019827}.} \end{Cor} \subsection{Strong MOMO property}\label{smomo} Given an arithmetic function $\boldsymbol{u}$, following~\cite{MR3874857}, we say that $(T,X)$ satisfies the \emph{strong $\boldsymbol{u}$-OMO property} if, for any increasing sequence of integers $0=b_0<b_1<b_2<\cdots$ with $b_{k+1}-b_k\to\infty$, for any sequence $(x_k)$ of points in $X$, and any $f\in C(X)$, we have \begin{equation} \label{eq:defMOMOSI} \frac{1}{b_{K}} \sum_{k< K} \left|\sum_{b_k\le n<b_{k+1}} f(T^{n-b_k}x_k) \boldsymbol{u}(n)\right| \tend{K}{\infty} 0. \end{equation} If $\boldsymbol{u}=\boldsymbol{\mu}$ we speak of the strong MOMO\footnote{The acronym MOMO stands for M\"obius Orthogonality of Moving Orbits.} property. The strong MOMO property was introduced in~\cite{MR3874857} to deal with M\"obius orthogonality of uniquely ergodic models of a given measure-theoretic dynamical system.
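Note that the strong MOMO property implies M\"obius orthogonality: given $x\in X$, apply~\eqref{eq:defMOMOSI} to $x_k:=T^{b_k}x$, so that $f(T^{n-b_k}x_k)=f(T^nx)$, and argue via the triangle inequality exactly as in the justification that orthogonality on short intervals implies orthogonality, with $(b_k)$ chosen so that additionally $(b_{K+1}-b_K)/b_K\to0$.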
Moreover, we have: \begin{Th}[\cite{MR3874857}] \label{th:3wki} The following conditions are equivalent: \begin{enumerate}[(i)] \item All zero entropy systems are M\"obius orthogonal, i.e.\ Sarnak's conjecture holds. \item For each zero entropy system $(T,X)$, we have $\lim_{N\to\infty}\frac1N\sum_{n\leq N}f(T^nx)\boldsymbol{\mu}(n)=0$ for each $f\in C(X)$, uniformly in $x\in X$, i.e.\ the uniform Sarnak conjecture holds. \item All zero entropy systems enjoy the strong MOMO property. \end{enumerate} \end{Th} By taking $f=1$, we obtain that the strong $\boldsymbol{u}$-OMO property implies the following: \begin{equation}\label{eq:Mobius-like} \frac{1}{b_{K}} \sum_{k< K} \left|\sum_{b_k\le n<b_{k+1}} \boldsymbol{u}(n)\right| \tend{K}{\infty} 0 \end{equation} for every sequence $0=b_0<b_1<b_2<\cdots$ with $b_{k+1}-b_k\to\infty$. In particular, $\frac1N\sum_{n\leq N}\boldsymbol{u}(n)\tend{N}{\infty}0$. In a similar way (by considering finite rotations), one can deduce that $\frac1N\sum_{n\leq N}\boldsymbol{u}(an+b)\tend{N}{\infty}0$. Thus,~\eqref{eq:defMOMOSI} can be seen as a form of aperiodicity. A further analysis reveals that, in fact, we deal with a special behaviour of $\boldsymbol{u}$ on a typical short interval. All strongly aperiodic multiplicative functions satisfy~\eqref{eq:Mobius-like} (this follows from Theorem~A.1 in~\cite{MR3435814}), hence condition~\eqref{eq:Mobius-like} is satisfied both for $\boldsymbol{\mu}$ and~$\boldsymbol{\lambda}$, cf.\ Section~\ref{szort}. Recently, in~\cite{Gomilko:ab}, the strong $\boldsymbol{u}$-OMO property was rephrased in the language of functional analysis: it is equivalent to $$ \lim_{K\to \infty}\frac{1}{b_{K+1}}\sum_{k\leq K}\left\| \sum_{b_k\leq n<b_{k+1}}\boldsymbol{u}(n)f\circ T^n\right\|_{C(X)}=0 \text{ for all }f\in C(X),\;(b_k)\text{ as above}. $$ The usefulness of the strong MOMO concept is seen in the following result: \begin{Prop}[\cite{MR3874857}]\label{p:sMOMO} If $(R,Z,\mathscr{C},\kappa)$ is an ergodic (measure-theoretic) dynamical system and $(T,X)$ is a uniquely ergodic model of it satisfying the strong MOMO property, then {\bf all} uniquely ergodic models of $(R,Z,\mathscr{C},\kappa)$ are M\"obius orthogonal. In fact, the strong MOMO property holds in all of them. \end{Prop} \subsection{M\"obius orthogonality of positive entropy systems} If we take a positive entropy system $(T,X)$, it is natural to expect that it is not M\"obius orthogonal. Indeed, trivially, the full shift on $\{0,1\}^{\mathbb{Z}}$ is not, and, more generally, subshifts of finite type are not, see~\cite{Kar2}. One can also show that the subshift $(S,X_{\boldsymbol{\mu}^2})$ (which is of positive entropy, see~\cite{MR3430278}) is not M\"obius orthogonal, despite the fact that $\boldsymbol{\mu}^2$ itself is a completely deterministic point and M\"obius orthogonality holds at it: $\lim_{N\to\infty}\frac1N\sum_{n\leq N}f(S^n\boldsymbol{\mu}^2)\boldsymbol{\mu}(n)=0$ for each $f\in C(X_{\boldsymbol{\mu}^2})$, see~\cite{MR3821717}. However, Sarnak's conjecture does not exclude the possibility that {\bf some} positive entropy system is also M\"obius orthogonal.\footnote{It is mentioned in~\cite{Sa} that Bourgain (unpublished) had such a construction.} Downarowicz and Serafin proved the following general result: \begin{Th}[\cite{MR3961705}]\label{dose1} Fix an integer $N\geq2$. Let $\boldsymbol{u}$ be any bounded, real, aperiodic sequence.
Then there exists a subshift $(S,X)$ over $N$ symbols, of entropy arbitrarily close to $\log N$, uncorrelated to $\boldsymbol{u}$: $\lim_{M\to\infty}\frac1M\sum_{n\leq M}f(S^nx)\boldsymbol{u}(n)=0$ for each $f\in C(X)$ and $x\in X$.\end{Th} Even more surprisingly, they proved a uniform version of the above result: \begin{Th}[\cite{Do-Se1902.04162}] \label{dose2} Under the same assumption on $\boldsymbol{u}$, given $N\geq2$, there exists a strictly ergodic subshift over $N$ symbols, of entropy arbitrarily close to $\log N$, {\bf uniformly} uncorrelated to $\boldsymbol{u}$. \end{Th} In view of this, one might wonder what, in the end, the class of M\"obius orthogonal systems is and, in particular, why zero entropy should play a special role. As we will see, however, positive entropy systems are not expected to enjoy the strong MOMO property, cf.\ Theorem~\ref{th:3wki}. Indeed, the following has been proved in~\cite{MR3874857}:\footnote{This result has been proved in \cite{MR3874857} for the Liouville function but it can also be proved for $\boldsymbol{\mu}$.} \begin{Th}[\cite{MR3874857}]\label{th:MOMOzero} Let $\boldsymbol{u}\in\{-1,0,1\}^{\mathbb{Z}}$ be a generic point for the measure $\widehat{\nu}_{\boldsymbol{\mu}^2}$. Then the following conditions are equivalent: \begin{enumerate}[(i)] \item $(T,X)$ satisfies the strong $\boldsymbol{u}$-OMO property. \item $(T,X)$ is of zero entropy. \end{enumerate} \end{Th} As an immediate consequence, we have the following: \begin{Cor}\label{SMposent} If the Chowla conjecture holds, then a system $(T,X)$ has the strong MOMO property if and only if it has zero entropy.\end{Cor} \subsection{\eqref{slog} vs.\ \eqref{clog}} The logarithmic version of Sarnak's conjecture was formulated in~\cite{MR3569059} along with \eqref{clog}; it postulates that \begin{equation}\tag{{\bf S$_{\log}$}}\label{slog} \lim_{N\to\infty}\frac1{\log N}\sum_{n\leq N}\frac1n f(T^nx)\boldsymbol{\mu}(n)=0 \end{equation} (with all parameters as in~\eqref{e1}). In~\cite{MR3676413}, Tao showed the following: \begin{Th}\label{SClog} \eqref{slog} is equivalent to \eqref{clog}. \end{Th} \begin{Remark}\label{tauw} Combining Theorem~\ref{SClog} with Corollary~\ref{wnio}, we obtain that \eqref{slog} implies \eqref{chsfor} along a subsequence of logarithmic density 1. In particular, ({\bf S}) implies \eqref{chsfor} along a subsequence of full logarithmic density. \end{Remark} Let us recall here one more ``logarithmic conjecture'' from~\cite{MR3676413}, which confirms the special role played by nil-systems in dynamics. Let $(T_{g_0},G/\Gamma)$ be a nilrotation. Let $f\in C(G/\Gamma)$ be Lipschitz continuous and $x_0\in G$. Then (for $H\leq N$ with $H\to\infty$) \begin{equation}\label{Slognil}\tag{{\bf S}$_{\log}^{\text{nil}}$} \sum_{n\leq N}\frac{\sup_{g\in G}\left|\sum_{h\leq H}f(T_g^{h+n}(x_0\Gamma))\boldsymbol{\mu}(n+h)\right|}n={\rm o}(H\log N). \end{equation} \begin{Th}[\cite{MR3676413}] \eqref{Slognil} is equivalent to \eqref{slog} (and \eqref{clog}). \end{Th} Finally, as a consequence of the result on logarithmic correlations of $\boldsymbol{\mu}$ of order~2 (using Corollary~\ref{lebesgue}), we obtain: \begin{Cor} All topological dynamical systems all of whose invariant measures yield systems with singular spectrum are logarithmically M\"obius orthogonal. \end{Cor} In general, we do not know if we can replace ``all invariant measures'' with ``all ergodic invariant measures'' in the above corollary (the same applies to Corollary~\ref{AA}).
This replacement is possible, however, when there are only countably many ergodic invariant measures, cf.\ the discussion of~\cite{MR3779960} in Section~\ref{fusy}. \subsection{\eqref{sar} vs.\ \eqref{slog}} Clearly, \eqref{sar} implies \eqref{slog}. As for the other direction, we have the following: \begin{Th}[\cite{Gomilko:ab}] Suppose that \eqref{slog} holds. Then there exists a sequence of logarithmic density 1, along which \eqref{sar} holds for all zero entropy topological dynamical systems. \end{Th} The idea of the proof is to use Theorem~\ref{SClog} and Remark~\ref{tauw}, and then repeat the arguments from the proof of Theorem~\ref{ChtS}. Notice that the sequence of logarithmic density~1 in the above result is universal for all zero entropy systems. In~\cite{Gomilko:ab}, one more version of M\"obius orthogonality is studied, namely the so-called {\em logarithmic strong MOMO property} (cf.\ Section~\ref{smomo}): $$ \lim_{K\to\infty}\frac{1}{\log b_{K+1}}\sum_{k\leq K}\left\|\sum_{b_k\leq n<b_{k+1}}\frac{\boldsymbol{\mu}(n)}{n} f\circ T^n \right\|_{C(X)}=0. $$ Equivalently, for all increasing sequences $(b_k)\subset \mathbb{N}$ with $b_{k+1}-b_k\to\infty$, all $(x_k)\subset X$ and $f\in C(X)$, $$ \lim_{K\to \infty}\frac{1}{\log b_{K+1}}\sum_{k\leq K}\left|\sum_{b_k\leq n<b_{k+1}}\frac{1}{n}f(T^{n-b_k}x_k)\boldsymbol{\mu}(n) \right|=0. $$ \begin{Th}[\cite{Gomilko:ab}] Assume that a topological system $(T,X)$ satisfies the logarithmic strong MOMO property. Then there exists a sequence $A=A(T,X)\subset \mathbb{N}$ with full logarithmic density such that, for each $f\in C(X)$, $$ \lim_{A\ni N\to\infty}\left\| \frac1N\sum_{n\leq N}\boldsymbol{\mu}(n)f\circ T^n\right\|_{C(X)}=0. $$ In particular, M\"obius orthogonality holds along a subsequence of full logarithmic density. \end{Th} \begin{Remark} In \cite{Gomilko:ab}, using \cite{MR3779960}, it is proved that each system $(T,X)$ for which $M^e(T,X)$ is countable satisfies the logarithmic strong MOMO property; hence, for each such system, Sarnak's conjecture holds in (logarithmic) density, cf.\ Theorem~\ref{HF}.\end{Remark} \subsection{Strategies}\label{se:strategie} The first years of activity around Sarnak's conjecture were devoted to proving M\"obius orthogonality in {\bf selected classes} of zero entropy dynamical systems. While this proved fruitful, and often some brilliant arguments were found ad hoc, with a strong dependence on the class under consideration, it quickly became clear that this would not be sufficient. Two main strategies to attack~\eqref{sar} arose: \paragraph{A} The first strategy is to look for some additional intrinsic structure in zero entropy systems that could be used to prove orthogonality to $\boldsymbol{\mu}$, namely {\bf internal disjointness}. Here, a priori, one does not use any other property of $\boldsymbol{\mu}$ than multiplicativity and boundedness. \paragraph{B} As we have seen, \eqref{sar} is intimately related to \eqref{chsfor}, and therefore one cannot expect to confirm~\eqref{sar} without using further {\bf number-theoretic properties of $\boldsymbol{\mu}$}. This directs attention to {\bf aperiodicity} and behaviour on so-called {\bf short intervals}. It extends further to studying {\bf Furstenberg systems of $\boldsymbol{\mu}$} (including the logarithmic ones) and trying to interpret arithmetic properties of $\boldsymbol{\mu}$ as ergodic properties of the corresponding dynamical systems. One can finally hope to deduce (some kind of) Furstenberg (!)
disjointness of Furstenberg systems of $\boldsymbol{\mu}$ with a wide subclass of zero entropy systems (hopefully, with all such systems). As we will see, these two approaches often intertwine, proving once again that number theory and ergodic theory should not be studied separately from each other. \section{Arithmetic properties of the M\"obius function}\label{se:arithm} \subsection{Multiplicativity} \paragraph{Internal disjointness} Joinings (introduced in a seminal paper of Furstenberg~\cite{Fu}) have been present in ergodic theory for over 50 years. Disjointness (the absence of non-trivial joinings), as an extremal form of non-isomorphism and a measure-theoretic invariant, has always played a crucial role in classification problems.\footnote{Recall also that different powers of a typical automorphism of a standard Borel space are pairwise disjoint~\cite{Ju}. See also the more recent~\cite{Kanigowski_2020}.} It has appeared, however, in many other contexts, including homogeneous dynamics, with applications in number theory. Sarnak's conjecture gave it yet another impetus, in particular for studying (approximate) disjointness of different sub-actions. A basic method to prove orthogonality to a multiplicative function comes from the Multiplicative Orthogonality Criterion (MOC): \begin{Th}[\cite{Ka,Bo-Sa-Zi}]\label{t:kbsz} Assume that $(f_n)\subset\mathbb{C}$ is a bounded sequence. Assume that for all (sufficiently large) prime numbers $p\neq q$, \begin{equation}\label{kbsz1} \lim_{N\to\infty}\frac1N\sum_{n\leq N}f_{pn}\overline{f}_{qn}=0. \end{equation} Then, for each bounded multiplicative function $\boldsymbol{u}$, we have $ \lim_{N\to\infty}\frac1N\sum_{n\leq N}f_n\boldsymbol{u}(n)=0$. In particular, $(f_n)$ is M\"obius orthogonal. \end{Th} \begin{Remark} Notice that Theorem~\ref{t:kbsz} does not require anything from $\boldsymbol{u}$ but multiplicativity and boundedness. \end{Remark} In the dynamical context of $(T,X)$, the simplest way to use Theorem~\ref{t:kbsz} is to take $f_n=f(T^nx)$. In this form, MOC appeared for the first time in~\cite{Bo-Sa-Zi} and was used to prove that the horocycle flows are M\"obius orthogonal. To see how MOC is used and how it is related to Furstenberg's disjointness theory~\cite{Fu}, assume that $M(T,X)=\{\mu\}$, $\int_Xf\,d\mu=0$, and the corresponding measure-theoretic system is totally ergodic. Then any measure $\rho\in V((x,x))$ (considered in the topological dynamical system $(T^p\times T^q,X\times X)$) is a joining of $T^p$ and $T^q$. If we now assume that $(T^p,X,\mu)$ and $(T^q,X,\mu)$ are disjoint for sufficiently large primes $p\neq q$, then $\rho=\mu\otimes\mu$ and, as a result, the limit in~\eqref{kbsz1} equals $\int_{X\times X}f\otimes\overline{f}\,d\rho=0$, i.e.\ the assumptions of MOC are satisfied. In general, the use of MOC is not that simple. Consider an irrational rotation $Tx=x+\alpha$ on the circle $X=\mathbb{R}/\mathbb{Z}$. To see that~\eqref{kbsz1} holds for all characters, one uses the Weyl criterion on uniform distribution. However, there are continuous zero mean functions for which~\eqref{kbsz1} fails~\cite{Ku-Le}, which shows clearly that, in general, we can only expect~\eqref{kbsz1} to hold for a linearly dense set of continuous functions. In some cases, MOC cannot be applied directly (e.g.\ when the systems under consideration fail to be weakly mixing) and the spectral approach can help. Examples can be found in~\cite{Ab-Le-Ru,Bo1,Ab-Ka-Le}.
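To make the rotation example explicit, take $f(x)=e^{2\pi ix}$, so that $f_n=e^{2\pi i(x+n\alpha)}$. For primes $p\neq q$, $$\frac1N\sum_{n\leq N}f_{pn}\overline{f}_{qn}=\frac1N\sum_{n\leq N}e^{2\pi in(p-q)\alpha}\to0,$$ since $(p-q)\alpha$ is irrational, and Theorem~\ref{t:kbsz} yields $\frac1N\sum_{n\leq N}e^{2\pi in\alpha}\boldsymbol{u}(n)\to0$ for every bounded multiplicative function $\boldsymbol{u}$; the same computation works for every character $e^{2\pi imx}$, $m\neq0$.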
\paragraph{AOP property} The following ergodic counterpart of MOC was developed in~\cite{Ab-Le-Ru2}: an ergodic automorphism $(T,X,\mathscr{B},\mu)$ is said to have {\em asymptotically orthogonal powers} (AOP) if for each $f,g\in L^2_0(X,\mathscr{B},\mu)$, we have \begin{equation}\label{momoe4} \lim_{\mathbb{P}\ni p,q\to\infty, p\neq q} \sup_{\kappa\in J^e(T^p,T^q)}\left|\int_{X\times X} f\otimes g\,d\kappa\right|=0. \end{equation} Clearly, if the powers of $T$ are pairwise disjoint, then $T$ enjoys the AOP property. However, this condition is not necessary; the powers of an automorphism enjoying the AOP property may even be isomorphic to each other. Moreover, AOP implies total ergodicity and zero entropy~\cite{Ab-Le-Ru2}. The relation between the strong MOMO and AOP properties is described by the following result: \begin{Th}[\cite{Ab-Le-Ru2,Ab-Ku-Le-Ru}]\label{thmB} Let $\boldsymbol{u}$ be a bounded multiplicative function. Suppose that $(R,Z,\mathscr{C},\kappa)$ satisfies AOP. Then the following are equivalent: \begin{itemize} \item $\boldsymbol{u}$ satisfies \eqref{eq:Mobius-like}; \item The strong $\boldsymbol{u}$-OMO property is satisfied in each uniquely ergodic model $(T,X)$ of $R$. \end{itemize} In particular, if the above holds, for each $f\in C(X)$, we have $$ \frac1N\sum_{n\leq N}f(T^nx)\boldsymbol{u}(n)\tend{N}{\infty} 0 \text{ uniformly on } X. $$ \end{Th} \subsection{Aperiodicity} As all periodic sequences are orthogonal to $\boldsymbol{\mu}$, one can expect that sequences with properties similar to periodicity will also be M\"obius orthogonal. Notice also that M\"obius orthogonality of periodic sequences (\eqref{sar} for rotations on finite groups) corresponds to $\boldsymbol{\mu}$ being aperiodic. This is the simplest situation where some additional properties of $\boldsymbol{\mu}$ (other than multiplicativity) begin to play a significant role. \paragraph{Zero entropy continuous interval maps} In~\cite{Kar1}, \eqref{sar} for zero entropy continuous interval maps and orientation-preserving circle homeomorphisms is established. The starting point for developing the main tools is the result of Davenport~\eqref{daven}, which shows clearly that the examples under consideration are indeed ``relatives'' of irrational rotations. Additionally, in order to treat the case of interval maps, one studies $\omega$-limit sets, and it turns out that, in fact, one deals with an odometer. \paragraph{Synchronized automata} In~\cite{De-Dr-Mu}, Deshouillers, Drmota and M\"ullner prove that~\eqref{sar} is true for automatic sequences generated by synchronizing automata (the inputs are read with the most significant digit first). In fact, they prove orthogonality of such sequences to any bounded aperiodic function $\boldsymbol{u}$. \paragraph{Almost periodic sequences} We say that a sequence is {\em Weyl rationally almost periodic} (WRAP) whenever it can be approximated arbitrarily well by periodic sequences in the Weyl pseudo-metric $d_W$ given by $d_W(x,y)=\limsup_{N\to\infty}\sup_{\ell\geq 1}\frac1N|\{ \ell \leq n\leq\ell+N : x(n)\neq y(n)\}|$. It is proved in~\cite{MR3989121} that each subshift $(S,X_x)$ given by a WRAP sequence $x$ is M\"obius orthogonal (in fact, we have orthogonality to any bounded aperiodic arithmetic function $\boldsymbol{u}$).
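As a toy example, let $y$ be any periodic sequence and let $x$ agree with $y$ except at the perfect squares (where the values are changed arbitrarily within the alphabet). Since any window of length $N$ contains at most $\sqrt N+1$ squares, $$d_W(x,y)\leq\limsup_{N\to\infty}\frac{\sqrt N+1}{N}=0,$$ so $x$ is WRAP and the subshift $(S,X_x)$ falls under the above result.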
\subsection{Behaviour on short intervals}\label{szort} During the last four years, enormous progress concerning the short interval behaviour of strongly aperiodic multiplicative functions has been made due to the breakthrough result of Matom\"aki and Radziwi\l\l. Their main result of~\cite{Ma-Ra}, for $\boldsymbol{\mu}$, in its simplified form can be written as \begin{equation}\label{condmu} \lim_{\substack{M,H\to \infty\\ \text{ with }H=o(M)}}\frac1M\sum_{1\leq m\leq M}\frac1H\left| \sum_{m\leq h<m+H}\boldsymbol{\mu}(h)\right|=0. \end{equation} This gave an impetus to study convergence on short intervals in ergodic theory, and it has become a new, crucial player from the point of view of Sarnak's conjecture. Condition~\eqref{condmu} can also be reformulated in the following way: for each $(b_n)\subset\mathbb{N}$ with $b_{n+1}-b_n\to\infty$, we have \begin{equation}\label{12} \lim_{K\to\infty}\frac{1}{b_{K+1}}\sum_{k\leq K}\left|\sum_{b_k\leq n<b_{k+1}}\boldsymbol{\mu}(n) \right|=0, \end{equation} cf.\ Section~\ref{smomo}. \paragraph{Almost periodic sequences} In~\cite{MR3989121}, in the case of a WRAP sequence $x$, the authors also ask about the behaviour of averages of the form \begin{equation}\label{eq:sc-ep-7-a} \frac1H\sum_{m\leq h<m+H}f(S^hz)\boldsymbol{\mu}(h) \end{equation} (where $z\in X_x$) for large values of $H$ and arbitrary $m\in\mathbb{N}$. Under \eqref{chsfor}, convergence to zero uniformly in $m$ does not take place; however, it is shown in~\cite{MR3989121} that for a ``typical'' $m\in\mathbb{N}$ the averages in \eqref{eq:sc-ep-7-a} are small. The key argument in the proof comes from a result of Matom\"aki, Radziwi\l \l \ and Tao: \begin{Th}[\cite{MR3435814}] For each periodic sequence $a(n)$, we have \begin{equation}\label{sarSHORT} \lim_{\substack{H,M\to\infty\\ H={\rm o}(M)}}\frac1M\sum_{M\leq m<2M}\left|\frac1H\sum_{m\leq h<m+H}a(h)\boldsymbol{\mu}(h)\right|=0. \end{equation} \end{Th} As a consequence, we have: \begin{Th}[\cite{MR3989121}]\label{Wshort-2} Suppose that $x\in \mathbb{A}^\mathbb{Z}$ is WRAP. Then for all $f\in C(X_x)$ and $z\in X_x$, \begin{equation} \label{eq:sc-ep-4} \lim_{\substack{H,M\to\infty\\ H={\rm o}(M)}} \frac1M\sum_{M\leq m<2M}\Big|\frac1H\sum_{m\leq h<m+H}f(S^hz)\boldsymbol{\mu}(h)\Big|=0. \end{equation} \end{Th} Moreover, it is shown that all synchronizing automata yield WRAP sequences. Thus, the above theorem strengthens the aforementioned result of Deshouillers, Drmota and M\"ullner in~\cite{De-Dr-Mu}. \paragraph{Rigid systems} In~\cite{Kanigowski:aa}, Kanigowski, Lema\'{n}czyk and Radziwi\l{}\l{} study rigid systems.\footnote{A measure-theoretic system $(R,Z,\mathscr{C},\kappa)$ is {\em rigid} if, for some increasing sequence $(q_n)$ of natural numbers, we have $f\circ R^{q_n}\to f$ in $L^2(Z,\kappa)$ for each $f\in L^2(Z,\kappa)$. Rigid systems are of zero entropy. Moreover, the typical measure-theoretic automorphism is rigid and weakly mixing.} To formulate their results, we need some definitions and facts. Given a natural number $q$, the sum $\sum_{\mathbb{P}\ni p | q} 1/p$ is called the {\em prime volume} of $q$. The prime volume grows slowly with $q$: $$ \sum_{p|q}\frac1p\leq \log\log\log q+O(1). $$ However, ``most'' of the time the prime volume of $q$ stays bounded: if we set $$ \mathcal{D}_j:=\Big\{q\in \mathbb{N} : \sum_{p|q}\frac{1}{p}<j\Big\}, $$ then $d(\mathcal{D}_j)\to1$ as $j\to\infty$ (here $d$ stands for the natural density).
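For instance, the prime volume of every power $2^k$ equals $\frac12$, while for the primorial $q=2\cdot3\cdot5\cdot7=210$ it equals $\frac12+\frac13+\frac15+\frac17=\frac{247}{210}\approx1.18$. By Mertens' theorem, it is the primorials that realize the extremal growth $\log\log\log q+O(1)$ above.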
A topological system $(T,X)$ is said to be {\em good} if for every $\nu\in M(T,X)$ at least one of the following conditions holds: \begin{itemize} \item ({\bf BPV rigidity}): $(T, X, \mathscr{B}, \nu)$ is rigid along a sequence $(q_n)_{n\geq1}$ with {\bf bounded prime volume}, i.e.\ there exists $j$ such that $(q_n)_{n\geq1}\subset \mathcal{D}_j$; \item ({\bf PR rigidity}): $(T,X,\mathscr{B},\nu)$ has {\bf polynomial rate} of rigidity, i.e.\ there exists a linearly dense (in $C(X)$) set $\mathcal{F}\subset C(X)$ such that for each $f\in\mathcal{F}$ we can find $\delta>0$ and a sequence $(q_n)_{n\geq1}$ satisfying $$ \sum_{j=-q_n^\delta}^{q_n^\delta}\|f\circ T^{jq_n}-f\|_{L^2(\nu)}^2\to 0. $$ \end{itemize} They prove the following: \begin{Th} \begin{enumerate}[(a)] \item Assume that $(T,X)$ is a good topological system. Then $(T,X)$ is M\"obius orthogonal. \item Suppose that each {\bf ergodic} invariant measure of $(T,X)$ yields either BPV rigidity or PR rigidity and that $M^e(T,X)$ is countable. Then $(T,X)$ is M\"obius orthogonal. \end{enumerate} \end{Th} A key tool here is a strengthening of the main result of Matom\"aki and Radziwi\l{}\l{}~\cite{Ma-Ra} (cf.\ \eqref{condmu}) to short interval behaviour along arithmetic progressions: \begin{Th}[\cite{Kanigowski:aa}] \label{klr} For each $\varepsilon>0$, there exists $L_0$ such that for each $L\geq L_0$ and $q\geq 1$ satisfying $\sum_{p|q}1/p\leq(1-\varepsilon)\sum_{p\leq L}1/p$ we can find $M_0=M_0(q,L)$ such that for all $M\geq M_0$, we have $$ \sum_{j=0}^{M/Lq}\sum_{a=0}^{q-1}\left|\sum_{\substack{m\in[z+jLq,z+(j+1)Lq] \\ m\equiv a\bmod q}}\boldsymbol{\mu}(m) \right|<\varepsilon M $$ for some $0\leq z<Lq$. \end{Th} \begin{Remark} Although PR rigidity does not seem to be stable under passing to different (uniquely ergodic) models of a measure-preserving transformation, it is proved that a uniquely ergodic $(T,X)$ satisfies the strong MOMO property whenever its unique invariant measure yields either BPV or PR rigidity. Via Proposition~\ref{p:sMOMO}, we obtain that if PR rigidity holds in one model, then all models are M\"obius orthogonal. This, in particular, applies to all ergodic transformations with discrete spectrum. Moreover, it is shown in \cite{Kanigowski:aa} that for a.e.\ IET (of $d\geq 3$ intervals) BPV rigidity holds, so a.e.\ IET (and each of its uniquely ergodic models) is M\"obius orthogonal. This is to be compared with previously known results for 3-IETs~\cite{Bo1,Chaika_2019,Ferenczi_2018,Karagulyan:aa}. Other applications are given for $C^{2+\varepsilon}$ Anzai skew products and for some so-called Rokhlin extensions of rotations. \end{Remark} One more consequence is the following result: \begin{Cor}[\cite{Kanigowski:aa}]\label{norigid} No Furstenberg system of the M\"obius function $\boldsymbol{\mu}$ is either BPV or PR rigid. The same holds for the Liouville function $\boldsymbol{\lambda}$. \end{Cor} \subsection{Logarithmic Furstenberg systems}\label{fusy} Frantzikinakis and Host study the logarithmic Furstenberg systems associated to $\boldsymbol{\mu}$ (and $\boldsymbol{\lambda}$). They prove the following remarkable result: \begin{Th}[\cite{MR3779960}]\label{HF} Each zero entropy topological system $(T,X)$ with only countably many ergodic measures is logarithmically M\"obius orthogonal. \end{Th} In particular, uniquely ergodic systems of zero topological entropy satisfy~\eqref{slog}.
The key argument in the proof of Theorem~\ref{HF} is the following structural result on the logarithmic Furstenberg systems of $\boldsymbol{\mu}$ and $\boldsymbol{\lambda}$: \begin{Th}[\cite{MR3779960}]\label{HF1} Each logarithmic Furstenberg system of $\boldsymbol{\mu}$ or $\boldsymbol{\lambda}$ is a factor of a system that: \begin{itemize} \item has no irrational spectrum, \item has ergodic components isomorphic to direct products of infinite-step nilsystems and Bernoulli systems. \end{itemize} \end{Th} The starting point for the proof of the above theorem, resulting in a reduction of the problem to a purely ergodic context, is an identity of Tao (implicit in~\cite{MR3569059}) showing that the self-correlations of $\boldsymbol{\mu}$ (and $\boldsymbol{\lambda}$) are averages of their dilated self-correlations with prime dilates. Frantzikinakis and Host also prove that the logarithmic Furstenberg systems of $\boldsymbol{\mu}$ (and $\boldsymbol{\lambda}$) are ``almost determined'' by strongly stationary processes (introduced by Furstenberg and Katznelson in the 1990s). The structure of (measure-theoretic) dynamical systems given by strongly stationary processes has been described by Jenvey~\cite{Jenvey_1997}, who proved that in the ergodic case they are Bernoulli (cf.\ Theorems~\ref{t:fr1} and~\ref{t:fr1a}), and by Frantzikinakis~\cite{Frantzikinakis_2004}, who described the ergodic decomposition in the non-ergodic case. The above results are extended in~\cite{Frantzikinakis:aa} to strongly aperiodic multiplicative functions. Moreover, the following multi-dimensional result is proved: \begin{Th}\label{nooo} Let $f_1,\dots, f_\ell\colon\mathbb{N}\to\mathbb{U}$ be multiplicative functions. Let $(R,Y)$ be a topological dynamical system and let $y\in Y$ be a logarithmically generic point for a measure $\nu$ with zero entropy and having at most countably many ergodic components, all of which are totally ergodic. Then for every $g\in C(Y)$ that is orthogonal in $L^2(\nu)$ to all $R$-invariant functions we have \begin{equation} \lim_{N\to\infty}\frac{1}{\log N}\sum_{n\leq N}\frac{g(R^ny)\prod_{j=1}^{\ell}f_j(n+h_j)}{n}=0 \end{equation} for all $h_1,\dots, h_\ell\in\mathbb{Z}$. \end{Th} The unweighted version (with $g=1$) is expected to hold if the shifts $h_j$ are distinct and at least one of the multiplicative functions is strongly aperiodic. This is the logarithmically averaged version of the Elliott conjecture~\cite{MR1042765,MR1222182,MR3435814}. In the special case of irrational rotations, with $\ell=1$, Theorem~\ref{nooo} is the logarithmically averaged variant of a classical result of Daboussi~\cite{MR675168,MR0332702,AST_1975__24-25__321_0}. Already in the case $\ell=2$, it is completely new. \section{Future Directions}\label{se:future} \subsection{Detecting zero entropy} As shown in Theorem~\ref{th:MOMOzero} and Corollary~\ref{SMposent}, under the Chowla conjecture, the M\"obius function is a sequence $\boldsymbol{u}$ such that: \begin{itemize} \item every zero entropy system satisfies the strong $\boldsymbol{u}$-OMO property, \item no positive entropy system satisfies the strong $\boldsymbol{u}$-OMO property. \end{itemize} In view of this, it is natural to study the following problem: \begin{Question} Which numerical sequences distinguish between zero and positive entropy systems? \end{Question} Note that, in view of the results of Downarowicz and Serafin (Theorems~\ref{dose1} and~\ref{dose2}), ``usual orthogonality'' or even its uniform version is insufficient for these needs.
\subsection{Proving the strong MOMO property} It was already asked in \cite{MR3821717} whether, whenever M\"obius orthogonality can be proved for a zero entropy system $(T,X)$, the strong MOMO property can be proved for it as well. Recently, Lema\'{n}czyk and M\"ullner in~\cite{Lema_czyk_2020} proved the strong MOMO property for (primitive) automatic sequences (previously known to be M\"obius orthogonal by~\cite{M_llner_2017}), answering a question from~\cite{MR3821717}. Yet another question persists: \begin{Question} Do horocycle flows satisfy the strong MOMO property?\end{Question} We do not even know whether M\"obius orthogonality takes place in all uniquely ergodic models of horocycle flows. \subsection{Mixing properties of Furstenberg systems} Corollary~\ref{norigid} suggests the following problem: \begin{Question} Are Furstenberg systems of $\boldsymbol{\lambda}$ mildly mixing? \end{Question} For $\boldsymbol{\mu}$ we need to take into account that its Furstenberg systems have a discrete spectrum factor given by the Mirsky measure of $\boldsymbol{\mu}^2$. \subsection{Furstenberg disjointness in the non-ergodic case} As the analysis of Frantzikinakis and Host shows, if the (potential) logarithmic Furstenberg systems of $\boldsymbol{\lambda}$ or $\boldsymbol{\mu}$ are non-ergodic, then they are very non-ergodic. One of the open questions of Frantzikinakis is whether the system $\mathbb{T}^2\ni (x,y)\mapsto (x,y+x)\in \mathbb{T}^2$, considered with Lebesgue measure, can be a Furstenberg system of $\boldsymbol{\lambda}$. Of course, this example is a measure-theoretic system which is Furstenberg disjoint from all ergodic systems. It seems to be a problem of independent interest to fully understand the class of transformations disjoint from all ergodic transformations. \section*{Acknowledgements} \noindent Research supported by Narodowe Centrum Nauki grant UMO-2019/33/B/ST1/00364. \nociteNew{Ab-Le-Ru1} \nociteNew{Baake:2015aa} \nociteNew{MR3731019} \nociteNew{MR3803141} \nociteNew{MR1954690} \nociteNew{Bo0} \nociteNew{MR3296562} \nociteNew{Da-Te} \nociteNew{Do-Ka} \nociteNew{Dr} \nociteNew{MR3825824} \nociteNew{Fe-Ku-Le-Ma} \nociteNew{MR3850672} \nociteNew{Gr} \nociteNew{MR3821719} \nociteNew{MR3947636} \nociteNew{Keller:2017aa} \nociteNew{MR3803667} \nociteNew{Ku-Le-We} \nociteNew{Ku-Le-We1} \nociteNew{MR4000514} \nociteNew{Li-Sa} \nociteNew{MATOM_KI_2016} \nociteNew{Ma-Ri2} \nociteNew{MR3612882} \nociteNew{MR0021566} \nociteNew{Mi} \nociteNew{Pe-Hu} \nociteNew{Ry} \nociteNew{MR3666035} \nociteNew{MR3660308} \nociteNew{MR3829173} \nociteNew{McNamara:aa} \nociteNew{HLSY} \nociteNew{MR3859364} \nociteNew{Sa:Af} \nociteNew{Sawin:aa} \nociteNew{Ei1} \nociteNew{Tablog5} \nociteNew{Veech_2017} \nociteNew{MR3820018} \nociteNew{Sun:aa} \nociteNew{MR3819999} \nociteNew{Forni:aa} \nociteNew{Konieczny_2020} \nociteNew{MR3927855} \nociteNew{Houcein-el-Abdalaoui:aa} \nociteNew{He:aa} \nociteNew{El_Abdalaoui_2018} \allowdisplaybreaks \small \bibliographystyle{abbrv}
\subsubsection*{\bf 1. Where, how and when?} \noindent Gaia will be launched from ESA/Kourou (French Guiana) onboard a Soyuz-Fregat rocket in June 2013. Deployment will be at the L2 Lagrange point, with the first community release of alerts expected in mid 2014 (internal verification will begin in early 2014). The mission is scheduled to end in 2018--2019. \subsubsection*{\bf 2. What telescopes will Gaia have?} \noindent Gaia will be equipped with two $1.45\times0.5$\,m primary mirrors, forming two fields of view separated by 106.5 degrees. The light from both mirrors will be imaged onto a single focal plane. Gaia will reach down to V=20 in the Astrometric Field detectors. \subsubsection*{\bf 3. What instruments will Gaia have?} \begin{figure}[h] \begin{center} \includegraphics[width=4.4in]{focalplane.png} \caption{Focal plane of Gaia.} \label{fig:focalplane} \end{center} \end{figure} \noindent Each object traverses the focal plane (4.4 sec per CCD), see Figure \ref{fig:focalplane}. \noindent{\bf SM}: Objects are detected in the Sky Mapper CCDs, and are allocated windows for the remaining detectors.\\ {\bf AF}: Source positions and G-band magnitudes are measured in the Astrometric Field CCDs (platescale $\sim 0.04 \times 0.1$ milliarcsecs).\\ {\bf BP/RP}: Low-dispersion spectro-photometry (330-680nm, 640-1000nm) in 120 samples.\\ {\bf RVS}: Intermediate-dispersion (R$\sim$11,500) spectroscopy (847-874nm) around the Calcium Infrared triplet down to V$<$17 mag.\\ \subsubsection*{\bf 4. What is the data latency?} \noindent Gaia will be visible from the Earth for only 8h a day. All data from the last 24h will be downlinked during a contact. After initial processing, alerts will be issued between a couple of hours and up to 48 hours after the observation. \subsubsection*{\bf 5. What is downloaded?} \noindent Most of the sky is empty. Gaia will only transmit small windows around stars detected at each transit on the Sky Mapper CCDs, together with the associated data. \subsubsection*{\bf 6. How does the scanning law allow for full sky coverage?} \noindent Gaia has a pre-defined plan for scanning the sky. The spin axis is maintained at a 45 deg angle from the Sun, with a spin period of 6h. For details see Figure \ref{fig:scanninglaw}. \begin{figure}[h] \begin{center} \includegraphics[width=2.2in]{scanninglaw1.png} \includegraphics[width=2.2in]{scanninglaw2.png} \caption{Nominal Scanning Law principles for the Gaia satellite.} \label{fig:scanninglaw} \end{center} \end{figure} \subsubsection*{\bf 7. What is the typical sampling?} \noindent On average, each object will be observed 80 times, though at the ecliptic nodes objects are scanned in excess of 200 times. Observations occur in pairs (two FOVs), separated by $\sim$2 hours. The next pair will typically occur between 6 hours and $\sim$30 days later. \subsubsection*{\bf 8. What is the precision of the instantaneous photometry and astrometry?} \noindent In a single observation (transit) the photometry will reach milli-magnitude precision at G=14, and 1\% at G=19. The astrometric precision will be in the range 20-80$\mu$as at G=8-15 (see Figure~\ref{fig:precision} for the effects of gating), falling to 600$\mu$as at G=19. This astrometric precision will only be reached later in the mission. \begin{figure}[h] \begin{center} \includegraphics[width=2.6in]{photometry.png} \includegraphics[width=2.6in]{astrometry.png} \caption{Precision of the instantaneous photometry and astrometry of the Gaia satellite
(from \cite{Varadi2009}).} \label{fig:precision} \end{center} \end{figure} \subsubsection*{\bf 9. How will the anomalies be detected?} \noindent Using simple recipes (a toy code sketch of these recipes is given after point 19):\\ 1. Compare the most recent observation with the historic data available.\\ 2. Inspect for unexpected changes.\\ 3. No history? A new transient! \subsubsection*{\bf 10. How will the anomalies be classified?} \noindent 1. From the light-curve.\\ 2. Using low-dispersion BP/RP spectroscopy.\\ 3. Cross-matching with archival data. \subsubsection*{\bf 11. How will the BP/RP spectra be used?} \noindent Self-Organizing Maps (\cite{WyrzykowskiBelokurov2008}) built from the low-dispersion spectra can confirm a non-stellar nature, classify Supernova types, measure Supernova ages and possibly even constrain the redshift. \begin{figure}[h] \begin{center} \includegraphics[width=3.5in]{SOM.png} \caption{A Self-Organizing Map (left) can distinguish between different spectral types of stars and supernovae at different epochs, as built from Gaia synthesized BP/RP spectra (right).} \label{fig:som} \end{center} \end{figure} \subsubsection*{\bf 12. How will the alerts be disseminated?} \noindent Via Skyalert.org, email, a www server, Twitter, an iPhone app, etc. \subsubsection*{\bf 13. What will be in an alert?} \noindent The coordinates, a small cutout image from the SM, the Gaia light-curve, a low-resolution spectrum at the trigger, the classification results, and the cross-matching results. \subsubsection*{\bf 14. What will the main triggers be?} \noindent Supernovae, classical novae, dwarf novae, microlensing events, Be stars, GRB afterglows, M-dwarf flares, R CrB-type stars, FU Ori-type stars, asteroids, surprises. \subsubsection*{\bf 15. How many Supernovae will Gaia detect over 5 years?} \noindent 6000 SNe are expected down to G=19. About 2000 will be detected before maximum (\cite{BelokurovSN}). \subsubsection*{\bf 16. How many Microlensing Events will Gaia detect?} \noindent 1000+ events (mostly long, $t_E>$30 d) are expected to be detected photometrically, mainly in the Galactic bulge and plane. Astrometric centroid motion will be detectable in real time (for larger deviations, of about 100$\mu$as) in on-going events, and alerts may be triggered to obtain complementary photometry (\cite{BelokurovMICROLENSING}). \begin{figure}[h] \begin{center} \includegraphics[width=4.3in]{astrometric_alert.png} \caption{Trajectory of a source due to proper motion and centroid shift during a microlensing event.} \label{fig:astrometricalert} \end{center} \end{figure} \subsubsection*{\bf 17. Will Gaia alert on GRB optical counterparts?} \noindent Gaia's sampling and data latency are not well suited to alerting on GRBs. However, we still expect to detect 1-2 bright on-axis afterglows and 5-15 orphan afterglows (\cite{Japelj2011}). \subsubsection*{\bf 18. How many Asteroids will Gaia see?} \noindent About 250,000 asteroids (mostly known). Alerts on new asteroids and NEO candidates will be based on unsuccessful star matching. \subsubsection*{\bf 19. What about known anomalous objects?} \noindent Such objects can be added to the {\bf Watch List}. Every time Gaia observes them, their data will become available for inspection.
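As a toy illustration of the recipes in point 9 (this is not DPAC code; the thresholds, the robust statistics and the function name are our own assumptions):
\begin{verbatim}
import numpy as np

def check_transit(history_mag, new_mag, n_sigma=5.0):
    """Toy alert recipe: compare the newest transit magnitude
    with the historic Gaia light curve."""
    history_mag = np.asarray(history_mag)
    if history_mag.size == 0:
        return "new transient"      # no history -> alert (recipe 3)
    median = np.median(history_mag)
    # robust scatter: MAD scaled to an equivalent Gaussian sigma
    mad = 1.4826 * np.median(np.abs(history_mag - median))
    if abs(new_mag - median) > n_sigma * max(mad, 0.01):
        return "anomaly"            # unexpected change -> alert (recipe 2)
    return "quiet"
\end{verbatim}
The real pipeline of course works on calibrated per-transit fluxes and uses more elaborate statistics; the sketch only captures the logic of the three recipes.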
\subsubsection*{\bf 20. How can I get involved now?} \noindent {\bf - with my telescope time:} prepare for Gaia Alerts, register at Skyalert.org, set up your alerts on the CRTS stream (\cite{DrakeCRTS}) (SNe, CVs, blazars, etc.), follow up the alerts, and contact us with your data! \noindent {\bf - with my scientific interests:} suggest what would be worth detecting and alerting on, propose detection algorithms and classification techniques, and suggest interesting known targets to be observed. \subsubsection*{\bf More information on the web:} \begin{itemize} \item Gaia ESA web pages: \href{http://gaia.esa.int}{{\it http://gaia.esa.int}} \\and \href{http://www.rssd.esa.int/index.php?project=GAIA\&page=index}{{\it http://www.rssd.esa.int/index.php?project=GAIA\&page=index}} \item Gaia Science Alerts Working Group wiki: \href{http://www.ast.cam.ac.uk/ioa/research/gsawg/}{{\it http://www.ast.cam.ac.uk/ioa/research/gsawg/}} \item the original poster on Gaia Alerts presented at the IAU Symposium in Oxford in September 2011: \href{http://www.ast.cam.ac.uk/ioa/wikis/gsawgwiki/index.php/Detection_system}{{\it http://www.ast.cam.ac.uk/ioa/wikis/gsawgwiki/index.php/Detection\_system}} \end{itemize} \subsubsection*{\bf Acknowledgement} This work relies on the efforts of numerous people involved in the preparations for the Gaia mission within the Data Processing and Analysis Consortium (DPAC). We gratefully acknowledge and thank them for their work.
\section*{Torsion-balance probes of fundamental physics} \setcounter{page}{1} \setcounter{section}{0} {\bf E. G. Adelberger\newline Department of Physics, University of Washington, Seattle WA 98195-4290} \newline Modern torsion-balance experiments address a wide range of contemporary problems in fundamental physics. Because the experiments are sensitive to extremely feeble forces, they provide powerful constraints on many proposed extensions of the standard models of particle physics and gravity. Furthermore, the flexibility and relatively short time-scale of many of these experiments allow investigators to respond rapidly to new theoretical developments. Here we outline the many motivations for this work, summarize some of the more interesting results and their implications, and project future developments in this area. A previous review\cite{ad:09} covers much of this field in more detail. \subsection*{Tests of the universality of free fall} Conjectures about new scalar and vector fields permeate much modern thinking in particle physics and cosmology. Such quantum fields necessarily violate the universality of free fall (UFF) because they couple to `charges' rather than mass, so that different electrically-neutral materials do not have the same free-fall acceleration. UFF tests are interesting because their extraordinary sensitivity allows one to see effects many orders of magnitude below the gravitational scale that forms an irreducible background in conventional experiments. Such experiments therefore provide broad-gauge tests for new fundamental physics with length scales greater than 1 cm, as well as testing the weak equivalence principle (WEP), a fundamental prediction of the standard model of gravity. It is conventional to parameterize UFF violation between electrically neutral atoms as a Yukawa interaction with range $\lambda$ that couples to generalized atomic `charges', $\tilde{q} = (Z\cos\tilde{\psi}+N\sin\tilde{\psi})$, where $Z$ and $N$ are the atom's proton and neutron numbers, and $\tilde{\psi}$ specifies the details of the `charges'. This parameterization is exact for vector interactions, and a reasonable approximation for scalar fields. UFF tests compare the accelerations of two different materials in a composition dipole toward an attractor, which can be a laboratory source, the earth, the sun, our galaxy or the entire cosmos. Because there is always a value of $\tilde{\psi}$ for which the `charge' of any object vanishes, unbiased searches for new physics require UFF tests using at least 2 composition dipoles and 2 attractors. The current state of this work (sensitive to forces $10^{13}$ times weaker than gravity) and some of its implications are summarized in Ref.~\cite{wa:12}. Differential accelerations toward the galactic center are particularly interesting because these laboratory experiments demonstrate that any non-gravitational, long-range interactions between hydrogen and galactic dark matter produce less than 10\% of the total acceleration\cite{wa:12}. Differential accelerations toward the sun, combined with the lunar laser-ranging EP test\cite{tu:07}, provide the best unambiguous test of the strong EP for gravitational self-energy. Measurements of differential accelerations in the field of the earth yield high sensitivity to WEP-violating interactions with ranges between 1 m and infinity.
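To make the role of $\tilde{\psi}$ concrete, the following sketch (purely illustrative; the $(Z,N)$ values are standard element data, everything else is our own construction) evaluates the per-nucleon generalized charge of a few common test-body materials:
\begin{verbatim}
import numpy as np

# (Z, N) for representative test-body materials
# (N = average neutron number from standard atomic weights, rounded)
materials = {"Be": (4, 5), "Al": (13, 14), "Ti": (22, 26)}

def qtilde(Z, N, psi):
    """Generalized charge q~ = Z*cos(psi) + N*sin(psi)."""
    return Z * np.cos(psi) + N * np.sin(psi)

for psi in np.linspace(0.0, np.pi, 5):
    row = {m: qtilde(Z, N, psi) / (Z + N) for m, (Z, N) in materials.items()}
    print(round(psi, 2), {m: round(q, 3) for m, q in row.items()})
\end{verbatim}
For any pair of materials there is a value of $\tilde{\psi}$ at which the per-nucleon charges coincide and the differential signal vanishes, which is the quantitative reason why an unbiased search needs at least two composition dipoles and two attractors.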
An interesting application of differential accelerations of objects falling in the earth's field concerns speculations that antimatter may have different gravitational properties from normal matter. In field-theory language, this would imply that gravity has a vector component. But any vector component of gravity is so strongly constrained by WEP tests that the gravitational acceleration of antihydrogen is expected to differ from that of hydrogen by less than 1 part in $10^9$\cite{wa:12}. \subsection*{Tests of the gravitational inverse-square law below the dark-energy length scale} It is useful to parameterize violation of the gravitational inverse-square law (ISL) in terms of a Yukawa interaction, but in this case one that couples to mass instead of a `charge'. The most sensitive current test\cite{ka:07} has shown that any ISL-violating interactions with gravitational strength must have a length scale less than about 50 $\mu$m. As detailed in a previous review\cite{ad:03}, particle and gravitational physics considerations provide compelling reasons to search for violations of the ISL at the shortest possible length scales. The universe is apparently dominated by `dark energy' with a density $\rho_{\rm d} \approx 3.8$ keV/cm$^3$. This corresponds to a distance $\lambda_{\rm d}=\sqrt[4]{\hbar c/\rho_{\rm d}}\approx 85\,\mu$m that may represent a fundamental length scale of gravity\cite{dv:02} below which new phenomena may occur. Such phenomena fall into 2 categories: new geometrical effects (extra dimensions\cite{ar:98,dv:99}, etc.) or extra forces from the exchange of meV-scale bosons\cite{ad:03}. These 2 categories can be distinguished by checking whether the violation violates or obeys the UFF. An effect that obeys the UFF would constitute a `bombproof' signature of extra dimensions; an effect violating the UFF would be clear evidence for a new scale of particle physics (perhaps associated with M-theory's hundreds of originally massless scalar particles with `gravitational' scale couplings). Even if ISL violation is not seen, a rigorous upper bound can be placed on the size of the largest extra dimension (currently 44 $\mu$m)\cite{ka:07}. ISL tests also probe recent interesting speculations in non-gravitational particle physics. A particularly important example is the chameleon mechanism\cite{kh:04}. If scalar bosons, for example, are given very small self-couplings then, in the presence of matter, essentially massless particles acquire effective masses that screen the interior of test bodies, so that only a thin outer shell of the bodies is effective in sourcing or responding to the scalar field\cite{gu:04}. This essentially destroys the experimental limits on such bosons derived from astronomical and conventional laboratory EP and ISL tests. However, the test bodies in recent ISL tests are small enough to probe chameleons that couple to matter and to themselves with gravitational strength\cite{up:06}. Upadhye, Hu and Khoury\cite{up:12} recently noted that the 2007 ISL test\cite{ka:07,ad:07} excludes almost all ``chameleon field theories whose quantum corrections are well controlled and couple to matter with nearly gravitational strength regardless of the specific form of the chameleon potential''. They argue that a two-fold improvement in the minimum distance probed would test {\em all} such theories. The next generation of torsion-balance experiments should reach this sensitivity.
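As a quick back-of-the-envelope check of the dark-energy length scale quoted above (a standalone numerical sketch, not part of any analysis):
\begin{verbatim}
hbar_c = 197.327e6 * 1e-15    # eV*m   (hbar*c = 197.327 MeV*fm)
rho_d  = 3.8e3 / 1e-6         # eV/m^3 (3.8 keV/cm^3)

lambda_d = (hbar_c / rho_d) ** 0.25    # metres
print(lambda_d * 1e6)                  # -> ~85 micrometres
\end{verbatim}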
\subsection*{Planck-scale tests of Lorentz-symmetry violating and non-commutative geometry scenarios and searches for novel spin-dependent interactions} Conventional EP and ISL experiments use unpolarized test bodies and attractors and are completely insensitive to the purely spin-dependent forces such as those arising from the first-order exchange of unnatural parity ($0^-$, $1^+$, {\em etc}.) bosons. Experiments with electron-spin polarized pendulums and attractors probe such interactions and also provide a means to test for preferred-frame effects involving intrinsic spin. Dobrescu and Mocioiu\cite{do:06} have enumerated the kinds of potentials that can arise from one-boson exchange, constrained only by rotational and translational invariance. Most of these involve intrinsic spin. Perhaps the best motivated of these interactions is the spin dipole-dipole interaction that occurs in theories with symmetries that are spontaneously broken at high energies\cite{ki:87} as well as in torsion gravity, an extension of GR that arises in attempts to construct a gauge theory of gravity\cite{sh:02}. The masses and couplings of pseudo-Goldstone bosons created during spontaneous symmetry-breaking have the remarkable property that their masses as well as their coupling strengths are inversely proportional to the symmetry-breaking scale. Long-range (light exchange particle), ultra-feeble interactions are exactly the regime in which torsion pendulums excel. Kosteleck\'y and collaborators\cite{co:97,ko:09} have developed a widely used theoretical framework, the Standard Model Extension (SME), for analyzing possible preferred-frame effects. Again, most of these involve spin. Non-commutative geometry scenarios have received renewed interest from string theorists\cite{hi:02,an:01}. This scenario predicts that a lepton spin will prefer to point in some direction in inertial space. The Eot-Wash group developed a torsion pendulum that contains $\sim 10^{23}$ electron spins with essentially no external magnetic field\cite{he:08} as well as spin sources based on the same technology. The spin pendulum was placed in a rotating torsion balance and used to searched for torques on the spins that tracked the orientation of the apparatus relative to celestial coordinates or to an array of spin sources fixed in the lab. Sensitive searches for preferred-frame effects defined by the entire cosmos were made by checking whether the spins in the pendulum preferred to orient themselves in a direction fixed in inertial space, or if they had a generalized helicity defined by their velocity with respect to the rest-frame of the cosmic microwave background. Tight bounds were set on 9 combinations of Lorentz-symmetry violating violating SME parameters Finally, the effects of non-commutative space-time geometries were explored. In every case case, the constraints are interesting because of their extraordinary sensitivity. Upper bounds on some SME coefficients were between 4 and 5 orders of magnitude below the Planck-scale benchmark, while the bounds on non-commutative geometry are equivalent to an energy scale of $3\times 10^{13}$ GeV. \subsection{Future prospects} None of the above experiments have reached practical limits; all can be made more sensitive by challenging, but achievable, technical improvements. The UFF work is limited by thermal noise and changing gravity gradients. The thermal noise can be lowered by using lower loss suspension fibers. 
Gravity gradients can be continuously monitored, allowing corrections that greatly reduce this systematic effect. The physics reach can be extended by employing new test-body pairs that are `more different'. It is reasonable to foresee an order of magnitude improvement in the physics reach in the next decade. The short-distance ISL work is currently limited by both systematic effects and short-distance electrostatic noise. Better metrology and surface preparation can extend the sensitivity to shorter length scales. However, the ISL-violating signal diminishes rapidly as the length scale falls, so that it seems unlikely that current approaches can probe gravitational-strength interactions with length scales less than 10 $\mu$m or so. Substantial improvements in some of the searches for new spin-dependent interactions are expected from new designs with higher symmetry.
\section{Introduction} \label{sec:intro} The properties of galaxies are directly affected by their host environment. In the local universe, red, passive, early-type galaxies are preferentially found in dense regions and galaxy clusters, while blue, star-forming, late-type galaxies dominate in less dense, field environments \citep[e.g.,][]{Dressler1980, Kauffmann2004, Balogh2004}. Studies of the effect of large-scale structure on galaxy properties are usually confined to comparisons of field versus cluster environments. However, intermediate environments such as galaxy groups, cluster outskirts and filaments are equally important \citep[e.g.,][]{Kodama2001}, as they host the vast majority of galaxies in the local universe \citep[e.g.,][]{Jasche2010, Tempel2014_f, Tempel2014_g, Cautun2014}. In particular, filaments that connect groups and clusters of galaxies may contain up to 40 per cent of the matter in the Universe \citep{Forero2009, Jasche2010}. Theoretical studies \citep[e.g.,][]{Cen1999} have suggested that about half of the warm gas in the Universe, presumably accounting for the low-redshift missing baryons \citep{Fukugita1998, Viel2005}, is hidden in filaments. Recently, \cite{Nicastro2018} observed highly ionized oxygen systems in regions characterized by large galaxy over-densities, supporting the prediction of warm gas in the extragalactic universe. Dissecting the role of these environments on galaxy properties is therefore of extreme importance to shed light on the processes that regulate galaxy evolution. Several mechanisms have been proposed to govern galaxy properties. Dark matter filaments can trap and compress gas, shock heating the accreted gas at the boundary of filaments. This gas then cools rapidly and condenses towards the filament centres. Filaments can therefore assist gas cooling and enhance star formation in their haloes \citep{Liao2018}. In filaments, mild galaxy-galaxy harassment and interactions \citep{Lavery1988, Moore1996, Coppin2012} are favored. Their environment is colder than that of clusters: the typical temperature of filaments is $\sim 10^5-10^7$ K \citep[e.g.,][]{Cen2006, Werner2008, Zappacosta2002, Nicastro2005}, even though filaments must also contain cool gas (T$\sim 10^4$ K), as predicted by Lyman alpha forest observations \citep[e.g.,][]{Kooistra2017}. Therefore, galaxies in filaments can still retain their gas content and form stars. Nonetheless, for low mass galaxies ($M_\ast<10^{10}M_\odot$), simulations show that filaments falling onto clusters are able to produce increased stripping of hot gas even beyond a distance of 5$r_{200}$ from a galaxy cluster centre, and that this is predominant at low redshift \citep{Bahe2013}, suppressing the fuel for star formation. Ram-pressure stripping \citep{Gunn1972} in filaments, known in this context as cosmic web stripping and due to the interaction between the galaxies and the filament gas, might also play a role, especially for low mass galaxies, whose shallow potential wells provide only a relatively small restoring force against the ram pressure of the IGM in filaments \citep[e.g.,][]{Benitez2013}. However, this effect has never been observed, and the fate of the gas that remains in the galaxy or is accreted later is not clear. Cosmic web stripping is so far a purely hydro-dynamical prediction that requires simulations of large volumes able to properly resolve both the cosmic web and the internal halo properties.
Another process that is expected to be quite effective in filaments is gas accretion, which increases the availability of cold gas for galaxies, inducing an enhancement of star formation \citep[e.g.,][]{Darvish2014}. Several works have shown that filaments affect the evolution of the integrated properties of galaxies \citep[e.g.,][]{Koyama2011, Geach2011, Sobral2011, Mahajan2012, Tempel_Libeskind2013, Tempel2013, Zhang2013, Pintos2013, Koyama2014, Santos2014, Malavasi2017, Mahajan2018} and the distribution of satellites around galaxies \citep{Guo2014}, at any redshift, but results are still controversial. Overall, filament galaxies tend to be more massive, redder, more gas poor and have earlier morphologies than galaxies in voids \citep{Rojas2004, Hoyle2005, Kreckel2011, Beygu2017, Kuutma2017}. On the other hand, some studies have reported an increased fraction of star-forming galaxies \citep{Fadda2008, Tran2009, Biviano2011, Darvish2014}, and higher metallicities and lower electron densities \citep{Darvish2015}, in filaments with respect to field environments. Differences in the results might also be due to the different techniques adopted by different teams to define filaments. Indeed, due to the observational biases in large galaxy surveys and the unvirialized nature of the large-scale structures, their characterization is a nontrivial task and many assumptions come into play \citep[e.g., ][]{Biviano2011, Tempel2014_f, Poudel2017}. From the theoretical point of view, \cite{Gay2010} have investigated the influence of filaments on the spectroscopic properties of galaxies, using the MareNostrum simulation. They found that the large-scale filaments are only dynamical features of the density field, reflecting the flow of galaxies accreting onto clusters; the conditions in the filaments are not dramatic enough to strongly influence the properties of the galaxies they encompass. On the other hand, \cite{Aragon2016} showed that star formation quenching in galaxies can be explained as the influence of the filamentary environment. So far, no studies have investigated how spatially resolved properties could be affected by filaments, from either an observational or a theoretical point of view, except for our attempt in \citet[Paper XII]{Vulcani2018b}. In this paper we present the analysis of four field spiral galaxies in the local universe showing asymmetric features and an extended H$\alpha$\xspace distribution, a proxy for extended H{\sc ii} regions, that we will argue are most likely due to the effect of the hosting filaments. H{\sc ii} regions, signifying the presence of ionising OB stars, are usually found in the luminous inner regions of galaxies \citep[see, e.g.,][]{Martin2001}. The evidence of star formation in outer disks, instead, raises new questions about the nature of star formation in diffuse environments. Indeed, outer disks are usually considered inhospitable environments for star formation. In fact, a deviation in the Kennicutt-Schmidt Law \citep{Kennicutt1998b, Kennicutt1989} has been observed at a gas surface density of 3-5 M$_\odot \, pc^{-2}$, where the H$\alpha$\xspace intensity suddenly drops \citep[but see][who suggest this is merely a stochastic effect]{Boissier2007}.
This is generally interpreted as a threshold density for star formation \citep{Kennicutt1989, Martin2001}, most likely due to a transition between dynamically unstable and stable regions of the galaxy \citep[e.g.,][]{Toomre1964} or to a phase transition of the gas \citep[e.g.,][]{Elmegreen1994, Schaye2004, Krumholz2009}. However, H$\alpha$\xspace knots at large radii have been observed in a few galaxies \citep{Kennicutt1989, Martin2001, Ferguson1998}, and $\sim$30\% of disk galaxies have UV emitting sources beyond their optical disks \citep{Thilker2005, Thilker2007, GildePaz2005, Zaritsky2007, Christlein2008}. These complexes are often coincident with local H{\sc i} over-densities \citep{Ferguson1998}. In M83 and NGC 4625, the UV knots have been identified as low-mass stellar complexes and, if visible in H$\alpha$\xspace, are generally ionised by a single star \citep{GildePaz2007}. These knots are dynamically cold and rotating, indicating that outer disk complexes are extensions of the inner disk \citep{Christlein2008}. Isolated H{\sc ii} regions have also been discovered in the extreme outskirts of galaxy halos in the Virgo Cluster \citep{Gerhard2002, Cortese2004}, in gaseous tidal debris \citep{RyanWeber2004, Oosterloo2004} and in between galaxies in galaxy groups \citep{Sakai2002, Mendes2004}. These appear as tiny emission-line objects in narrow-band images, at projected distances up to 30 kpc from the apparent host galaxy. These regions are sometimes associated with previous or ongoing galaxy interactions \citep{Thilker2007, Werk2008}. H$\alpha$\xspace radiation has also been observed to be emitted by the gaseous halos of nearby galaxies \citep{Zhang2018}. This emission is extremely faint (flux${\rm \ll 10^{-17} erg/cm^2/s/}$\AA{}) and has been observed up to several hundreds of kpc from the main galaxy. An explanation for the existence of these outer knots could be that at some sites the gas density may exceed a star formation threshold locally, allowing stars to form beyond the radius where the azimuthally averaged gas density is at or below the threshold density \citep{Kennicutt1989, Martin2001, Schaye2004, Elmegreen2006, GildePaz2007}. All of the above studies are based on traditional observational techniques, such as narrow-band imaging and the Fabry-Perot staring technique. These techniques only permit the detection and basic characterization of the H{\sc ii} regions, without giving spatially resolved information on the chemical composition and age of the regions. The galaxies we discuss in this paper are instead drawn from an Integral Field Spectroscopy (IFS) survey that has been designed to focus on the galaxy external regions and allows us to perform a detailed analysis of the galaxy outskirts: GASP\footnote{\url{http://web.oapd.inaf.it/gasp/index.html}} (GAs Stripping Phenomena in galaxies with MUSE), an ESO Large programme that exploits the integral-field spectrograph MUSE mounted at the VLT with the aim of characterising where, how and why gas can be removed from galaxies in different environments. A complete description of the survey strategy, data reduction and analysis procedures is presented in \cite[][Paper I]{Poggianti2017a}. First results on single objects in clusters are discussed in \citealt[(Paper II)]{Bellhouse2017}; \citealt[(Paper III)]{Fritz2017}; \citealt[(Paper IV)]{Gullieuszik2017}; \citealt[(Paper V)]{Moretti2018}; and in lower-density environments in \citealt[(Paper VIII)]{Vulcani2017c}; \citealt[(Paper VII)]{Vulcani2018}; \citetalias{Vulcani2018b}.
GASP includes a sample of galaxies selected for presenting a B-band morphological asymmetry suggestive of unilateral debris \citep{Poggianti2016}, plus a subset of undisturbed galaxies used as a control sample. Throughout all the papers of the GASP series, we adopt a \cite{Chabrier2003} initial mass function (IMF) in the mass range 0.1-100 M$_{\odot}$. The cosmological constants assumed are $\Omega_m=0.3$, $\Omega_{\Lambda}=0.7$ and H$_0=70$ km s$^{-1}$ Mpc$^{-1}$. \section{Data} \subsection{The target selection} In this paper, unless otherwise stated, we consider only the GASP galaxies selected from the field sample. All galaxies are drawn from the Millennium Galaxy Catalog \citep{Liske2003, Driver2005} and selected from the PM2GC \citep{Calvi2011}. We exclude from the GASP sample interacting \citepalias[e.g.][]{Vulcani2017c} and passive \citepalias[e.g.,][]{Vulcani2018b} galaxies, counter-rotating disks \citepalias[e.g.][]{Vulcani2018b} and galaxies with a central H$\alpha$\xspace hole (Moretti et al. in prep.). We compute the maximum extension of the H$\alpha$\xspace distribution in units of the effective radius ($r_e$). Specifically, we measure the radius containing 99\% of the H$\alpha$\xspace flux with S/N$>$3. Details on the H$\alpha$\xspace images used for selecting the galaxies are given in Secs. 2.2 and 2.3. The effective radius, along with the inclination and the position angle of the galaxies, was obtained from the analysis of the I-band images derived by integrating the MUSE datacubes over the Cousins I-band filter response curve, as explained in Franchetto et al. (in prep.). Briefly, they were obtained using {\tt ellipse} \citep{Jedrzejewski1987} in the software IRAF, which performs an isophotal segmentation of the galaxy and draws the luminosity growth curve \begin{equation} L(R)=2\pi\,\int_{0}^{R}I(a)\,(1-\varepsilon(a))\,a\,da, \end{equation} where $I(a)$ is the surface brightness profile, $\varepsilon(a)$ is the isophotal ellipticity profile and $a$ is the semi-major axis of the elliptical isophotes. Taking advantage of the ample sky coverage of the GASP data, we extended the fitting up to the most external parts of the galaxies to probe the behaviour of the surface brightness at large radii. Sources extraneous to the galaxy, bright knots (often located along the spiral arms) and bad pixels were masked out to prevent erroneous measurements. Although the I-band image is obtained from the sky-subtracted MUSE datacube, it presents a residual sky intensity comparable to the intensities of the last fitted isophotes. Thus, we subtracted the intensity of the last isophote from the image and proceeded with the computation of the luminosity growth curve. Assuming that the galaxy regions beyond the largest isophote contribute negligibly to the total galaxy luminosity, we approximated $L_{\rm tot}\approx L(a_{\rm max})$ (with $a_{\rm max}$ the semi-major axis of the largest isophote) and we estimated the effective radius as the radius $R_{\rm e}$ such that $L(R_{\rm e})/L_{\rm tot}=0.5$. From the surface brightness profile we selected the isophotes that trace the stellar disk and measured their mean position angle ($PA$), mean ellipticity ($\varepsilon$) and corresponding errors.
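Schematically, the $R_{\rm e}$ measurement described above amounts to a cumulative numerical integration of the isophote profiles followed by an inversion of the growth curve; a minimal sketch (the array names, the trapezoidal integration and the linear interpolation are our own choices; the actual analysis uses the {\tt ellipse} output directly):
\begin{verbatim}
import numpy as np

def effective_radius(a, I, eps):
    """R_e from isophote semi-major axes `a`, mean surface
    brightness `I` and ellipticity `eps` (1D arrays).
    Implements L(R) = 2*pi*int I(a)(1-eps(a)) a da, with the
    intensity of the last isophote used as residual sky."""
    integrand = 2.0 * np.pi * (I - I[-1]) * (1.0 - eps) * a
    # cumulative trapezoidal integration of the growth curve
    steps = 0.5 * (integrand[1:] + integrand[:-1]) * np.diff(a)
    L = np.concatenate(([0.0], np.cumsum(steps)))
    L_tot = L[-1]          # assumes negligible light beyond a_max
    return np.interp(0.5 * L_tot, L, a)
\end{verbatim}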
\begin{figure} \centering \includegraphics[scale=0.4]{filaments_Rmax.png} \caption{Distribution of the maximum extent of H$\alpha$\xspace in units of $r_e$ (R(H$\alpha$\xspace)$_{max}$) in the GASP field sample. The black line and shaded area show the median value and its error. The red line shows the threshold used to select galaxies in this work.} \label{fig:rmax} \end{figure} \begin{figure} \centering \includegraphics[scale=0.58]{fil_images_v3.pdf} \caption{RGB (left) and H$\alpha$\xspace (right) images of the targets. From top to bottom, P95080, P19482, P63661, and P8721 are shown. The reconstructed $g$, $r$, $i$ filters from the MUSE cube have been used. North is up, and east is left. The color map is inverted for display purposes. In all the plots, the green ellipses show the $r_e$, the dashed blue ellipses show the maximum radius at which H$\alpha$\xspace is detected (see text for details). Purple areas show the detached clouds (see text for details). Asymmetries in the star and gas distributions are seen in all galaxies, with one side of the galaxies extending more than the other. All these galaxies have H$\alpha$\xspace extending beyond 4$r_e$ and show a patchy H$\alpha$\xspace distribution.} \label{fig:rgb_image} \end{figure} The H$\alpha$\xspace disk extension of the sample is shown in Fig. \ref{fig:rmax}. The median H$\alpha$\xspace disk extension is 3.1$\pm$0.2 times $r_e$. We then selected the galaxies with a maximum H$\alpha$\xspace extension larger than four $r_e$, corresponding to the 90th percentile. Four galaxies passed the selection; they are listed in Table \ref{tab:gals}, which summarises some important information that will be further used and discussed throughout the paper. Figure \ref{fig:rgb_image} shows the color composite images of the targets, obtained combining the reconstructed $g-$, $r-$ and $i-$filters from the MUSE datacube, along with the H$\alpha$\xspace maps. Overplotted in green are the $r_e$, while overplotted in blue are the maximum radii at which H$\alpha$\xspace is detected. For comparison, Figure \ref{fig:rgb_image_control} shows the color composite images and H$\alpha$\xspace maps of four representative galaxies of the control sample. Figures 1 and 6 in Vulcani et al. (submitted, Paper XX) show the images for all the galaxies in the GASP control sample that will also be used in this paper (Sec. 3.5). The presence of detached clouds (highlighted in purple in Fig. \ref{fig:rgb_image}) in the H$\alpha$\xspace disk of the selected galaxies is striking, especially if compared with the absence of the same features among the control sample galaxies. The clouds extend beyond the spiral arms of the galaxies, suggesting they might not be strictly related to them. A quantitative identification of the clouds is deferred to the next Section. \begin{figure} \centering \includegraphics[scale=0.57]{fil_images_control_v2.pdf} \caption{Same as Fig.\ref{fig:rgb_image}, but for four galaxies representative of the GASP control sample. In these galaxies H$\alpha$\xspace extends at most to 3$r_e$ and the galaxy boundaries are not jagged.} \label{fig:rgb_image_control} \end{figure} \begin{table*} \caption{Properties of the targets. For each galaxy, the ID, redshift, coordinates, total stellar mass, effective radius, maximum extension of H$\alpha$\xspace, position angle, ellipticity and physical scale are given. \label{tab:gals}} \centering \begin{tabular}{lrrrrrrrrr} \hline \multicolumn{1}{c}{ID} & \multicolumn{1}{c}{z} & \multicolumn{1}{c}{RA} & \multicolumn{1}{c}{DEC} & \multicolumn{1}{c}{$\log M$} & \multicolumn{1}{c}{$r_e$} & \multicolumn{1}{c}{$R(H\alpha)_{max}$} & \multicolumn{1}{c}{PA} & \multicolumn{1}{c}{$\epsilon$} & \multicolumn{1}{c}{phys.
scale} \\ \multicolumn{1}{c}{} & \multicolumn{1}{c}{} & \multicolumn{1}{c}{(J2000)} & \multicolumn{1}{c}{(J2000)} & \multicolumn{1}{c}{($M_\ast/M_\odot$)}& \multicolumn{1}{c}{($^{\prime\prime}$)} & \multicolumn{1}{c}{($r_e$)} & \multicolumn{1}{c}{(deg)} & \multicolumn{1}{c}{} & \multicolumn{1}{c} {kpc/$^{\prime\prime}$} \\ \hline P95080 & 0.04038 &198.03625 &-0.23903 & 9.98 &7.5$\pm$0.8 & 4.1&83 & 0.32 & 0.7985\\ P19482 & 0.04063 & 170.63021 & -0.01711 & 10.27 &4.8$\pm$0.4 &7.9&127 &0.42& 0.8030\\ P63661 & 0.05516 & 218.09081 &0.17823 & 10.26 & 6.0$\pm$0.6 &4.3&-63 & 0.41 &1.0718 \\ P8721 &0.06477 & 158.53624 & 0.00101 & 10.75 & 6.0$\pm$0.2 &4.6&62& 0.67 &1.2443 \\ \hline\end{tabular} \end{table*} \subsection{Observations and data reduction} All the GASP targets were observed in service mode with the MUSE spectrograph, mounted at the Nasmyth focus of the UT4 VLT, at Cerro Paranal in Chile. Each galaxy was observed with clear conditions; the seeing remained below 0$\farcs$9 during observations. For each galaxy, a total of four 675 s exposures were taken with the Wide Field Mode. As far as the galaxies discussed in this paper are concerned, P95080 was observed on 2017, February 4; P19482 on 2017, May 5; P63661 on 2017, May 1; and P8721 on 2016, January 9. The data reduction process for all galaxies in the GASP survey is presented in \citetalias{Poggianti2017a}. For all galaxies, we average-filtered the datacubes in the spatial direction with a 5$\times$5 pixel kernel, corresponding to 1$^{\prime\prime}$ \citepalias[see][for details]{Poggianti2017a}. At the redshifts of the galaxies presented here, 1$^{\prime\prime}$ corresponds to 0.8-1.2 kpc, depending on the redshift of the target. \subsection{Methods}\label{sec:analysis} \citetalias{Poggianti2017a} extensively presents the methods used to analyse galaxies within the GASP program. Here we just recall the basic procedures and references useful for the following analysis. In brief, we corrected the MUSE reduced datacubes for extinction due to our Galaxy and then measured (1) the total fluxes and kinematic properties of the gas, by running the {\sc kubeviz}\xspace \citep{Fossati2016} code; (2) the kinematic properties of the stars, by running the Penalized Pixel-Fitting (pPXF) software \citep{Cappellari2004}, which works in Voronoi binned regions of a given S/N \citep{Cappellari2003}, smoothed using the two-dimensional local regression technique (LOESS) as implemented in the Python code developed by M. Cappellari;\footnote{\url{http://www-astro.physics.ox.ac.uk/~mxc/software}} (3) the properties of the stellar populations, such as star formation histories, luminosity and mass weighted ages, and surface mass densities, by running the spectral fitting code {\sc sinopsis}\xspace \citepalias{Fritz2017}; (4) the dust extinction A$_V$, from the absorption-corrected Balmer decrement, assuming an intrinsic H$\alpha$\xspace/H$\beta$ ratio equal to 2.86 and adopting the \cite{Cardelli1989} extinction law; and (5) the ionised gas metallicity, by running a modified version of the Python code pyqz \citep{Dopita2013} v0.8.2 (F. Vogt 2017, private communication). Further details will be discussed in the next section, where needed.
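As an illustration of step (4), the conversion from the Balmer decrement to $A_V$ is a one-liner once the extinction-curve coefficients are fixed; a minimal sketch (the \cite{Cardelli1989} values $k({\rm H}\alpha)\simeq2.53$ and $k({\rm H}\beta)\simeq3.61$ for $R_V=3.1$ are standard, while the wrapper itself is our own):
\begin{verbatim}
import numpy as np

K_HA, K_HB = 2.53, 3.61   # Cardelli et al. (1989) curve, R_V = 3.1

def a_v(f_ha, f_hb, intrinsic=2.86, r_v=3.1):
    """A_V from the absorption-corrected Balmer decrement."""
    ebv = 2.5 / (K_HB - K_HA) * np.log10((f_ha / f_hb) / intrinsic)
    return r_v * np.clip(ebv, 0.0, None)  # clip unphysical E(B-V)<0
\end{verbatim}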
\section{RESULTS}\label{sec} In this section we characterise each of the targets separately. In the following sections we will highlight what these galaxies have in common and look for the reasons for such similarities. \subsection{P95080} \begin{figure*} \centering \includegraphics[scale=0.5]{bpt.png} \caption{BPT line-ratio diagram of [OIII]5007/H$\beta$ vs [NII]6583/H$\alpha$\xspace for the four galaxies. Lines are from \citet[][K03]{Kauffmann2003}, \citet[][K01]{Kewley2001} and \citet[][SB10]{Sharp2010}, used to separate star-forming, composite, AGN and LINER regions. In the inset the BPT line-ratio map is shown. Only spaxels with S/N$> 3$ in all the emission lines involved are shown. \label{fig:BPT} } \end{figure*} \begin{figure*} \centering \includegraphics[scale=0.51,clip, trim=0 0 0 0]{P95080_summary_plot_referee.png} \caption{P95080. The different panels show the MUSE H$\alpha$\xspace map, the H$\alpha$\xspace velocity (a) and velocity dispersion (b) maps, the stellar velocity (c) and stellar velocity dispersion (d) maps, the metallicity map of the ionised gas (e), the $A_V$ map (f), and the luminosity weighted age (g) and stellar mass density (h) maps. More details are given in the text. In all plots, (0, 0) is the center of the MUSE image. \label{fig:P95080} } \end{figure*} P95080, seen in the top panels of Fig.\ref{fig:rgb_image}, is a spiral galaxy with a moderate inclination and possibly a bar. The right panel shows the MUSE map of H$\alpha$\xspace, uncorrected for intrinsic dust extinction but corrected for stellar absorption and Galactic extinction. We plot only the spaxels with H$\alpha$\xspace S/N$> 3$. The H$\alpha$\xspace distribution is quite patchy in the central regions of the galaxy, where many peaks are visible. It emerges that the ionised gas is characterised by a number of clouds detached from the main body, which extend beyond the visible light. These surround the galaxy without a preferred orientation. As these clouds might be due to spurious spaxels, we decided to plot only the spaxels that, in addition to having H$\alpha$\xspace S/N$> 3$, are surrounded by spaxels with a measured velocity at S/N$>$3. Specifically, we build a 3$\times$3 matrix centered on each spaxel and keep only those spaxels that are surrounded by at least 7 (out of 9) spaxels with non-zero velocity. In addition, for the spaxels in the outskirts of the galaxy and in possible isolated clouds, we plot only the spaxels whose velocity is within $3\sigma$ of the mean velocity of the galaxy, considering separately the approaching and receding sides. This approach helps to remove possible spurious signal. Given the redshift of P95080, the sky line at $\lambda$=6830\AA{} falls very close to H$\alpha$\xspace. Therefore, only for this galaxy, we also exclude all the spaxels in the clouds whose velocity is within $\pm 50$ $\rm km \, s^{-1}$\xspace of the velocity of the sky line. We clean also the following plots adopting the same approach. We are therefore confident that the clouds we detect are real and due to some specific physical process. To identify the clouds using a quantitative metric, we select all H$\alpha$\xspace regions in the surface brightness range $\rm 10^{-17.7}-10^{-15.5} \, erg/s/cm^2/arcsec^2$ that have no pixels in common with the main body of the galaxy, have a size larger than 10 pixels and are within R(H$\alpha$\xspace)$_{max}$ (a schematic implementation of these criteria is sketched below). With this approach, we identify 32 clouds. In P95080, the measured extension of the H$\alpha$\xspace disk is 4.1$\times$ the extension of the stellar disk, defined by $r_e$.
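A schematic implementation of the two selection steps above (the neighbour criterion and the cloud identification) could look as follows; this is a sketch under our own naming conventions, with the adjacency test standing in for the requirement that clouds share no pixel with the main body:
\begin{verbatim}
import numpy as np
from scipy import ndimage

def clean_velocity_mask(vel, snr):
    """Keep spaxels with S/N>3 whose 3x3 neighbourhood contains
    at least 7 (of 9) spaxels with a measured velocity."""
    good = np.isfinite(vel) & (snr > 3)
    counts = ndimage.uniform_filter(good.astype(float), size=3) * 9.0
    return good & (counts >= 7)

def find_clouds(ha_sb, main_body, min_pix=10):
    """Connected Halpha regions in the quoted surface-brightness
    range, detached from the main body and larger than min_pix."""
    in_range = (ha_sb > 10**-17.7) & (ha_sb < 10**-15.5)
    labels, nlab = ndimage.label(in_range & ~main_body)
    grown = ndimage.binary_dilation(main_body)
    return [labels == i for i in range(1, nlab + 1)
            if (labels == i).sum() > min_pix
            and not ((labels == i) & grown).any()]
\end{verbatim}
(The additional cut at R(H$\alpha$\xspace)$_{max}$ and the sky-line veto are omitted for brevity.)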
We note that the H$\alpha$\xspace images shown in Fig. \ref{fig:rgb_image_control} have been produced following the same procedure; therefore, clouds of similar size and intensity would have been detected. In contrast, the aforementioned approach does not identify any clouds in the galaxies belonging to the control sample. The map of H$\alpha$\xspace, combined with those of H$\beta$\xspace, [OIII] 5007 \AA{}, [OI] 6300 \AA{}, [NII] 6583 \AA{}, and [SII] 6716+6731 \AA{}, can be used to determine the main ionising source at each position. The lines' intensities are measured after subtraction of the continuum, exploiting the pure stellar emission best fit model provided by {\sc sinopsis}, to take into account any possible contamination from stellar photospheric absorption. Only spaxels with S/N$> 3$ in all the emission lines involved are considered. All the diagnostic diagrams \citep[BPT,][]{Baldwin1981} are concordant in finding that young stars produce the ionised gas \citep[``Star-forming'' according to][]{Kauffmann2003, Kewley2006} throughout the galaxy and in excluding the presence of an AGN in the galaxy center (see the [OIII]/H$\beta$\xspace vs [NII]/H$\alpha$\xspace plot in the left panel of Fig.\ref{fig:BPT}; the other plots are not shown). This is in agreement with previous classifications found in the literature for the same galaxy \citep[e.g.,][]{Veron2010}. Since the ionisation source is mostly photoionisation by young stars, we can now measure the total ongoing SFR, obtained from the dust- and absorption-corrected H$\alpha$\xspace luminosity adopting the \cite{Kennicutt1998a} relation for a \cite{Chabrier2003} IMF. Integrating the spectrum over the galaxy, we get a value of SFR=0.74 $\rm{M_\odot \, yr^{-1}}$. Figure \ref{fig:P95080} presents the maps of other quantities obtained from the MUSE datacubes. From left to right, top to bottom, it shows the gas and stellar kinematics, the metallicity of the ionised gas, the extinction map, the luminosity weighted age and the surface mass density. Panels (a) and (b) show the H$\alpha$\xspace velocity and velocity dispersion maps. The gas is rotating around the North-South direction: the East side is receding, the West side is approaching. The velocity field is quite regular and spans the range ($-100<$v/$\rm km \, s^{-1}$\xspace$<100$). The median error on the gas velocity in the spaxels is $\sim$5 $\rm km \, s^{-1}$\xspace. Uncertainties on the stellar motion are the formal errors of the fit, calculated using the original noise spectrum datacube and normalized by the $\chi^2$ of the fit. The velocity of the detached clouds is that expected given their position with respect to the galaxy, suggesting that they indeed belong to the object. To further assess the values obtained for the velocity of the clouds, we integrated the spaxels of each cloud and ran {\sc kubeviz}\xspace on the integrated spectra. Values obtained on the spatially resolved and integrated spectra are largely in agreement. The few spaxels in the West region of the galaxy with velocity $\sim$100 $\rm km \, s^{-1}$\xspace are residuals of the sky line emission discussed above and do not carry any information. The velocity dispersion is overall low, having a median value of 16 $\rm km \, s^{-1}$\xspace. This is indicative of a dynamically cold medium. The East side has a systematically higher velocity dispersion than the West one. All the clouds have a typically low velocity dispersion.
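For reference, the total SFR quoted above follows from a linear conversion of the dust- and absorption-corrected H$\alpha$\xspace luminosity; a one-line sketch (the factor $4.6\times10^{-42}$, i.e. the \cite{Kennicutt1998a} calibration rescaled to a \cite{Chabrier2003} IMF, is the commonly adopted value and is assumed here):
\begin{verbatim}
def sfr_halpha(L_ha):
    """SFR [Msun/yr] from corrected Halpha luminosity [erg/s]
    (Kennicutt 1998 rescaled to a Chabrier IMF; assumed factor)."""
    return 4.6e-42 * L_ha

print(sfr_halpha(1.6e41))   # ~0.74 Msun/yr, the order measured here
\end{verbatim}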
Panels (c) and (d) of Fig. \ref{fig:P95080} show the stellar velocity and stellar velocity dispersion maps, respectively, for Voronoi bins with S/N$> 10$. The velocity field of the stellar component of P95080 is similar to that of the gas, spanning a similar range ($-100<$v/$\rm km \, s^{-1}$\xspace$<100$), though less spatially extended. The median error in stellar velocity is $\sim50$ $\rm km \, s^{-1}$\xspace. The bending of the locus of zero-velocity is due to the presence of the bar, as discussed in Erroz-Ferrer et al. (2015). Also the velocity dispersion of the stars is typically low ($<20$ $\rm km \, s^{-1}$\xspace, which is below the resolution limit). Deviations are seen in the eastern part of the galaxy, where larger errors prevent us from drawing solid conclusions. Panel (e) of Fig. \ref{fig:P95080} presents the spatial distribution of the metallicity of the ionised gas, i.e. $12+\log[O/H]$. Only the spaxels with S/N$>$3 in all the lines involved in the computation of the metallicity ([NII]6583/[SII]6716+6731 vs [OIII]/[SII]6716+6731) are plotted. P95080 is characterised by quite high values of the metallicity in the center ($12+\log[O/H]\sim 9$) and then by a smooth decline towards the outskirts, which are characterised by $12+\log[O/H]\sim 8$. P95080 lies on the typical mass-metallicity relation for local field galaxies \citep[see][]{Tremonti2004}. Unfortunately, given the low S/N of some of the lines, we cannot properly constrain the metallicity for the clouds. Panel (f) shows the $A_V$ map for spaxels with S/N(H$\alpha$\xspace)$> 3$. Overall, P95080 is characterised by low values of extinction, almost always $<1$ mag. Relatively higher values of extinction are found preferentially in the central regions and trace one of the spiral arms of the galaxy. Panel (g) presents the map of the luminosity weighted ages. This provides an estimate of the average age of the stars weighted by the light we actually observe, and gives an indication of when the last episode of star formation occurred. The map shows that in the central regions the typical luminosity weighted age of the galaxy is $\sim 10^{9-9.5}$ yr, and it decreases towards the outskirts, where it reaches values of $\sim10^{7}$ yr. While in the center the distribution of ages is quite homogeneous, towards the outskirts it becomes more patchy, showing many knots of younger ages. Typically, they correspond to the H$\alpha$\xspace blobs seen in the top right panel of Fig. \ref{fig:rgb_image}. Finally, panel (h) shows the stellar mass density. The vast majority of the mass is confined in the central parts of the galaxy, while the outskirts, and especially the clouds around the galaxy, are far less massive, reaching a mass density of $\rm 3\times 10^6 M_\odot/kpc^2$. The bar and two main spiral arms, already detected in the H$\alpha$\xspace map, are seen also in the stellar mass density. Running {\sc sinopsis}\xspace on the integrated spectra of the entire galaxy, we obtain a total M$_\ast$ of 9.5$\rm \times 10^{9} \, M_\odot$. Given its values of SFR and stellar mass, P95080 lies on the typical SFR-mass relation for star-forming field galaxies \citep[Paper XIV]{Vulcani2018c}. \subsection{P19482} \begin{figure*} \centering \includegraphics[scale=0.51,clip, trim=0 0 0 0]{P19482_summary_plot_referee.png} \caption{P19482. Panels are as in Fig.\ref{fig:P95080}. \label{fig:P19482}} \end{figure*} We now focus on P19482, whose color composite image is presented in the second row of Fig. \ref{fig:rgb_image}.
This is a spiral galaxy, with a slightly higher inclination than P95080. A spiral arm extends towards the South-West. We detect the presence of 42 clouds, whose size is much larger than the typical size of the noise, seen e.g. in the corners of the image. Most of the detached clouds follow the spiral arms but, especially in the North-East region, no stellar disk seems to be associated with the clouds. The maximum extension of the H$\alpha$\xspace disk is $7.9\times r_e$. Figure \ref{fig:P19482} presents all the other properties of the galaxy. The analysis of the velocity field (panel (a)) indicates that these clouds belong to the galaxy, as their velocity is consistent with that of the part of the galaxy that is close to them. As we did for P95080, we integrated the spaxels of each cloud and ran {\sc kubeviz}\xspace on the integrated spectra. Values obtained on the spatially resolved and integrated spectra are largely in agreement. Overall, in each position, the gas and the stars (panel (c)) rotate around the same axis and at similar speed ($-180<$v/$\rm km \, s^{-1}$\xspace$<180$). The median error on the gas velocity is 4 $\rm km \, s^{-1}$\xspace, the one on the stellar velocity is 50 $\rm km \, s^{-1}$\xspace. The gas velocity dispersion (panel (b)) is overall $<10$ $\rm km \, s^{-1}$\xspace. The median error on the gas velocity dispersion is $\sim 6$ $\rm km \, s^{-1}$\xspace. The velocity dispersion of the stellar component (panel (d)) is overall quite low ($\sim40$ $\rm km \, s^{-1}$\xspace). In the South-East region it is systematically higher, reaching values of 80 $\rm km \, s^{-1}$\xspace. The analysis of the diagnostic diagrams (Fig.\ref{fig:BPT}) does not detect the presence of an AGN in the galaxy center. The emission-line ratios are consistent with the gas being photoionised by young stars. The metallicity map of the ionised gas, presented in panel (e), ranges from $12+\log[O/H]=9.5$ in the core, declining sharply towards the outskirts, where the typical metallicity is $\sim 8.5$. We can estimate the metallicity also in a few clouds, finding values consistent with those at the edges of the galaxy. P19482 presents low values of dust extinction (panel (f)), ranging from 0.2 mag in the outskirts to 2 mag in the center. The luminosity weighted age (panel (g)) strongly varies across the galaxy. Maximum values are reached in the galaxy center, where LWA$\sim 10^9$ yr, while, moving towards the outskirts, the luminosity weighted age progressively decreases, down to a minimum value of LWA$\sim 10^7$ yr. As for P95080, most of the mass (panel (h)) is contained in the central part of the galaxy. Running {\sc sinopsis}\xspace on the integrated spectra of the entire galaxy, we obtain a total M$_\ast$ of 1.9$\rm \times 10^{10} \, M_\odot$. The total ongoing SFR of P19482 is 1.3 $\rm{M_\odot \, yr^{-1}}$; its position on the SFR-mass relation is that expected for star-forming field galaxies \citepalias{Vulcani2018c}. \subsection{P63661} We now move our attention to P63661, whose RGB image is presented in the third row of Fig. \ref{fig:rgb_image}. Similarly to the previous galaxies, this is a spiral galaxy with a moderate inclination. An unwinding spiral arm extends towards the South-West. \begin{figure*} \centering \includegraphics[scale=0.51,clip, trim=0 0 0 0]{P63661_summary_plot_referee.png} \caption{P63661. Panels are as in Fig.\ref{fig:P95080}.
\label{fig:P63661}} \end{figure*} The H$\alpha$\xspace map shown in Figure \ref{fig:rgb_image} and Figure \ref{fig:P63661} unveils a much more complicated structure. Focusing on the H$\alpha$\xspace distribution, we find that it extends well beyond the stellar disk. The maximum extension of the H$\alpha$\xspace disk is $4.3\times r_e$. On the West side of the galaxy, the H$\alpha$\xspace map presents a rift: a portion of the gas is detached from the main body. We remind the reader that GASP data reach a surface brightness detection limit of $V\sim 27$ mag arcsec$^{-2}$ and $\rm{\log (H\alpha [erg \, s^{-1} \, cm^{-2} arcsec^{-2}]) \sim -17.6}$ at the 3$\sigma$ confidence level \citepalias{Poggianti2017a}. On the North-East side of the galaxy, the H$\alpha$\xspace distribution appears broken off and presents a sharp edge. In addition, a number of smaller clouds, 16 in total, surround the galaxy. The analysis of the velocity field (panel (a)) indicates that these clouds belong to the galaxy, as their velocity is consistent with that of the part of the galaxy that is close to them. Running {\sc kubeviz}\xspace on the integrated spectra of each cloud, we obtained values compatible with those obtained on the individual spaxels. Overall, in each position, the gas and the stars (panel (c)) rotate around the same axis and at similar speed ($-100<$v/$\rm km \, s^{-1}$\xspace$<100$). In the external regions, where no stars are detected in correspondence with the gas, the gas reaches velocities of $|v|\sim 120$ $\rm km \, s^{-1}$\xspace. The median error on the gas velocity is 4 $\rm km \, s^{-1}$\xspace, the one on the stellar velocity is 40 $\rm km \, s^{-1}$\xspace. The gas velocity dispersion (panel (b)) is overall $<20$ $\rm km \, s^{-1}$\xspace, except in the core, where it reaches values of $\sim 45$ $\rm km \, s^{-1}$\xspace. The median error on the gas velocity dispersion is $<2$ $\rm km \, s^{-1}$\xspace. The velocity dispersion of the stellar component (panel (d)) is more chaotic, especially towards the South-East, where a spiral arm is present. Nonetheless, typical values do not exceed $\sim$50 $\rm km \, s^{-1}$\xspace. No central AGN is detected from the analysis of the diagnostic diagrams (Fig.\ref{fig:BPT}). The emission-line ratios are consistent with the gas being photoionised by young stars. This finding confirms previous classifications \citep[e.g.,][]{Veron2010}. The metallicity map of the ionised gas is presented in panel (e). The central part of the galaxy has $12+\log[O/H]>9$. This value declines sharply towards the outskirts, where the typical metallicity is $\sim 8.5$. From the reliable spaxels in the detached part of the galaxy towards the West, we infer that the metallicity of that region is significantly lower. The $A_V$ map (panel (f)) shows that overall P63661 is characterised by low values of extinction, almost always $\leq1$ mag. The last two panels of Fig.\ref{fig:P63661} show the properties of the stellar populations. The maximum luminosity weighted age of the galaxy (panel (g)) is found in the galaxy center: LWA$\sim 10^9$ yr. Moving towards the outskirts, the luminosity weighted age constantly decreases, reaching its minimum values in the detached region on the North-West side and towards the East, along the extension of one spiral arm. Most of the mass (panel (h)) is contained in the central part of the galaxy. Beyond R25, the stellar mass density reaches values of $\rm 3\times 10^6 M_\odot/kpc^2$.
Running {\sc sinopsis}\xspace on the integrated spectra of the entire galaxy, we obtain a total M$_\ast$ of 1.8$\rm \times 10^{10} \, M_\odot$. The total ongoing SFR of P63661 is 0.86 $\rm{M_\odot \, yr^{-1}}$; its position on the SFR-mass relation is that expected for star-forming field galaxies \citepalias{Vulcani2018c}. \subsection{P8721} P8721 is a spiral galaxy with a quite high inclination. Projection effects can therefore affect the interpretation of the results, and possible detached clouds seen in projection might appear as part of the galaxy. \begin{figure*} \centering \includegraphics[scale=0.51,clip, trim=0 0 0 0]{P8721_summary_plot_referee.png} \caption{P8721. Panels are as in Fig.\ref{fig:P95080}. \label{fig:P8721}} \end{figure*} The ionised gas is very extended, especially towards the South-West. The maximum extension of the H$\alpha$\xspace disk is $4.6\times r_e$. Strikingly, Fig. \ref{fig:rgb_image} shows that the stellar disk extends mostly towards the North-East with respect to the galaxy center, while the ionised gas disk extends mostly towards the South-West. It therefore appears that the light distributions in the B band and in H$\alpha$\xspace are distinct. A bow of bright H$\alpha$\xspace knots is visible in the Southern part of the galaxy. Twelve clouds of detached gas are visible, both towards the South-West and towards the North-East. The velocity of the gas (panel (a) in Fig. \ref{fig:P8721}) in these clouds is consistent with them belonging to P8721. The gas velocity field is regular and spans the range $-220<v/$$\rm km \, s^{-1}$\xspace$<220$. The median error on the gas velocity is $\sim4$ $\rm km \, s^{-1}$\xspace. The gas velocity dispersion, shown in panel (b), is overall quite low ($<35$ $\rm km \, s^{-1}$\xspace), except for the core and two external regions. The median error on the gas velocity dispersion is $<4$ $\rm km \, s^{-1}$\xspace. The right panel of Fig.\ref{fig:BPT} shows that, while an AGN is not detected, the central region of the galaxy has a composite spectrum, indicative of either shocks or old evolved stars. The stellar kinematics (panels (c) and (d)) is regular, except for a protuberance in the southern region, probably due to a spiral arm, and is similar to that of the gas in the same spatial position. The median error on the stellar velocity is $\sim25$ $\rm km \, s^{-1}$\xspace. The stellar velocity dispersion ranges from 40 $\rm km \, s^{-1}$\xspace to 80 $\rm km \, s^{-1}$\xspace. The metallicity of the ionised gas (panel (e)) is $12+\log[O/H]\sim 9.1$ within R25 and then abruptly decreases, reaching minimum values in the tail towards the South-West. The two sides of the galaxy (SW and NE) show different slopes of the gradients. No metallicity values are reliable for the gas in the clouds. The $A_V$ map (panel (f)) shows a peak of dust attenuation in the core of the galaxy ($A_V\sim 2.5$ mag) and a decline towards the outskirts, where it reaches values of $\sim 0.5$ mag. \begin{figure*} \centering \includegraphics[scale=0.3]{morphs_cont_onlystage0_label.png} \includegraphics[scale=0.3]{morphs_Ha_onlystage0_label.png} \caption{The morphological parameters G, M20, C, A and S for the four galaxies presented in this paper (colored stars) and for the galaxies of a GASP control sample visually selected for not having morphological distortions \citepalias{Vulcani2018c} (black points). The histograms show the distribution of the parameters. Left panels are based on the continuum underlying H$\alpha$\xspace, right panels on the H$\alpha$\xspace images.
\label{fig:morphs} } \end{figure*} The luminosity weighted age (panel (g)) is slightly older within R25, with typical values around 10$^{9.5}$ yr. Outside R25, it has average values around 10$^{8.5}$ yr. The youngest region of the galaxy is found in the South-West part of the object. The mass density map (panel (h)) shows that the bulk of the mass is located in the galaxy core ($\rm 10^9 M_\odot/kpc^2$), while the South-West part of the object contributes very little to the total mass of P8721. Running {\sc sinopsis}\xspace on the integrated spectra of the entire galaxy, we obtain a total M$_\ast$ of 5.6$\rm \times 10^{10} \, M_\odot$. The total ongoing SFR is 1.05 $\rm{M_\odot \, yr^{-1}}$; the galaxy therefore lies on the SFR-mass relation of star-forming galaxies in the field \citepalias{Vulcani2018c}. \subsection{Morphological analysis}\label{sec:morph} To further assess the peculiarity of the light distribution in these galaxies, we computed a number of non-parametric morphological measurements, exploiting the Python package statmorph \citep{Rodriguez2018}, on the images of the continuum underlying the H$\alpha$\xspace (red continuum) and on the H$\alpha$\xspace images, obtained from the fits of {\sc kubeviz}\xspace. Specifically, we measured: \begin{itemize} \item {\it Concentration C}: Ratio of the circular radius containing 80 per cent ($r_{80}$) of a galaxy's light to the radius containing 20 per cent ($r_{20}$) of the light \citep{Bershady2000, Conselice2003, Peth2016}. A large concentration value indicates that the majority of the light is concentrated at the center of the galaxy, i.e.\ the presence of a bulge. \item {\it Asymmetry A}: Difference between the image of a galaxy and the galaxy rotated by 180 degrees \citep{Conselice2000, Peth2016}. This quantifies the fraction of the galaxy's light that is not distributed symmetrically: a large value of the asymmetry indicates that much of the light is in non-symmetric structures. \item {\it Gini Coefficient G}: Measure of the equality of the light distribution in a galaxy \citep{Lorenz1905, Abraham2003, Lotz2004, Conselice2014}. A value of G = 1 is obtained when all of the flux is concentrated in a single pixel, a value of G = 0 when the brightness distribution is homogeneous. \item {\it M20}: Second order moment of the brightest regions of a galaxy \citep{Lotz2004}, tracing the spatial distribution of any bright clumps. It is sensitive to bright structure away from the center of the galaxy; flux is weighted in favor of the outer parts. It is therefore relatively sensitive to merger signals and tidal structures, specifically star-forming regions formed in the outer spiral or tidal arms. If no such structures are in the image, the 20\% brightest pixels will most likely be concentrated in the center of the galaxy, which is weighted lower. Low values of M20 are obtained for smooth galaxies with a bright nucleus (Ellipticals, S0 or Sa), much higher (less negative) values for galaxies with extended arms featuring bright H{\sc ii} regions. \item {\it Smoothness S}: Degree of small-scale structure \citep{Conselice2003, Takamiya1999}. Larger values of S correspond to galaxies that are less smooth (i.e. more `clumpy'). \end{itemize} Figure \ref{fig:morphs} compares the values of the morphological measurements of the three galaxies under inspection to those of a GASP (field+cluster) control sample carefully selected for not having morphological distortions \citepalias{Vulcani2018c}.
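For reference, such measurements can be obtained with a few lines of code; the following is a minimal sketch, assuming \texttt{image} is a background-subtracted 2D map and \texttt{segmap} a matching segmentation map (both placeholders, as is the \texttt{gain} value), and is not our actual pipeline.
\begin{verbatim}
# Minimal sketch: non-parametric morphology with statmorph.
# `image` is assumed to be a background-subtracted 2D array
# (e.g. an Halpha map or the red continuum image) and `segmap`
# a segmentation map with the galaxy labelled 1; the gain
# value is illustrative only.
import statmorph

morphs = statmorph.source_morphology(image, segmap, gain=1.0)
m = morphs[0]
print(m.concentration, m.asymmetry, m.gini, m.m20, m.smoothness)
\end{verbatim}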
We carefully checked that none of the galaxies of the control sample has any similar detached clouds. As far as the red continuum is concerned, the { four} galaxies present relatively high concentration values, slightly higher than the bulk of the control sample. They also present similar asymmetry and Gini values, indicating that the stars are symmetrically and quite homogeneously distributed. { Only P19482 is offset, indicating the presence of inhomogeneities also in the stellar component.} They do not stand out in the M20-Asymmetry and M20-Gini planes, excluding ongoing mergers for these objects \citep[e.g.][]{Lotz2008a, Lotz2008b}. Considering H$\alpha$\xspace, it emerges that the star formation is less concentrated than the stars, both for these galaxies and for the control sample; P8721 is one of the least concentrated objects of all. Moving to asymmetry and smoothness, overall all galaxies are characterised by higher absolute values than for the stellar continuum. P8721 and P63661 clearly stand out in these distributions. P8721 is peculiar also in terms of its Gini coefficient, while the other two galaxies follow the control sample trends. { P19482 presents low values of M20, confirming the presence of many clumpy star-forming regions spread across the disk.} Taken together, these results suggest that, as far as the red continuum (which traces the stars) is concerned, the galaxies are ``normal''. Peculiarities with respect to a control sample of undisturbed galaxies emerge when looking at the star-forming regions only. The H$\alpha$\xspace distribution is clumpy, with small-scale structures. P8721 is the most peculiar object, followed by P63661 and P19482, while P95080 is more regular. This analysis therefore corroborates the previous analysis, based on visual morphology, that the galaxies are regular when the stellar light distribution is considered. In almost all cases, the evidence for anomalies is stronger when the H$\alpha$\xspace images are analysed, suggesting that the ionised gas is the most disturbed component. \section{Discussion} In the previous section we have described the spatially resolved properties of three galaxies that present peculiar light distributions and a number of common features. First of all, they are all characterised by a ``tattered'' H$\alpha$\xspace distribution. They all present H$\alpha$\xspace clouds beyond the stellar disk, up to several kpc. Such clouds are visible even in P8721, an unfavourable case given its high inclination. These clouds typically have a size of 3-5 kpc, but a detached region $\sim 20$ kpc long is visible in P63661. Within the GASP sample, these, along with P5215 discussed in \citetalias{Vulcani2018b}, are the only galaxies presenting such an extended and peculiarly tattered gas distribution. According to the gas kinematics, these clouds do belong to the galaxy: their velocity is similar to that of the closest part of the galaxy. Both the gas and the stellar kinematics are regular and resemble each other. We can therefore exclude processes that involve a redistribution of the stellar orbits, such as mergers \citepalias[see, e.g.,][]{Vulcani2017c}, or processes strongly affecting the gas distribution, such as strong ram pressure stripping (see, e.g., \citealt{Gunn1972}, \citetalias{Poggianti2017a}). { Simulations by e.g. Jesseit et al. (2007) and Kronberger et al.
(2007) have indeed performed 2D kinematic analyses of samples of simulated binary disc merger remnants with different mass ratios, showing that merger remnants usually display a multitude of phenomena, such as heavily distorted velocity fields, misaligned rotation, embedded discs, gas rings, counter-rotating cores and kinematically misaligned discs. None of these features is evident from our maps.} At least two other pieces of evidence exclude that the galaxies have undergone a recent merger. On the one hand, the BPT diagrams presented in Fig. \ref{fig:BPT} show { that there are no signs of tidally induced shocks, associated with the interaction process, contributing to the ionization of the gas (Colina, Arribas \& Monreal-Ibero 2005). Extended shock ionization has been previously reported in local U/LIRGs (Monreal-Ibero, Arribas \& Colina 2006; Rich et al. 2011; Rich, Kewley \& Dopita 2014). In all these cases, shock ionization exhibits the characteristics of extended low-ionization nuclear emission-line region (LINER)-like emission with broadened line profiles. In P95080, P19482 and P63661 we find no broadened line profiles falling in the so-called composite region. Only P8721 has a few central spaxels characterised by composite spectra, but composite regions due to interactions would be expected more in the outer parts of the galaxy. } In addition, we see a continuous distribution, not distinct sequences, confirming again that we are observing gas belonging to a single galaxy. Indeed, in cases of e.g. mergers or gas accretion we should see different line ratios, indicative of different chemical abundances in the different regions of the galaxies { \citepalias[see, e.g., Fig. 12 in][]{Vulcani2018b}}. On the other hand, the observed metallicity distribution has a smooth gradient, suggesting that no strong process has altered it considerably. The asymmetry of the metallicity gradient in P8721 might however suggest that this galaxy is accreting low metallicity gas from the Southern side, similarly to what was presented for another GASP galaxy in \citetalias{Vulcani2018}, but no other pieces of evidence support this scenario. P8721 is also the most peculiar object when the morphological analysis on the H$\alpha$\xspace is performed, being at the tail of the distributions in all the quantities analysed. \begin{figure*} \centering \includegraphics[scale=0.27]{filaments_sfh_v2.png} \caption{Stellar maps of different ages, illustrating the average star formation rate per kpc$^2$ during the last $2\times 10^7$ yr (left), between $2\times 10^7$ yr and $5.7 \times 10^8$ yr (central left), between $5.7 \times 10^8$ yr and $5.7 \times10^9$ yr (central right) and $> 5.7 \times 10^9$ yr ago (right), for P95080 (upper), P63661 (central) and P8721 (bottom). In all the plots, the green ellipses show the $r_e$, the dashed blue ellipses show the maximum radius at which H$\alpha$\xspace is detected (see text for details). \label{fig:SFH} } \end{figure*} \begin{figure*} \centering \includegraphics[scale=0.25]{filaments_sfh_v2_control.png} \caption{Same as Fig.~\ref{fig:SFH}, but for galaxies in the control sample. \label{fig:SFH_control} } \end{figure*} In addition to the luminosity weighted age maps, to better investigate the mode of growth of these galaxies (inside-out or outside-in), in Fig. \ref{fig:SFH} we present the spatially resolved star formation histories of the galaxies.
These plots show the variation of the SFR across cosmic time in four age bins, chosen in such a way that the differences between the spectral characteristics of the stellar populations are maximal (\citealt{Fritz2007} and \citetalias{Fritz2017}). Note that {\sc sinopsis} tends to include a spurious small percentage of old ($t>5.7\times 10^8$ yr) stars when the spectra have low signal-to-noise values. To be conservative, we neglect the contribution of stars older than $5.7\times 10^8$ yr in low S/N spectra (S/N$<$3). The entire contribution of young stars, instead, is taken into account, since it is estimated from the emission lines, which are more reliable features. In Fig. \ref{fig:SFH} each row corresponds to a galaxy, each column to a different age bin. For P95080, { P19482} and P63661 the inside-out growth is striking. In the two oldest age bins ($t>5.7\times 10^8$ yr ago), the star formation mostly occurred within half of the current { maximum H$\alpha$\xspace disk} and only in recent epochs ($t<5.7\times 10^8$ yr ago) did the outer part of the disk start to form. The maximum spatial extension of the star-forming disk is observed in the current age bin ($t<2\times 10^7$ yr). In P8721, instead, the SFR is relatively constant with time across the whole galaxy, and { external regions} might already have been forming stars in the oldest age bin. This is another piece of evidence that distinguishes P8721 from the other galaxies. { For comparison, Figure \ref{fig:SFH_control} shows the maps of SFR in the oldest and in the youngest age bins for the four galaxies in the control sample already presented in Fig.\ref{fig:rgb_image_control}. In these galaxies the spatial extension of the disk is very similar at the two ages, suggesting that not all galaxies are characterised by strong inside-out growth.} The next step to better understand the possible physical mechanisms acting on these galaxies is to characterise the environment around them. \subsection{The environments} To characterise the environments of the galaxies presented in the previous section, we exploit two publicly available catalogues. Both are based on the spectroscopic galaxy sample of SDSS Data Release 10, complete down to $m_r=17.77$ mag. The first catalogue identifies galaxy groups and clusters and was published by \cite{Tempel2014_g}. The second catalogue identifies galaxy filaments and was published by \cite{Tempel2014_f}. \begin{table*} \caption{Properties of the groups hosting the galaxies. Values are taken from \citet{Tempel2014_g}. For each group, the redshift (z$_{\rm gr}$), the coordinates (RA$_{\rm gr}$ and DEC$_{\rm gr}$), the number of group members (N$_{\rm gals, \, gr}$), the virial radius R$_{\rm vir, \, gr}$, and the halo mass assuming both an NFW and a Hernquist profile ($\log M^{NFW}_{\rm halo, \, gr}$, $\log M^{Her}_{\rm halo, \, gr}$) are given. P63661b is the larger group close to the P63661 system.
\label{tab:groups}} \centering \setlength{\tabcolsep}{2pt} \begin{tabular}{crrrrrrr} \hline \multicolumn{1}{c}{ID} & \multicolumn{1}{c}{z$_{\rm gr}$} & \multicolumn{1}{c}{RA$_{\rm gr}$} & \multicolumn{1}{c}{DEC$_{\rm gr}$} & \multicolumn{1}{c}{N$_{\rm gals, \, gr}$} & \multicolumn{1}{c}{R$_{\rm vir, \, gr}$} & \multicolumn{1}{c}{$\log M^{NFW}_{\rm halo, \, gr}$} & \multicolumn{1}{c}{$\log M^{Her}_{\rm halo, \, gr}$} \\ \multicolumn{1}{c}{} & \multicolumn{1}{c}{} & \multicolumn{1}{c}{(J2000)} & \multicolumn{1}{c}{(J2000)} & \multicolumn{1}{c}{} & \multicolumn{1}{c}{(kpc)} & \multicolumn{1}{c}{(M$_\odot$)} & \multicolumn{1}{c}{(M$_\odot$)} \\ \hline P95080 &0.04136 &198.08969 &-0.23002 &3 & 315 & 12.63 & 12.84 \\ P19482 &- &- &- &- & - & - & - \\ P63661 &0.05597 &218.06171 &0.17165 &2 & 232 &12.58 &12.58 \\ P63661b &0.05612 &217.49573 &0.30415 &32 & 493 &13.77 &13.99\\ P8721 &0.06609 &158.51371 &0.00926 &4 & 276 & 11.61 & 11.81\\ \hline \end{tabular} \end{table*} Table \ref{tab:groups} presents some useful information regarding the groups; Fig. \ref{fig:envs} shows the position on the sky of the targets and their surroundings. All the values are drawn from \cite{Tempel2014_g}. Beyond detecting the filaments, \cite{Tempel2014_f} do not provide any quantity useful to further characterise these structures. None of these galaxies is located in a massive cluster, and { three of them} are members of small (Milky-Way-like, or slightly more massive, with two or three bright members) groups that are embedded in filaments, { while P19482 most likely does not have any close companion, but is still embedded in a filament}. P95080 is part of a three-member group that is located in the center of a long filament { of 32 galaxies}. The galaxy is at 0.5 R$_{\rm vir, \, gr}$ and its closest neighbour is at $\sim$200 kpc (see Tab. \ref{tab:gal_groups}). { P19482 is at the edge of a filament of 12 members and at the intersection of four different filaments, all located at the same redshift (z$\sim$0.040-0.044). In total, these structures have more than 100 members. We remind the reader that the identification of filaments is a delicate task (see also the Introduction), therefore it might be that all these galaxies actually belong to the same structure.} \begin{table} \caption{Distances of the galaxies from the center of their group, in units of R$_{\rm vir, \, gr}$, and distance of the closest galaxy, in kpc. For P63661, the value in parentheses gives the distance from the larger group. \label{tab:gal_groups}} \centering \setlength{\tabcolsep}{2pt} \begin{tabular}{crr} \hline \multicolumn{1}{c}{ID} & \multicolumn{1}{c}{d$_{\rm r_{200}}$} & \multicolumn{1}{c}{d$_{\rm closest}$} \\ \multicolumn{1}{c}{} & \multicolumn{1}{c}{} & \multicolumn{1}{c}{(kpc)} \\ \hline P95080 &0.50 & 193\\ P63661 &0.23 (4.84) & 233\\ P19482 & - & 1750 \\ P8721 & 0.39 & 165\\ \hline \end{tabular} \end{table} P63661 is part of a binary system and its companion is at a distance of 233 kpc. It is at $\sim 0.2 R_{\rm vir, \, gr}$ from the center of the system. About 1 Mpc to the West of P63661, a fairly massive group with 32 members is found. The properties of this group are also listed in Tab.\ref{tab:groups}. P63661 is found at 4.8 R$_{\rm vir, \, gr}$ from the center of this massive group. This group is at the center of a filament, which extends both towards the NW and towards the SE for several Mpc { and includes 51 galaxies}.
\begin{figure*} \centering \includegraphics[scale=0.36,clip, trim=0 0 0 0]{groups_summaryenvs_v2.png} \caption{Position on the sky of the targets, represented by the stars. Filled red squares represent galaxies in groups, according to \citet[][T14, group]{Tempel2014_g}. Empty circles represent galaxies in filaments, according to \citet[][T14, filament]{Tempel2014_f}. Dashed circles indicate the virial radius of the groups. The scale in the bottom right corner shows 1 Mpc at the redshift of each target. { For P19482, the smaller points with different shades of blue show filaments intersecting the one hosting the galaxy.} \label{fig:envs} } \end{figure*} Finally, P8721 is part of a four-member system, embedded in the center of an extended filament { of 35 galaxies}. It is at 0.3 R$_{\rm vir, \, gr}$ from the group center and the closer of the two other galaxies of the group is at 165 kpc. { For reference, we note that in the control sample used here (Sec. \ref{sec:morph}) 13 out of the 14 field galaxies are either binary or single systems, supporting the scenario that the environment might indeed play a role. } { Based on the definition of filaments by \cite{Tempel2014_f}, 7/14 galaxies are in small filaments (fewer than 25 objects), while the others are at the boundaries of filamentary structures. } To understand whether the perturbed morphologies of the galaxies are the result of tidal interactions with their closest neighbours, we follow a crude approach already exploited by, e.g., \cite{Wolter2015, Merluzzi2016}: we estimate the acceleration $a_{tid}$ produced by the closest neighbour on the ISM of the galaxy of interest and compare it with the acceleration from the potential of the galaxy itself, $a_{gal}$. Following \cite{Vollmer2005}, $$ \frac{a_{tid}}{a_{gal}} = \frac{M_{neighbour}}{M_{gal}} \left(\frac{r}{R}-1\right)^{-2} $$ where $R$ is the distance from the centre of the galaxy, $r$ is the distance between the galaxies, and $\frac{M_{neighbour}}{M_{gal}}$ is the stellar mass ratio. This formulation requires the true three-dimensional separation, which we obviously do not have; we can therefore only use the projected distance, which underestimates $r$ and hence overestimates the ratio. Even so, in all cases $\frac{a_{tid}}{a_{gal}}\ll1$: for P8721, for instance, even assuming an equal-mass neighbour at the projected separation $r=165$ kpc and taking $R=15$ kpc gives $a_{tid}/a_{gal}=(165/15-1)^{-2}=10^{-2}$. We can therefore exclude tidal interactions with the other group members. Another source of perturbation to the galaxy morphologies might be their position within the filaments. Galaxies in filaments are indeed expected to have a very different experience from those in largely empty regions \citep{Bahe2013}. All the galaxies analysed have their major axis roughly aligned with the filament they are embedded in. They could therefore either be flowing along the filament or crossing it perpendicularly. In filaments the IGM density is enhanced, raising the possible ram pressure intensity. In particular, galaxies with shallow potential wells can provide only a relatively small restoring force, and significant gas stripping can take place at typical gas densities and velocities \citep[e.g.,][]{Benitez2013}. The clouds we observe could therefore actually be the result of a galaxy crossing a filament. Analytically quantifying the effect of the filament is not straightforward, and simulations have so far not quantified the impact of this environment on the spatially resolved properties of galaxies. Accurately measuring the density of the IGM in these environments and estimating the 3D centre of the filament, needed to quantify the distance of the galaxy from it, are indeed quite hard tasks.
It is however tantalizing to suppose that filaments are responsible for the observed gas distribution. We might therefore be witnessing cosmic web stripping acting on galaxies more massive than dwarfs \citep{Benitez2013}. However, stripping requires a relatively high velocity difference between the galaxy and the filament, and the galaxies simulated by \cite{Benitez2013} were low-mass objects, while the galaxies discussed here have $\log(M_\ast/M_\odot)\gtrsim 10$. So, rather than stripping, we are most likely seeing gas compression due to the galaxies flowing within the filaments. This compression can be induced by an increase in the surrounding thermal pressure and can switch on star formation in the surrounding clouds. Numerical simulations by \cite{Liao2018} show that filaments can assist the gas cooling and increase the star formation in the dark matter haloes residing within them. As a consequence, it might be possible that the densest regions in the circumgalactic gas get switched on in their star formation when the galaxy impacts the sparse IGM. The detached clouds observed around the galaxies with no preferential orientation might indeed be evidence of this phenomenon, which we call ``Cosmic web enhancement''. In \citet[][Paper XII]{Vulcani2018b} we have presented another galaxy with similar features, found in a similar environment. Nonetheless, there are no simulations specifically focusing on the spatial properties of galaxies in filaments. Developing such simulations is now urgent to better investigate this peculiar environment and its effect on galaxies. Indeed, different conditions of the filaments (density, extent, orientation), as well as the inclination of the galaxy with respect to the filament itself, could have different impacts on the embedded galaxies, and this could explain the differences observed in P8721 with respect to the other galaxies. \subsection{External H{\sc ii} regions in the literature}\label{sec:liter} { As mentioned in the Introduction, few studies in the literature have identified H$\alpha$\xspace knots at large radii \citep{Kennicutt1989, Martin2001, Ferguson1998} or in isolation \citep{Gerhard2002, Cortese2004, RyanWeber2004, Oosterloo2004, Sakai2002, Mendes2004}. } All of the above studies are based on traditional observational techniques for observing the H$\alpha$\xspace emission in the outskirts of galaxies. The most exploited one is narrow-band imaging with subsequent subtraction of the broad-band continuum emission. This is however generally insufficiently sensitive to probe large radii. The limitation lies both in the achievable signal-to-noise ratio (S/N) and in the stellar continuum subtraction. Higher spectral resolution is generally preferable, and very narrow filter bandpasses have also been adopted, as has traditional spectroscopy, which however can have quite low throughput. Since the pioneering work of Bland-Hawthorn et al. (1997), the Fabry-Perot staring technique has also been used. These techniques however only permit the detection and basic characterisation of the H{\sc ii} regions, without giving spatially resolved information on the chemical composition and age of the regions. This is now possible thanks to the recent advent of Integral Field Spectrographs (IFS).
However, the known on-going large IFS surveys, like the Calar Alto Legacy Integral Field Area (CALIFA) Survey \citep{Sanchez2012}, the Sydney-AAO Multi-object Integral field spectrograph (SAMI) Survey \citep{Croom2012} and the Mapping Nearby Galaxies at Apache Point Observatory (MaNGA) Survey \citep{Bundy2015}, typically reach out to 2.5-3 effective radii at most \citep{Bundy2015}, and are therefore not designed to catch inhomogeneities in the ionised gas distribution in the galaxy outskirts. GASP, instead, has been designed to focus on the galaxy external regions and allows us to perform a detailed analysis of the galaxy outskirts. { To put our results in context, } we have directly compared the observed H$\alpha$\xspace distributions of P95080, P19482, P63661 and P8721 to those of many other literature samples, and confirmed that they present many peculiarities hardly found in previous studies. Galaxies characterised by narrow-band imaging surveys like the H$\alpha$\xspace Galaxy Survey (H$\alpha$\xspace GS, Shane et al. 2002), the H$\alpha$\xspace galaxy survey \citep{James2004}, the H-alpha Galaxy Groups Imaging Survey (H$\alpha$ggis, PI Erwin), An H$\alpha$\xspace Imaging Survey of Galaxies in the Local 11 Mpc Volume \citep[11Hugs,][]{Kennicutt2008} and Dynamo \citep{Green2014}, or by Fabry-Perot observations like the Gassendi H$\alpha$\xspace survey of SPirals \citep[GHASP,][]{Epinat2008}, almost never present such extended and luminous ($\rm{\log (H\alpha \, [erg \, s^{-1} \, cm^{-2} \, arcsec^{-2}]) > -17.5}$) H$\alpha$\xspace regions located well beyond R25. This might be due to the shallower surface brightness limits reached: the surface brightness limit of the observations of \cite{James2004} is SB(H$\alpha$\xspace+[N{\sc ii}]) $= 10^{-15}\,{\rm erg \, cm^{-2} \, s^{-1} \, arcsec^{-2}}$. In addition, at least some of these surveys \citep[e.g.,][]{Epinat2008} targeted galaxies in the cluster environment, such as Virgo, and the features observed there are most likely due to the ram pressure exerted by the hot intracluster medium \citep{Gunn1972}. The detached H$\alpha$\xspace regions we detect are quite bright and large, and we can exclude that the galaxies are located in clusters. These regions are also much brighter than the emission produced by gaseous haloes and are similar, in terms of H$\alpha$\xspace luminosity, to the intergalactic H{\sc ii} regions discovered by, e.g., \cite{RyanWeber2004}. However, the latter are not always bound to the main galaxy, while all the clouds we discussed present compatible velocities and ionised gas and stellar properties. The results of \cite{RyanWeber2004} are consistent with stars forming in interaction debris as a result of cloud-cloud collisions, while no signs of interactions are evident from our analysis. Unfortunately, no high resolution UV data are currently available for these galaxies. They have been observed with GALEX, but these data are too shallow to detect any detached material. \section{Conclusions} GASP (GAs Stripping phenomena in galaxies with MUSE) is an ESO Large Program with MUSE at the VLT to study the causes and the effects of gas removal processes in galaxies in different environments in the local universe. Within the sample, we identified { four} galaxies that show peculiar ionised gas distributions: several H$\alpha$\xspace clouds have been observed { beyond 4 $r_e$}. The gas kinematics, the metallicity map and the ratios of the emission line fluxes (BPT diagrams) confirm that they do belong to the galaxy gas disk; the stellar kinematics shows that only very weak stellar continuum is associated with them.
Similarly, the star formation history and luminosity weighted age maps point to a recent formation of such clouds, as well as of more than half of the stellar disk for P95080, { P19482} and P63661. The clouds are powered by star formation and are characterised by intermediate values of extinction ($A_V\sim 0.3-0.5$). These, along with an object discussed in \citetalias{Vulcani2018b}, are the only galaxies in the whole GASP non-cluster sample showing such a tattered H$\alpha$\xspace distribution, and we have not found any similar object in existing literature surveys. These galaxies share a similar location in the Universe: they all belong to filamentary structures. We therefore point to a scenario in which the observed features are due to ``Cosmic web enhancement'': we hypothesise that we are witnessing galaxies passing through or flowing within filaments, which are able to cool the gas and increase the star formation in the densest regions of the circumgalactic gas. Observed differences among the galaxies might be due to the different conditions of the filaments. \cite{Liao2018} showed that filaments are an environment that particularly favours this gas cooling, followed by condensation and star formation enhancement. In recent years, there has been increasing interest in the role of filaments in affecting galaxy properties; nonetheless, to our knowledge, this paper presents the first analysis of the effect of this environment on the spatially resolved properties of galaxies, highlighting the importance of this kind of data for gaining insight into galaxy evolution as a function of environment. Targeted simulations illustrating the effect of filaments on galaxy properties are now crucial to make progress on the physical processes acting in the different environments. \section*{Acknowledgements} Based on observations collected at the European Organisation for Astronomical Research in the Southern Hemisphere under ESO programme 196.B-0578. We acknowledge funding from the INAF PRIN-SKA 2017 program 1.05.01.88.04 (PI Hunt). We acknowledge financial contribution from the contract ASI-INAF n.2017-14-H.0. Y.~J. acknowledges support from CONICYT PAI (Concurso Nacional de Inserci\'on en la Academia 2017) No. 79170132 and FONDECYT Iniciaci\'{o}n 2018 No. 11180558. \bibliographystyle{mnras}
\section{Introduction} The most popular mechanism for the formation of large-scale structure and motions in the Universe is the gravitational growth of primordial density perturbations. According to this paradigm, if the density perturbations are small enough to be approximated by linear theory, then the peculiar acceleration vector ${\bf g}({\bf r})$ induced by the matter distribution around position ${\bf r}$ is related to the mass by \begin{equation} {\bf g}({\bf r})= G\bar{\rho} \int\limits_{\bf r}^{\infty} d^3{\bf r}^{\prime} \delta_m({{\bf r}^{\prime}}) \frac{{\bf r}^{\prime}-{\bf r}}{|{\bf r}^{\prime}-{\bf r}|^3} \label{eqn:g(r)} \end{equation} where $\bar{\rho}$ is the mean matter density and $\delta_m({\bf r})=(\rho_m({\bf r})-\bar{\rho})/\bar{\rho}$ is the density contrast of the mass perturbations. In linear theory, the peculiar velocity field, ${\bf v}({\bf r})$, is proportional to the peculiar acceleration: \begin{equation} {\bf v}({\bf r})= \frac{H_0 f(\Omega_{\rm m})}{4 \pi G \bar{\rho}} {\bf g}({\bf r})=\frac{2f(\Omega_{\rm m})}{3 H_0\Omega_{\rm m}}{\bf g}({\bf r}), \label{eqn:v(r)} \end{equation} where $H_0 = 100\,h$ ${\rm km \, s}^{-1} {\rm Mpc}^{-1}$ is the Hubble constant and $f(\Omega_{\rm m})\approx\Omega_{\rm m}^{0.6}$ is the logarithmic derivative of the amplitude of the growing mode of the perturbations in mass with respect to the scale factor (Peebles 1980). The factor $f(\Omega_{\rm m})$ is only weakly dependent on the cosmological constant (Lahav {\it et al.}\ 1991). During the past twenty-five years, particular attention has been paid to the study of the gravitational acceleration and the peculiar velocity vectors of the Local Group (LG) of galaxies. It is now widely accepted that the cosmic microwave background (CMB) dipole is a Doppler effect arising from the motion of the Sun (but see e.g. Gunn 1988 and Paczy\'{n}ski \& Piran 1990 who argue that the CMB dipole is of primordial origin). In this case, the dipole anisotropy of the CMB is a direct and accurate measurement of the LG peculiar velocity (cf. Conklin 1969 and Henry 1971). The LG acceleration can also be estimated using surveys of galaxies tracing the density inhomogeneities responsible for the acceleration. By comparing the CMB velocity vector with the acceleration vector\footnote{Both the CMB velocity and the gravitational acceleration on the LG have units of velocity and are commonly referred to as `dipoles'. Hereafter, the terms `LG velocity' and `LG dipole' will be used interchangeably.} obtained from the galaxy surveys, it is possible to investigate the cause of the LG motion and its cosmological implications. This technique was first applied by Yahil, Sandage \& Tammann (1980) using the Revised Shapley-Ames catalogue and later by Davis \& Huchra (1982) using the CfA catalogue. Both catalogues were two-dimensional and the analyses were done using galaxy f\mbox{}l\mbox{}uxes. Since both the gravity and the f\mbox{}l\mbox{}ux are inversely proportional to the square of the distance, the dipole vector can be calculated by summing the f\mbox{}l\mbox{}ux vectors and assuming an average value for the mass-to-light ratio. Lahav (1987) applied the same method to calculate the dipole anisotropy using maps based on three galaxy catalogues: UGC, ESO and MCG.
The most recent application of the galaxy flux dipole analysis was carried out by Maller {\it et al.}\ (2003), using the two-dimensional Two Micron All-Sky Survey (2MASS) extended source catalogue (XSC), with a limiting magnitude of $K_{\rm s}=13.57$. They found that the LG dipole direction is 16$^\circ$ away from that of the CMB. Our ability to study the LG motion was greatly advanced by the whole-sky galaxy samples derived from $IRAS$ {\it Galaxy Catalogues}. Yahil, Walker \& Rowan-Robinson (1986), Meiksin \& Davis (1986), Harmon, Lahav \& Meurs (1987), Villumsen \& Strauss (1987) and Lahav, Rowan-Robinson \& Lynden-Bell (1988) used the two-dimensional $IRAS$ data to obtain the LG dipole. The dipole vectors derived by these authors are in agreement with each other and with the CMB dipole vector to within 10$^\circ$-30$^\circ$. The inclusion of galaxy redshifts in the dipole analyses allowed the estimation of the distance at which most of the peculiar velocity of the LG is generated ({\it the convergence depth}). However, the estimates of the convergence depth from various data sets have not agreed. Strauss {\it et al.}\ (1992, $IRAS$ sample), Webster, Lahav \& Fisher (1997, $IRAS$ sample), Lynden-Bell, Lahav \& Burstein (1989, optical sample) and da Costa {\it et al.}\ (2000, a sample of early-type galaxies) suggested that the LG acceleration is mostly due to galaxies within $\lesssim 50 h^{-1} {\rm Mpc}$, while other authors such as Scaramella, Vettolani \& Zamorani (1994, Abell/ACO cluster sample), Branchini \& Plionis (1996, Abell/ACO cluster sample), Kocevski {\it et al.}\ (2004) and Kocevski \& Ebeling (2005, both using samples of X-ray clusters) claimed that there is a significant contribution to the dipole from depths of up to $\approx$200 $h^{-1} {\rm Mpc}$. Dipole analyses are often used to estimate the combination of the matter density and biasing parameters, $\omegam$ and $b$. In theory, one can equate the velocity inferred from the CMB measurements with the value derived from a galaxy survey and obtain a value for $\beta$. In practice, however, the galaxy surveys do not measure the true total velocity due to their finite depth (e.g. Lahav, Kaiser \& Hoffman 1990 and Juszkiewicz, Vittorio \& Wyse 1990). The true ${\bf v}_{LG}$, as obtained from the CMB dipole, arises from structure on all scales, including structures further away than the distance a galaxy survey can accurately measure. Furthermore, the magnitude/f\mbox{}l\mbox{}ux/diameter limit of the survey and any completeness variations over the sky introduce selection effects and biases to the calculations. These effects amplify the errors at large distances (for redshift surveys) and faint magnitudes (for two-dimensional surveys), where the sampling of the galaxy distribution becomes more sparse. This, combined with the fact that we sample discretely from an underlying continuous mass distribution, leads to an increase in the shot noise error. There may also be a significant contribution to the dipole from galaxies behind the Galactic Plane ({\it the zone of avoidance}). The analysis of the convergence of the dipole is further complicated by the redshift distortions on small and large scales, which introduce systematic errors to the derived dipole ({\it the rocket effect}, Kaiser 1987). The following sections discuss these effects in the context of two different models of biasing.
In this paper, we use the Two Micron All-Sky Redshift Survey (2MRS, Huchra {\it et al.}\ 2005)\footnote{This work is based on observations made at the Cerro Tololo Interamerican Observatory (CTIO), operated for the US National Science Foundation by the Association of Universities for Research in Astronomy.} to study the LG dipole. The inclusion of the redshift data allows the calculation of the selection effects of the survey as a function of distance and enables the study of convergence, and thus improves upon the analysis of Maller {\it et al.}\ (2003). The paper is structured as follows: the Two Micron All-Sky Redshift Survey is described in Section~\ref{sec:dip:data}. Section 3 discusses the method used in the analysis, including the different weighting schemes, the rocket effect and the choice of reference frames. The results are presented in Section~\ref{sec:dip:results}. The final section includes some concluding remarks and plans for future work. \section{The Two Micron All-Sky Redshift Survey}\label{sec:dip:data} The Two Micron All-Sky Redshift Survey (2MRS) is the densest all-sky redshift survey to date. The galaxies in the northern celestial hemisphere are being observed mainly by the FLWO 1.5-m telescope and, at low latitudes, by the CTIO. In the southern hemisphere, most galaxies are observed as part of the Six Degree Field Galaxy Survey (6dFGS, Jones {\it et al.}\ 2004) conducted by the Anglo-Australian Observatory. The first phase of the 2MRS is now completed. In this phase we obtained redshifts for approximately 23,000 2MASS galaxies from a total sample of about 24,800 galaxies with extinction-corrected magnitudes (Schlegel, Finkbeiner \& Davis 1998) brighter than $K_{\rm s}=11.25$. This magnitude limit corresponds to a median redshift of $z\approx0.02$ ($\approx 60 h^{-1}$ Mpc). The majority of the 1600 galaxies that remain without redshifts are at very low Galactic latitudes or obscured/confused by the dust and the high stellar density towards the Galactic Centre. Figure~\ref{fig:2MRS} shows all the objects in the 2MRS in Galactic Aitoff Projection. Galaxies with ${\rm z}\leq0.01$ are plotted in red, $0.01<{\rm z}\le0.025$ are plotted in blue, $0.025<{\rm z}<0.05$ are plotted in green and ${\rm z}\geq0.05$ are plotted in magenta. Galaxies without measured redshifts are plotted in black. The 2MRS can be compared with the deeper 2MASS galaxy catalogue ($K<14$ mag) shown in Jarrett (2004, Figure 1). \begin{figure*} \psfig{figure=Figures/2MRS.ps,angle=0,width=\textwidth,clip=} \caption[All Objects in the 2MASS Redshift Catalogue in Galactic Aitoff Projection] {All Objects in the 2MASS Redshift Catalogue in Galactic Aitoff Projection. Galaxies with ${\rm z}\leq0.01$ are plotted in red, $0.01<{\rm z}\le0.025$ are plotted in blue, $0.025<{\rm z}<0.05$ are plotted in green and ${\rm z}\geq0.05$ are plotted in magenta. Galaxies without measured redshifts are plotted in black. The masked region is outlined by dashed lines.} \label{fig:2MRS} \end{figure*} \subsection{Survey Completeness}\label{sec:2masscomp} The 2MASS\footnote{The 2MASS database and the full documentation are available on the WWW at http://www.ipac.caltech.edu/2mass.} has great photometric uniformity and an unprecedented integral sky coverage. The photometric uniformity is better than $4\%$ over the sky, including the celestial poles (e.g. Jarrett {\it et al.}\ 2000$a$, 2003).
The uniform completeness of the galaxy sample is limited by the presence of foreground stars. For a typical high-latitude field, less than $2\%$ of the area is masked by stars. These missing regions are accounted for using a coverage map, defined as the fraction of the area of an 8\arcmin$\times$8\arcmin pixel that is not obscured by stars brighter than 10th mag. Galaxies are then weighted by the inverse of the completeness, although the analysis is almost unaffected by this process as the completeness ratio is very close to one for most parts of the sky. The stellar contamination of the catalogue is low and is reduced further by manually inspecting the objects below a redshift of $cz=200$\kmps. The foreground stellar confusion is highest at low Galactic latitudes, resulting in decreasing overall completeness of the 2MASS catalogue (e.g. Jarrett {\it et al.}\ 2000$b$) and consequently of the 2MRS sample\footnote{See Maller {\it et al.}\ (2005) who reduce the stellar contamination in the 2MASS XSC by cross-correlating stars with galaxy density.}. Stellar confusion also produces colour bias in the 2MASS galaxy photometry (Cambresy, Jarrett \& Beichman 2005), but this bias should not be significant for the 2MRS because of its relatively bright magnitude limit. In order to account for incompleteness at low Galactic latitudes we fill the Zone of Avoidance (the plane where $|b|<5^\circ$, widening to $|b|<10^\circ$ in the region $|l|<30^\circ$) with galaxies. We keep the galaxies with observed redshifts and apply two different methods to compensate for the unobserved (masked) sky: \begin{itemize} \item {\bf Method 1}: The masked region is filled with galaxies whose f\mbox{}l\mbox{}uxes and redshifts are chosen randomly from the whole data set. These galaxies are placed at random locations within the masked area. The masked region then has the same average density of galaxies as the rest of the sky. \item {\bf Method 2}: The masked region is filled following Yahil {\it et al.}\ (1991). The area is divided into 36 bins of $10^\circ$ in longitude. In each angular bin, the distance is divided into bins of 1000 \kmps. The galaxies in each longitude/distance bin are then sampled from the corresponding longitude/distance bins in the adjacent strips $-|b_{masked}|-10^\circ<b<|b_{masked}|+10^\circ$ (where $|b_{masked}|=5^\circ$ or $|b_{masked}|=10^\circ$). These galaxies are then placed at random latitudes within the mask region. This procedure gives similar results to the more elaborate method of Wiener reconstruction across the zone of avoidance (Lahav {\it et al.}\ 1994). The number of galaxies in each masked bin is set to a random Poisson deviate whose mean equals the mean number of galaxies in the adjacent unmasked strips. This procedure is carried out to mimic the shot noise effects. \end{itemize} In reality, the shape of the Zone of Avoidance is not as symmetric as assumed in this paper, with the Galactic Bulge centred at $l\approx+5^\circ$ and with latitude offsets (see Kraan-Korteweg 2005). However, since we keep the galaxies in the masked regions, our dipole determinations should not be greatly influenced by assuming a symmetric mask. We test this by shifting the centre of the Galactic Bulge and confirm that our results are not affected. Figure~\ref{fig:2MRS+mask} shows the 2MRS galaxies used in the analyses in a Galactic Aitoff projection.
The galaxies in masked regions are generated using the first (top plot) and the second method (bottom plot). \begin{figure*} $\begin{array}{c} \psfig{figure=Figures/2MRS+mask1.ps,angle=0,width=0.9\textwidth, height=75mm, clip=} \\ \psfig{figure=Figures/2MRS+mask2.ps,angle=0,width=0.9\textwidth, height=75mm, clip=} \\ \end{array}$ \caption[Objects in the 2MRS including the random galaxies] {Top: Objects in the 2MASS Redshift Catalogue used in the analysis in a Galactic Aitoff Projection, including the random galaxies generated using the first technique. Galaxies with ${\rm z}\leq0.01$ are plotted in red, $0.01<{\rm z}\le0.025$ are plotted in blue, $0.025<{\rm z}<0.05$ are plotted in green and ${\rm z}\geq0.05$ are plotted in magenta. Bottom: Same as the top plot, but including the random galaxies generated using the second technique. The regions inside the dashed lines in the plots are masked out and replaced with the random galaxies shown in the plots. There are 21510 galaxies in each plot.} \label{fig:2MRS+mask} \end{figure*} \subsection{Magnitude and F\mbox{}l\mbox{}ux Conversions} The 2MRS uses the 2MASS magnitude $K_{20}$, which is defined\footnote{Column 17 (k$\_$m$\_$k20fc) in the 2MASS XSC} as the magnitude inside the circular isophote corresponding to a surface brightness of $\mu_{K_{\rm s}}=20$ mag arcsec$^{-2}$ (e.g. Jarrett {\it et al.}\ 2000$a$). The isophotal magnitudes underestimate the total luminosity by $10\%$ for early-type and $20\%$ for late-type galaxies (Jarrett {\it et al.}\ 2003). Following Kochanek {\it et al.}\ (2001, Appendix), an offset of $\Delta=-0.20\pm0.04$ is added to the $K_{20}$ magnitudes. The galaxy magnitudes are corrected for Galactic extinction using the dust maps of Schlegel, Finkbeiner \& Davis (1998) and an extinction correction coefficient of $R_K=0.35$ (Cardelli, Clayton \& Mathis 1989). As expected, the extinction corrections are small for the 2MRS sample. The $K_{\rm s}$ band $k$-correction is derived by Kochanek {\it et al.}\ (2001) based on the stellar population models of Worthey (1994). The $k$-correction, $k(z)=-6.0\log(1+z)$, is independent of galaxy type and valid for $z\lesssim 0.25$. The f\mbox{}l\mbox{}uxes $S$ are computed from the apparent magnitudes using \begin{equation} S=S(0\,{\rm mag})10^{-0.4(K_{20}+ZPO)} \end{equation} where the zero point offset is $ZPO = 0.017\pm0.005$ and $S(0 \, {\rm mag})=1.122\times10^{-14}\pm1.891\times10^{-16} {\rm W \, cm}^{-2}$ for the $K_{\rm s}$ band (Cohen, Wheaton \& Megeath 2003). \subsection{The Redshift Distribution and the Selection Function}\label{sec:2mass:nz} The redshift distribution of the 2MRS is shown in Figure~\ref{fig:2MRSnz}. The $IRAS$ PSCz survey redshift distribution (Saunders {\it et al.}\ 2000) is also plotted for comparison. The 2MRS samples the galaxy distribution better than the PSCz survey out to $cz=15000$\kmps. The selection function of the survey (i.e. the probability of detecting a galaxy as a function of distance) is modelled using a parametrised fit to the redshift distribution: \begin{equation} dN(z)=Az^\gamma\exp\left[-\left(\frac{z}{z_c}\right)^\alpha\right]dz\;, \label{eqn:dnz2mrs} \end{equation} with best-fit parameters of $A=116000\pm4000$, $\alpha=2.108\pm0.003$, $\gamma=1.125\pm0.025$ and $z_c=0.025\pm0.001$. This best-fit is also shown in Figure~\ref{fig:2MRSnz} (solid line).
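A fit of this kind can be reproduced along the following lines; this is a minimal sketch assuming \texttt{z} is an array of survey redshifts, with illustrative binning and starting values rather than the exact choices made in our analysis.
\begin{verbatim}
# Minimal sketch of the dN/dz fit of Equation (4), assuming
# `z` is a 1D array of survey redshifts; the binning and the
# initial guesses are illustrative only.
import numpy as np
from scipy.optimize import curve_fit

def dndz(z, A, gamma, alpha, zc):
    return A * z**gamma * np.exp(-(z / zc)**alpha)

counts, edges = np.histogram(z, bins=50, range=(0.0, 0.1))
zmid = 0.5 * (edges[:-1] + edges[1:])
dz = edges[1] - edges[0]
popt, _ = curve_fit(dndz, zmid, counts / dz,
                    p0=[1.2e5, 1.1, 2.1, 0.025])
A, gamma, alpha, zc = popt
\end{verbatim}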
The overall selection function $\phi(r)$ is the redshift distribution divided by the volume element \begin{equation} \phi(r)=\frac{1}{\Omega_s r^2} \left(\frac{dN}{dz}\right)_r \left(\frac{dz}{dr}\right)_r\; \end{equation} where $\Omega_s$ ($\approx 4\pi$ steradians) is the solid angle of the survey and $r$ is the comoving distance. \begin{figure} \psfig{figure=Figures/2mrs+pscz.ps,angle=90,width=0.5\textwidth,clip=} \caption[Redshift histogram for 2MRS galaxies] {Redshift histogram for 2MRS galaxies and a least squares fit (Equation~\ref{eqn:dnz2mrs}) to the data (black). For comparison, also plotted is a redshift histogram for PSCz galaxies (Saunders {\it et al.}\ 2000) (red).} \label{fig:2MRSnz} \end{figure} \subsection{Taking out the Local Group Galaxies} Galaxies that are members of the Local Group need to be removed from the 2MRS catalogue to maintain the internal consistency of the analysis. We used the Local Group member list of thirty-five galaxies (including the Milky Way) given in Courteau \& Van den Bergh (1999) to identify and remove eight LG members (IC 10, NGC 147, NGC 185, NGC 205, NGC 6822, M31, M32, M33) from our analysis. In Section 4, we will calculate the acceleration on the Milky Way due to these LG members. \subsection{Assigning distances to nearby Galaxies} In order to reduce the distance conversion errors, we cross-identified 35 galaxies which have HST Key Project distances (see Freedman {\it et al.}\ 2001 and the references therein) and 110 galaxies which have distance measurements compiled from several sources (see Karachentsev {\it et al.}\ 2004 and the references therein). We assign these galaxies their measured distances instead of converting them from redshifts. In addition, we identify nine blue-shifted galaxies in the 2MRS which are members of the Virgo cluster. We assign these galaxies the distance to the centre of Virgo ($\approx15.4$ Mpc, Freedman {\it et al.}\ 2001). Finally, there are four remaining blue-shifted galaxies without known distance measurements, which are assigned a distance of 1.18 $h^{-1}$ Mpc\footnote{This is the zero-velocity surface which separates the Local Group from the field that is expanding with the Hubble flow (Courteau \& Van den Bergh 1999).}. Thus, by assigning distances to nearby galaxies, we do not need to exclude any non-LG galaxy from the analysis. \section{The Methods and Weighting Schemes} In order to compare the CMB and the LG dipoles, it is necessary to postulate a relation between the galaxy distribution and the underlying mass distribution. In this paper, we will use both a {\it number weighted} and a {\it f\mbox{}l\mbox{}ux weighted} prescription. Although quite similar in formulation, these schemes are based on very different models for galaxy formation. The number weighted prescription assumes that the mass distribution in the Universe is a continuous density field and that the galaxies sample this field in a Poisson way. On the other hand, the f\mbox{}l\mbox{}ux weighted model is based on the assumption that the mass in the Universe is entirely locked to the mass of the haloes of the luminous galaxies.
\subsection{Number Weighted Dipole} It is commonly assumed that the galaxy and the mass distributions in the Universe are directly proportional to each other and are related by a proportionality constant\footnote{More complicated relations have been suggested for biasing models; examples include non-linear and `stochastic' relations. Also, the halo model of clustering involves a different biasing prescription.}, the linear bias parameter $b$: $\delta n/n\equiv\delta_g = b \delta_m$. In this case, Equation~\ref{eqn:v(r)} for the LG can be rewritten as \begin{equation} {\bf v}_{LG}=\frac{H_0 \beta}{4 \pi}\int_{{\bf r}}^{\infty} d^3{\bf r}^{\prime} \delta_g({{\bf r}^{\prime}}) \frac{{\bf r}^{\prime}-{\bf r}}{|{\bf r}^{\prime}-{\bf r}|^3} \label{eqn:v(r)_LG} \end{equation} where $\beta \equiv \omegam^{0.6}/b$. For the number weighted model, incomplete sampling due to the magnitude limit is described by the selection function, $\phi(r)$, given in Section~\ref{sec:2mass:nz}. Each galaxy $i$ is assigned a weight: \begin{equation} w_i=\frac{1}{\phi(r_i)C_{i}} \end{equation} where $\phi(r_i)$ and $C_{i}$ are the values of the radial selection function and the completeness ($0\leq C_{i} \leq1$) for each galaxy, respectively. The observed velocity of the Local Group with respect to the CMB is given by \begin{equation} {\bf v}({\bf r})= \frac{H_0 \beta}{4 \pi \bar{n}}\sum\limits_{i}^N\, \frac{w_i\hat{{\bf r}}_i}{r_{i}^2} \label{eqn:obsdip} \end{equation} where $\bar{n}$ is the mean galaxy density of the survey and $\hat{{\bf r}}_i$ is the unit vector of the galaxy's position. The sum in the equation is over all galaxies in the sample that lie in the distance range $r_{min}<r_i<r_{max}$. Calculated this way, the velocity vector does not depend on the Hubble constant ($h$ cancels out). If the galaxies are assumed to have been drawn by a Poisson point process from an underlying density field, then it is straightforward to calculate the shot noise errors. The shot noise is estimated as the {\it rms} of the cumulative variance, $\sigma_{sn}^2$, given by \begin{equation} \sigma_{sn}^2= \left(\frac{H_0 \beta}{4 \pi \bar{n}}\right)^2\sum\limits_{i}^N\, \left(\frac{\hat{{\bf r}}_i}{r_{i}^2\phi(r_i)C_{i}}\right)^2\;. \end{equation} The shot noise error per dipole component is $\sigma_{1D}=\sigma_{sn}/ \sqrt{3}$. \subsection{F\mbox{}l\mbox{}ux Weighted Dipole} For this model, each galaxy is a `beacon' which represents the underlying mass. This is characterised by the mass-to-light ratio $\Upsilon={\rm M}/{\rm L}$. $\Upsilon$ is probably not constant and varies with galaxy morphology (e.g. Lanzoni {\it et al.}\ 2004), but mass-to-light ratios of galaxies vary less in the near-infrared than in the optical (e.g. Cowie {\it et al.}\ 1994; Bell \& de Jong 2001).
In the context of dipole estimation, this model of galaxy formation implies that the Newtonian gravitational acceleration vector for a volume limited sample is
\begin{eqnarray}
{\bf g}({\bf r})&=& {\rm G} \sum\limits_{i}\, M_{i} \frac{\hat{{\bf r}}_i}{r_{i}^2} \backsimeq {\rm G} \left\langle\frac{M}{L}\right\rangle\sum\limits_{i}\, L_{i} \frac{\hat{{\bf r}}_i}{r_{i}^2}\nonumber \\
&=&4\pi{\rm G} \left\langle\frac{M}{L}\right\rangle\sum\limits_{i}\, {\rm S}_{i} \hat{{\bf r}}_i,
\label{eqn:glum}
\end{eqnarray}
where the sum is over all galaxies in the Universe, $\left\langle M/L \right\rangle$ is the average mass-to-light ratio and ${\rm S}_i=L_i/(4 \pi r_i^2)$ is the f\mbox{}l\mbox{}ux of galaxy $i$. The peculiar velocity vector is derived by substituting Equation~\ref{eqn:glum} into the second equality of Equation~\ref{eqn:v(r)}. For a f\mbox{}l\mbox{}ux limited catalogue the observed LG velocity is \begin{equation} {\bf v}({\bf r})= \frac{8\pi{\rm G}f(\Omega_{\rm m})}{3 H_0\Omega_{\rm m}b_{\rm L}} \left\langle\frac{M}{L}\right\rangle\sum\limits_{i}\, w_{L_i}{\rm S}_{i} \hat{{\bf r}}_i \label{eqn:vl} \end{equation} where $b_{\rm L}$ is the luminosity bias factor, introduced to account for the dark matter haloes not fully represented by 2MRS galaxies, and $w_{L_i}$ is the weight assigned to galaxy $i$, derived in the next section. The mass-to-light ratio, assumed to be constant, is given by \begin{equation} \left\langle\frac{M}{L}\right\rangle = \frac{\rho_{\rm m}}{\rho_{\rm L}} =\frac{3 H_0^2 \Omega_{\rm m}}{8\pi G \rho_{\rm L}} \end{equation} where $\rho_{\rm L}$ is the luminosity density, so Equation~\ref{eqn:vl} can be rewritten as: \begin{equation} {\bf v}({\bf r})= \frac{H_0f(\Omega_{\rm m})}{\rho_{\rm L}b_{\rm L}}\sum\limits_{i}^{N}\, w_{L_i}{\rm S}_{i} \hat{{\bf r}}_i. \label{eqn:vlum} \end{equation} The flux weighting method (originally proposed by Gott) has been applied extensively to two-dimensional galaxy catalogues (e.g. Yahil, Walker \& Rowan-Robinson 1986, Villumsen \& Strauss 1987, Lahav, Rowan-Robinson \& Lynden-Bell 1988) and most recently to the 2MASS XSC (Maller {\it et al.}\ 2003). Since these surveys lack radial information, the dipoles were calculated either by assuming $w_{L_i}=1$ (e.g. Maller {\it et al.}\ 2003) or by using a luminosity function based on a redshift survey in a section of the two-dimensional catalogue (e.g. Lahav, Rowan-Robinson \& Lynden-Bell 1988). In either case, it was not possible to determine the convergence of the dipole as a function of redshift. The three-dimensional $IRAS$ dipoles (Strauss {\it et al.}\ 1990, Webster, Lahav \& Fisher 1997, Schmoldt {\it et al.}\ 1999 and Rowan-Robinson {\it et al.}\ 2000) were derived using the number weighted scheme, because the $IRAS$ catalogues are biased towards star-forming galaxies with widely varying mass-to-light ratios, resulting in a very broad luminosity function and making it difficult to estimate distances. The 2MRS is mainly sensitive to total stellar mass rather than instantaneous star formation rates (e.g. Cole et al. 2001) and consequently the 2MRS mass-to-light ratios do not have as much scatter. Thus, for the first time, the 2MRS enables the determination of the convergence of the f\mbox{}l\mbox{}ux dipole as a function of distance.
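To make the convergence measurement concrete, the f\mbox{}l\mbox{}ux weighted sum of Equation~\ref{eqn:vlum} can be accumulated within concentric spheres of increasing radius; the sketch below assumes precomputed input arrays (all names are placeholders) and adopts $b_{\rm L}=1$ for illustration.
\begin{verbatim}
# Sketch: cumulative flux-weighted dipole within concentric
# spheres (cf. Equation 13). Assumed inputs: r (distances),
# flux (S_i), w (weights w_L), nhat (N x 3 unit vectors),
# H0, f_Om (f(Omega_m)) and rho_L; b_L = 1 for illustration.
import numpy as np

order = np.argsort(r)                        # sort by distance
contrib = (w * flux)[order, None] * nhat[order]
v_cum = (H0 * f_Om / rho_L) * np.cumsum(contrib, axis=0)
amplitude = np.linalg.norm(v_cum, axis=1)    # |v(<r)| vs depth
\end{verbatim}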
There are many advantages to using the f\mbox{}l\mbox{}ux weighted model for the dipole calculation and these will be discussed in the coming sections. For the f\mbox{}l\mbox{}ux weighted case, the weighting function is derived as follows: let $\rho_{\rm L}({\rm L}\geq 0)$ be the luminosity density in the volume element $\delta V$ of a volume limited catalogue. In this case the dipole velocity is simply \begin{equation} {\bf v}({\bf r})= \frac{H_0f(\Omega_{\rm m})}{\rho_{\rm L}b_{\rm L}}\sum\limits_{i}^{N}\, \frac{ \delta V_i \rho_{\rm L}({\rm L_i}\geq 0) \hat{{\bf r}}_i}{r_i^2}. \end{equation} In practice, however, we have a f\mbox{}l\mbox{}ux limited catalogue with $S \geq S_{\rm lim}$, so only galaxies with luminosity $L \geq L_{\rm lim}=4 \pi r^2 S_{\rm lim}$ are included in the survey. Thus the total luminosity in the infinitesimal volume $\delta V$ is \begin{equation} \delta V \rho_{\rm L}({\rm L}\geq 0)=L_{\rm obs}+\delta V \rho_{\rm L}({\rm L}< {\rm L}_{\rm lim}) \label{eqn:dv} \end{equation} where $L_{\rm obs}=\delta V \rho_{\rm L}({\rm L} \geq {\rm L}_{\rm lim})$ is the observed luminosity and $\delta V \rho_{\rm L}({\rm L} < {\rm L}_{\rm lim})$ is the luminosity that was not observed due to the f\mbox{}l\mbox{}ux limit of the survey. Substituting \begin{equation} \delta V= \frac{L_{\rm obs}}{\rho_{\rm L}({\rm L} \geq {\rm L}_{\rm lim})} \end{equation} into Equation~\ref{eqn:dv} yields
\begin{eqnarray}
\delta V \rho_{\rm L}({\rm L}\geq 0)& = & L_{\rm obs}\Bigg[1+\frac{\rho_{\rm L}({\rm L}< {\rm L}_{\rm lim})}{\rho_{\rm L}({\rm L}\geq {\rm L}_{\rm lim})}\Bigg] \nonumber \\
& = &L_{\rm obs}\Bigg[1+\frac{\rho_{\rm L}({\rm L}\geq 0)-\rho_{\rm L}({\rm L}\geq {\rm L}_{\rm lim})}{\rho_{\rm L}({\rm L}\geq {\rm L}_{\rm lim})}\Bigg] \nonumber \\
& = &L_{\rm obs}\frac{\rho_{\rm L}({\rm L}\geq 0)} {\rho_{\rm L}({\rm L}\geq {\rm L}_{\rm lim})}\equiv \frac{L_{\rm obs}} {\psi({\rm L}\geq {\rm L}_{\rm lim})}
\label{eqn:phiL}
\end{eqnarray}
where $\psi({\rm L}\geq {\rm L}_{\rm lim})$ is the f\mbox{}l\mbox{}ux weighted selection function. In Figure~\ref{fig:lum}, the interpolated fit $\psi({\rm L}\geq {\rm L}_{\rm lim})$ for the 2MRS galaxies is shown as a function of redshift. \begin{figure} \psfig{figure=Figures/lumz.ps,angle=0,width=0.5\textwidth, height=75mm, clip=} \caption[F\mbox{}l\mbox{}ux Weighted Selection Function of 2MRS] {The f\mbox{}l\mbox{}ux weighted selection function as a function of redshift.} \label{fig:lum} \end{figure} Thus, the overall weight factor, $w_L$, is \begin{equation} w_{L_i}=\frac{1}{\psi(r_i)C_{i}}. \end{equation} The luminosity density of the 2MRS is \begin{equation} \rho_L=\frac{1}{V}\sum_i\; \frac{L_i}{\psi(L\geq L_{i,lim})}=(7.67\pm1.02)\times10^8 \rm\thinspace L_{\odot} h {\rm Mpc}^{-3} \end{equation} where $V$ is the survey volume. The value of $\rho_L$ is in good agreement with the value derived by Kochanek {\it et al.}\ (2001), $\rho_L=(7.14\pm0.75)\times10^8$ \Lsun $h {\rm Mpc}^{-3}$. We note that the number weighted selection function, $\phi(r)$, drops with distance faster than the luminosity weighted selection function, $\psi(r)$. At large distances, we observe only the most luminous galaxies, so the fraction of `missing' luminosity in a volume of space is not as large as the fraction of `missing' galaxies. Therefore, as shown below, the f\mbox{}l\mbox{}ux weighted dipole is more robust at large distances than the number weighted dipole.
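For intuition, $\psi({\rm L}\geq {\rm L}_{\rm lim})$ can be evaluated for an assumed luminosity function; the sketch below adopts a Schechter form with an illustrative faint-end slope, which is not the luminosity function actually measured for the 2MRS.
\begin{verbatim}
# Sketch: flux-weighted selection function psi(L >= Llim)
# (Equation 17) for an assumed Schechter luminosity function;
# luminosities are in units of L*, alpha is illustrative.
import numpy as np
from scipy.integrate import quad

alpha = -1.09  # assumed faint-end slope

def lum_density_above(llim):
    # integral of L * phi(L) dL from llim upwards (constants
    # cancel in the ratio; 50 L* approximates infinity)
    integrand = lambda l: l**(1.0 + alpha) * np.exp(-l)
    return quad(integrand, llim, 50.0)[0]

def psi(llim):
    return lum_density_above(llim) / lum_density_above(0.0)

print(psi(1.0))  # luminosity fraction in galaxies with L >= L*
\end{verbatim}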
For the flux weighted scheme, the shot noise is estimated as
\begin{equation}
\sigma_{sn}^2= \left(\frac{H_0f(\Omega_{\rm m})}{\rho_{\rm L}}\right)^2 \sum\limits_{i}\, \left(\frac{S_i\hat{{\bf r}}_i}{\psi(r_i)C_{i}}\right)^2\;.
\end{equation}
As the exact shot noise effects for the different models of galaxy formation are difficult to model, we will use the Poisson estimate above as an indicator of the uncertainties (see also Kaiser \& Lahav 1989). However, we note that the quoted uncertainties overestimate the noise at small distances, where the survey is volume limited, and underestimate it at large distances, where only the brightest galaxies are sampled. The uncertainties in, and the variation of, the mass-to-light ratios also affect the calculation but they are not accounted for in this analysis.
\subsection{The Redshift-Space Effects}
It is well known that peculiar velocities distort the pattern of density enhancements in redshift-space. The peculiar acceleration of the Local Group calculated using redshifts instead of real distances will therefore differ from the actual LG acceleration (Kaiser 1987; Kaiser \& Lahav 1989). This effect, referred to as {\it the rocket effect}, is easily visualised by supposing that only the Local Group has a velocity in a homogeneous universe without any peculiar velocities. If the LG frame redshifts are used as distance indicators, then there will be a spurious contribution from the galaxies that are in the direction of the LG motion. The predicted net spurious acceleration is (Kaiser \& Lahav 1988):
\begin{equation}
{\bf v}_{spur}=\frac{1}{3}{\bf v}({\bf 0})\Bigg[ \Bigg(2+\frac{d\ln\phi}{d\ln r}\Bigg)_{z_{vol}} \ln\frac{z_{vol}}{z_{min}}+\ln\frac{\phi(z_{max})z_{max}^2} {\phi(z_{vol})z_{vol}^2}\Bigg]
\end{equation}
where $z_{min}$ is the minimum redshift, $z_{max}$ is the maximum redshift of the survey and $z_{vol}$ is the redshift out to which the survey is volume limited ($cz_{vol}\approx$4500 \kmps for the 2MRS). The rocket effect is very important for the number weighted LG dipole calculation because of the dependence on $r_i$ in Equation~\ref{eqn:obsdip}. For the 2MRS, the predicted error from the rocket effect is a contribution to the total dipole of roughly ${\bf v}_{spur}\approx-0.6{\bf v}({\bf 0})$. There are two ways to overcome this error. One is to work in real-space instead of redshift-space. This will be discussed in a forthcoming paper, where the Local Group dipole will be calculated using the Wiener reconstructed real-space density field. The other is to use the flux weighted model. For the flux weighted LG dipole, the rocket effect is almost negligible, as it plays a role only in the determination of the radii of the concentric spheres within which the dipole is calculated.
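The size of this correction is easy to estimate numerically. A minimal sketch (ours; it uses redshift as the distance variable and a finite-difference logarithmic slope, and the function names are hypothetical) is:
\begin{verbatim}
import numpy as np

def rocket_effect(v0, phi, z_min, z_vol, z_max, eps=1e-3):
    """Spurious dipole amplitude from the rocket effect (equation above).

    v0  -- LG velocity amplitude (km/s)
    phi -- callable: the number weighted selection function phi(z)
    """
    # logarithmic slope d ln(phi)/d ln(z) at z_vol by central differences
    slope = (np.log(phi(z_vol * (1 + eps)))
             - np.log(phi(z_vol * (1 - eps)))) / (2 * eps)
    return (v0 / 3.0) * ((2.0 + slope) * np.log(z_vol / z_min)
                         + np.log(phi(z_max) * z_max**2
                                  / (phi(z_vol) * z_vol**2)))
\end{verbatim}
With the 2MRS selection function, an estimate of this kind gives the roughly $-0.6\,{\bf v}({\bf 0})$ contribution quoted above.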
\subsection{The Reference Frames}
Brunozzi {\it et al.\ } (1995) and Kocevski, Mullis \& Ebeling (2004) claim that the dipoles in the LG and the CMB frames are, respectively, over- and under-estimates of the real dipole. The redshift of an object is defined by
\begin{equation}
cz=H_0r+({\bf v}({\bf r})-{\bf v}({\bf 0}))\cdot\hat{{\bf r}}
\end{equation}
where ${\bf v}({\bf r})$ is the peculiar velocity of the object and ${\bf v}({\bf 0})$ is the observer's peculiar velocity. In the LG frame $|{\bf v}({\bf 0})|=627\pm22$ \kmps and in the CMB frame $|{\bf v}({\bf 0})|=0$ \kmps by definition. Therefore, the redshift of a galaxy that moves in the same direction as the LG is larger in the CMB frame than in the LG frame. In this case, since the acceleration vector is proportional to $1/r^2$, the amplitude of the dipole in the LG frame is expected to be larger than the amplitude of the dipole in the CMB frame. As the dipole is thought to be dominated by nearby objects that participate together with the LG in a bulk motion (i.e. ${\bf v}({\bf r})\approx{\bf v}({\bf 0})$, so that $cz_{LG}\approx H_0r$), it is often assumed that the real LG dipole is closer to the dipole in the LG frame than to that in the CMB frame. On the other hand, Branchini \& Plionis (1996) find that the real-space reconstruction of the LG dipole gives a result halfway between the LG frame and the CMB frame values. We perform the analysis using both the LG and the CMB frame redshifts. All galaxies are referenced to the rest frame of the LG using the transformation of Yahil, Tammann \& Sandage (1977), in consistency with Courteau \& van den Bergh (1999) who use the same conversion:
\begin{equation}
cz_{LG}=cz_{hel}-79\cos(l)\cos(b)+296\sin(l)\cos(b)-36\sin(b)
\end{equation}
where $z_{hel}$ is the heliocentric redshift and $l$ and $b$ are the longitude and the latitude of the galaxy in Galactic coordinates, respectively. We convert from the LG frame to the CMB frame using
\begin{eqnarray}
cz_{CMB}&=&cz_{LG}+v_{LG}[\sin(b)\sin(b_{LG}) \nonumber \\
&+&\cos(b)\cos(b_{LG})\cos(|l_{LG}-l|)],
\end{eqnarray}
where $v_{LG}$ is the amplitude of the LG velocity with respect to the CMB and ($l_{LG}$, $b_{LG}$) is the direction of its motion. We use the CMB dipole value of Bennett {\it et al.\ } (2003). Using the first year of data from WMAP, they find that the Sun is moving at a speed of 369.5$\pm$3.0 km ${\rm s}^{-1}$ towards ($l=263.85^\circ\pm0.10^\circ$, $b={48.25}^\circ\pm0.40^\circ$). Combining this with the revised value for the motion of the Sun relative to the LG, 306$\pm$18 \kmps towards ($l=99^\circ\pm5^\circ$, $b={-4}^\circ\pm4^\circ$), derived by Courteau \& van den Bergh (1999), we find a LG velocity relative to the CMB of $v_{LG}=627\pm22$ \kmps towards ($l_{LG}=273^\circ\pm3^\circ$, $b_{LG}=29^\circ\pm3^\circ$). The choice of reference frames highlights another advantage of the flux weighted dipole calculation: as the redshifts enter the calculation only in the determination of the radii of the concentric spheres, the results are robust to changes in reference frame.
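These conversions, and the misalignment angles quoted below, are simple spherical-geometry operations. A minimal sketch (ours, assuming NumPy; angles in degrees) is:
\begin{verbatim}
import numpy as np

L_LG, B_LG, V_LG = 273.0, 29.0, 627.0   # LG motion w.r.t. the CMB

def lg_to_cmb(cz_lg, l, b):
    """CMB frame redshift from LG frame redshift (equation above)."""
    l, b, l0, b0 = map(np.radians, (l, b, L_LG, B_LG))
    return cz_lg + V_LG * (np.sin(b) * np.sin(b0)
                           + np.cos(b) * np.cos(b0) * np.cos(np.abs(l0 - l)))

def misalignment(l1, b1, l2, b2):
    """Angle in degrees between two directions in Galactic coordinates."""
    l1, b1, l2, b2 = map(np.radians, (l1, b1, l2, b2))
    c = np.sin(b1) * np.sin(b2) + np.cos(b1) * np.cos(b2) * np.cos(l1 - l2)
    return np.degrees(np.arccos(np.clip(c, -1.0, 1.0)))
\end{verbatim}
For example, \texttt{misalignment(251, 38, 273, 29)} reproduces, to within rounding, the $\approx21^\circ$ angle between the flux weighted dipole and the CMB dipole quoted in Section~\ref{sec:dip:results}.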
\section{Dipole Results}\label{sec:dip:results}
The results are presented in Figures~\ref{fig:velvsdist}-\ref{fig:directions}. Figures~\ref{fig:velvsdist} and \ref{fig:velvsdistcmb} show the amplitudes and the three spatial components of the acceleration of the Local Group (top panels), and the convergence of the angle between the LG dipole and the CMB dipole (bottom panels), as a function of distance in the two reference frames. For these plots, the galaxies in the masked regions are interpolated from the adjacent regions (Method 2). In each figure, the right panels show the results for the flux weighted dipole and the left panels show the results for the number weighted dipole. Figure~\ref{fig:velvsdist} is in the Local Group frame and Figure~\ref{fig:velvsdistcmb} is in the CMB frame. As discussed in the next section, the results for the filling Method 1, where the galaxies are sampled randomly, do not look very different from the results in Figures~\ref{fig:velvsdist} \& \ref{fig:velvsdistcmb} and thus are not shown. Figure~\ref{fig:directions} compares the direction of the LG dipole estimate to that of the CMB and to other LG dipole measurements. We give the results for the flux weighted dipole calculated using the second method of mask filling in Table~\ref{tab:tab2}. Column 1 gives the radii of the concentric spheres within which the values are calculated; columns 2 and 3 give the amplitude of the velocity vector and the shot noise, both divided by $\omegam^{0.6}/b_{\rm L}$; columns 4, 5 and 6 show the direction of the velocity vector and its angle to the CMB dipole. The first line of each entry gives the results in the LG frame and the second line gives the results in the CMB frame. Table~\ref{tab:tab3} is structured in the same way as Table~\ref{tab:tab2}; however, the analysis excludes five galaxies.
\subsection{The Tug of War}
In Figures~\ref{fig:velvsdist} \& \ref{fig:velvsdistcmb}, the LG velocity is dominated by structure within a distance of 6000 \kmps (except for the CMB frame number weighted dipole, where the contribution from the distant structure is over-estimated). The `tug of war' between the Great Attractor and Perseus-Pisces is clearly evident. The dip in the velocity vector is an indication that the local flow towards the Great Attractor\footnote{ By `Great Attractor', we mean the entire steradian on the sky centred at ($l\sim310^\circ$,$b\sim20^\circ$) covering a distance of 20 $h^{-1}$ Mpc to 60 $h^{-1}$ Mpc.} is counteracted by the Perseus-Pisces complex in the opposite direction. If we take out the 420 galaxies in the Perseus-Pisces ridge (defined by $-40^\circ \le b \le -10^\circ$, $110^\circ \le l \le 130^\circ$, 4600 \kmps $\le cz \le$ 6000 \kmps; see the sketch below) and recalculate the convergence, the dip almost disappears and the convergence is dominated by the Great Attractor. This leads us to conclude that Perseus-Pisces plays a significant role in the gravitational acceleration of the LG.
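The ridge cut is a simple boolean selection. A minimal sketch (ours; array names hypothetical) is:
\begin{verbatim}
import numpy as np

def perseus_pisces_ridge(l, b, cz):
    """Galaxies in the Perseus-Pisces ridge as defined in the text:
    110 <= l <= 130 deg, -40 <= b <= -10 deg, 4600 <= cz <= 6000 km/s."""
    return ((l >= 110.0) & (l <= 130.0)
            & (b >= -40.0) & (b <= -10.0)
            & (cz >= 4600.0) & (cz <= 6000.0))

# keep = ~perseus_pisces_ridge(l, b, cz)  # recompute the dipole without the ridge
\end{verbatim}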
\subsection{Filling the Zone of Avoidance}
The choice of method used to fill the masked regions does not have much effect on the results. The convergence of the misalignment angle for the second method is slightly more stable than for the first method, and the overall direction of the LG dipole is closer to the CMB dipole. Since the Galactic $z$ component is least affected by the Zone of Avoidance, the discrepancy between the amplitudes in each plot comes mainly from the Galactic $x$ and $y$ components. The direction of the dipole is 2$^\circ$-3$^\circ$ closer to the CMB vector for the second method at the distances where the Great Attractor lies. Of course, one cannot rule out the possibility that there may be an important contribution to the dipole from other structures behind the Zone of Avoidance. The Great Attractor is most likely centred on the Norma Cluster ($l\approx$325$^\circ$, $b\approx$-7$^\circ$, Kraan-Korteweg {\it et al.\ } 1996), which lies very close to the obscured plane. Although the 2MRS samples the Norma Cluster much better than the optical surveys, the latitude range $|b|\lesssim5^\circ$, which is still obscured in the 2MRS, may contain structures that play an important role in the dipole determination. In fact, Scharf {\it et al.\ } (1992) and Lahav {\it et al.\ } (1993) point out that there is a significant contribution to the local flow from the Puppis complex at low Galactic latitudes not sampled by the 2MRS. Kraan-Korteweg \& Lahav (2000) point out that, since the dipole is dominated by local structures, the detection of nearby galaxies can be more important to dipole analyses than the detection of massive clusters at larger distances. We test this by excluding the five most luminous nearby galaxies (Maffei 1, Maffei 2, IC342, Dwingeloo 1 and M81) from our analysis. Remarkably, the direction of the resultant dipole moves much closer to that of the CMB (see Table~\ref{tab:tab3}). All of these galaxies except M81 lie very close to the Zone of Avoidance and they are excluded from most dipole analyses, either because they are not in the catalogue (e.g. Rowan-Robinson {\it et al.\ } 2000) or because they are masked out (e.g. Maller {\it et al.\ } 2003). In fact, when we change our mask to match that of Maller {\it et al.\ } (2003) and keep M81 in the analysis, our resulting dipole is only 3$^\circ$ away from the dipole calculated by Maller {\it et al.\ } (2003). This illustrates the importance of the nearby structure behind the Zone of Avoidance. The comparison of Tables~\ref{tab:tab2} and~\ref{tab:tab3} also highlights the vital role that non-linear dynamics induced by nearby objects play in dipole calculations. Maller {\it et al.\ } (2003) investigate the non-linear contribution to the LG dipole by removing the bright galaxies with $K_s<8$. They report that the LG dipole moves to within a few degrees of the CMB dipole. They repeat their calculations for the PSCz survey and observe the same pattern. We do not observe this behaviour. When we remove the objects brighter than $K_s=8$ (428 galaxies), the misalignment angle of the resulting dipole decreases by $3^\circ$ for the flux weighted dipole and remains the same for the number weighted case. The dipole amplitudes decrease substantially in both cases, notably in the case of the flux dipole, suggesting that the brightest 2MRS galaxies play a significant role in inducing the LG velocity.
\subsection{The Choice of Reference Frames}
The number weighted LG dipole looks very different in different reference frames (Figures~\ref{fig:velvsdist} and~\ref{fig:velvsdistcmb}), whereas the flux weighted dipole is almost unaffected by the change. The number weighted dipole is similar to the flux weighted dipole in the LG frame. Thus, we conclude that it is more accurate to use the LG frame redshifts than those of the CMB frame.
\subsection{The Choice of Weighting Schemes}
The amplitudes of the number weighted LG dipole and the flux weighted LG dipole are very similar in the LG frame. However, the convergence of the misalignment angles of the flux and the number dipoles is quite different in both frames, especially at large distances. The angle of the flux weighted dipole is closer to the CMB dipole than its number weighted counterpart at all distances. With either weighting scheme, the dipoles are closest to the CMB dipole at a distance of about 5000 \kmps and move away from the CMB dipole direction further out.
However, the change in the direction of the flux weighted dipole is much smaller than for the number weighted dipole, and there is convergence within the error bars by 6000 \kmps. The misalignment angles in the LG frame at 130 $h^{-1}$ Mpc are 21$^\circ$ and $37^\circ$ for the flux and the number dipoles, respectively. The discrepancy is probably mainly due to the fact that the number dipole is plagued by errors due to the lack of peculiar velocity information. In fact, when we use just the redshift information instead of the distance measurements (see Section 2.4), the number dipole moves $\approx7^\circ$ towards the CMB dipole. The flux dipole assumes that mass traces light, whereas the number dipole assumes that all galaxies have the same mass. The former assumption is of course more valid; however, since the amplitudes of the dipoles are so similar in the LG frame, we conclude that the equal mass assumption for the number weighted dipole does not introduce large errors and that the discrepancy results from the errors in distance. In all figures, $v_x$ and $v_y$ change with distance more rapidly in the number weighted scheme than in the flux weighted scheme. At larger distances, the flux weighted $v_x$ flattens whereas its number weighted counterpart continues to grow. It is expected that the $(x,y)$ directions are particularly sensitive to the shape of the Zone of Avoidance, although it is not obvious why the flux weighted components remain so robust. Assuming the dipole has converged, we can obtain values for $\omegam^{0.6}/b$ (number weighted) and $\omegam^{0.6}/b_{\rm L}$ (flux weighted) by comparing the amplitudes of our dipole estimates to the CMB dipole. These values are summarised in Table~\ref{tab:omegavebeta}. The values are quoted in the LG frame at\footnote{This is the distance beyond which the shot noise becomes too high (over 10$\%$ for the number weighted analysis).} 13000 \kmps, using the second mask and the luminosity density value derived earlier, $\rho_L=(7.67\pm1.02)\times10^8$ \Lsun $h {\rm Mpc}^{-3}$. The errors take into account the shot noise and the uncertainties in the CMB dipole and in $\rho_L$ (for the flux weighted case). The $\beta$ values obtained for the two different weighting schemes are in excellent agreement, suggesting that the dark matter haloes are well sampled by the survey. Our value for $\beta$ is also in good agreement with results from 2MASS (Pike \& Hudson 2005) and from the $IRAS$ surveys (e.g. Zaroubi {\it et al.\ } 2002; Willick \& Strauss 1998). In order to calculate the uncertainties introduced by the errors in the galaxy redshifts, ten realisations of the 2MRS catalogue are created, with each galaxy redshift drawn from a Gaussian distribution with standard deviation equal to its error\footnote {The mean value of the redshift measurement errors is 30 \kmps.}. It is found that the scatter in the dipole results due to the errors in redshifts is very small compared to the shot noise errors and thus is not quoted.
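The redshift-error test is a small Monte Carlo. A minimal sketch (ours; \texttt{dipole\_fn} stands for any of the dipole estimators sketched above and is hypothetical) is:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def redshift_error_scatter(cz, cz_err, l, b, dipole_fn, n_real=10):
    """Scatter in the dipole from redshift measurement errors:
    perturb each cz by a Gaussian of width equal to its error
    (mean error ~30 km/s) and recompute the dipole each time."""
    dipoles = [dipole_fn(rng.normal(cz, cz_err), l, b)
               for _ in range(n_real)]
    return np.std(dipoles, axis=0)   # component-wise scatter
\end{verbatim}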
\begin{table}
\caption[The values derived for $\beta$]
{The values derived for $\omegam^{0.6}/b_{\rm L}$ and $\omegam^{0.6}/b$.}
\begin{center}
\begin{tabular}{|l|c|c|} \hline
$\omegam^{0.6}/b_{\rm L}$ from the flux weighted scheme& $=$ & $0.40\pm0.09$ \\\hline
$\omegam^{0.6}/b$ from the number weighted scheme& $=$ & $0.40\pm0.08$ \\ \hline
\end{tabular}
\end{center}
\label{tab:omegavebeta}
\end{table}
\subsection{The Milky Way Dipole}
We also investigate the acceleration on our Galaxy due to the other eight members of the LG excluded from the LG dipole analysis. As expected, the flux dipole is strongly dominated by Andromeda (M31), with an amplitude of ${\rm v}/\beta\approx220$ \kmps directly towards M31 ($l\approx121.4^\circ, b\approx-21.7^\circ$), confirming that near-infrared fluxes are good tracers of mass. The number weighted dipole, which assumes that all galaxies have the same weight, gives a similar amplitude of ${\rm v}/\beta\approx190$ \kmps, but its direction ($l\approx104.6^\circ, b\approx-21.6^\circ$) is skewed towards NGC 6822 ($l\approx25.3^\circ, b\approx-18.4^\circ$), which lies further away from the other seven galaxies that are grouped together.
\begin{table*}
\caption[The dipole convergence values for the flux weighted dipole]
{The convergence values calculated for the flux weighted dipole using the second method for the masked region. The columns give: (i) the radius of the concentric sphere within which the values are calculated; (ii) and (iii) the amplitude of the velocity vector and its shot noise, divided by $f(\Omega_{\rm m})/b_{\rm L}$; (iv), (v) and (vi) the direction of the velocity vector and its angle to the CMB dipole. The results in the first and second line of each entry are given in the LG and CMB frames, respectively.}
\begin{center}
\begin{tabular}{@{}ccccc} \hline \\
Dist & $V_{tot}b_L/f(\Omega_{\rm m})$ & l & b & $\delta\theta$ \\
\kmps & \kmps & deg & deg & deg \\ \hline \\
1000 & 590 $\pm$294 &259$^\circ\pm$ 59 $^\circ$& 39$^\circ\pm$20 $^\circ$& 42$^\circ\pm$20 $^\circ$\\
& 362 $\pm$289 &331$^\circ\pm$171 $^\circ$& 23$^\circ\pm$23 $^\circ$& 99$^\circ\pm$26 $^\circ$\\\hline
2000 & 1260 $\pm$318 &249$^\circ\pm$ 17 $^\circ$& 41$^\circ\pm$13 $^\circ$& 25$^\circ\pm$11 $^\circ$\\
& 993 $\pm$316 &222$^\circ\pm$ 22 $^\circ$& 41$^\circ\pm$15 $^\circ$& 44$^\circ\pm$15 $^\circ$\\\hline
3000 & 1633 $\pm$322 &260$^\circ\pm$ 12 $^\circ$& 40$^\circ\pm$10 $^\circ$& 17$^\circ\pm$ 8 $^\circ$\\
& 1334 $\pm$320 &241$^\circ\pm$ 17 $^\circ$& 44$^\circ\pm$13 $^\circ$& 31$^\circ\pm$11 $^\circ$\\\hline
4000 & 1784 $\pm$323 &264$^\circ\pm$ 10 $^\circ$& 39$^\circ\pm$ 9 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\
& 1513 $\pm$322 &252$^\circ\pm$ 14 $^\circ$& 41$^\circ\pm$11 $^\circ$& 22$^\circ\pm$ 9 $^\circ$\\\hline
5000 & 1838 $\pm$324 &265$^\circ\pm$ 10 $^\circ$& 36$^\circ\pm$ 9 $^\circ$& 12$^\circ\pm$ 7 $^\circ$\\
& 1497 $\pm$323 &252$^\circ\pm$ 13 $^\circ$& 39$^\circ\pm$11 $^\circ$& 22$^\circ\pm$ 9 $^\circ$\\\hline
6000 & 1633 $\pm$325 &259$^\circ\pm$ 10 $^\circ$& 34$^\circ\pm$ 9 $^\circ$& 15$^\circ\pm$ 8 $^\circ$\\
& 1438 $\pm$324 &250$^\circ\pm$ 12 $^\circ$& 36$^\circ\pm$11 $^\circ$& 22$^\circ\pm$ 9 $^\circ$\\\hline
7000 & 1682 $\pm$325 &256$^\circ\pm$ 10 $^\circ$& 36$^\circ\pm$ 9 $^\circ$& 18$^\circ\pm$ 8 $^\circ$\\
& 1503 $\pm$324 &247$^\circ\pm$ 12 $^\circ$& 36$^\circ\pm$10 $^\circ$& 24$^\circ\pm$ 9 $^\circ$\\\hline
8000 & 1697 $\pm$326 &255$^\circ\pm$ 11 $^\circ$& 37$^\circ\pm$10 $^\circ$& 18$^\circ\pm$ 8 $^\circ$\\
& 1566 $\pm$325 &248$^\circ\pm$ 12 $^\circ$& 39$^\circ\pm$10 $^\circ$& 24$^\circ\pm$ 9 $^\circ$\\\hline
9000 & 1683 $\pm$326 &255$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 19$^\circ\pm$ 8 $^\circ$\\
& 1573 $\pm$325 &248$^\circ\pm$ 12 $^\circ$& 39$^\circ\pm$10 $^\circ$& 24$^\circ\pm$ 9 $^\circ$\\\hline
10000 & 1674 $\pm$326 &253$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 20$^\circ\pm$ 8 $^\circ$\\
& 1599 $\pm$325 &246$^\circ\pm$ 12 $^\circ$& 39$^\circ\pm$10 $^\circ$& 26$^\circ\pm$ 8 $^\circ$\\\hline
11000 & 1677 $\pm$326 &253$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1624 $\pm$325 &246$^\circ\pm$ 12 $^\circ$& 40$^\circ\pm$10 $^\circ$& 26$^\circ\pm$ 8 $^\circ$\\\hline
12000 & 1676 $\pm$326 &253$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1624 $\pm$325 &246$^\circ\pm$ 12 $^\circ$& 40$^\circ\pm$10 $^\circ$& 26$^\circ\pm$ 8 $^\circ$\\\hline
13000 & 1652 $\pm$326 &251$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1629 $\pm$325 &245$^\circ\pm$ 12 $^\circ$& 39$^\circ\pm$10 $^\circ$& 26$^\circ\pm$ 8 $^\circ$\\\hline
14000 & 1659 $\pm$327 &251$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 22$^\circ\pm$ 8 $^\circ$\\
& 1636 $\pm$326 &245$^\circ\pm$ 11 $^\circ$& 39$^\circ\pm$10 $^\circ$& 26$^\circ\pm$ 8 $^\circ$\\\hline
15000 & 1640 $\pm$327 &251$^\circ\pm$ 11 $^\circ$& 37$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1633 $\pm$326 &246$^\circ\pm$ 11 $^\circ$& 39$^\circ\pm$10 $^\circ$& 25$^\circ\pm$ 8 $^\circ$\\\hline
16000 & 1638 $\pm$327 &251$^\circ\pm$ 11 $^\circ$& 37$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1643 $\pm$326 &247$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 25$^\circ\pm$ 8 $^\circ$\\\hline
17000 & 1630 $\pm$327 &251$^\circ\pm$ 11 $^\circ$& 37$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1643 $\pm$326 &247$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 25$^\circ\pm$ 8 $^\circ$\\\hline
18000 & 1604 $\pm$328 &251$^\circ\pm$ 11 $^\circ$& 37$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1631 $\pm$327 &247$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 25$^\circ\pm$ 8 $^\circ$\\\hline
19000 & 1591 $\pm$328 &251$^\circ\pm$ 11 $^\circ$& 37$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1629 $\pm$327 &247$^\circ\pm$ 11 $^\circ$& 38$^\circ\pm$10 $^\circ$& 24$^\circ\pm$ 8 $^\circ$\\\hline
20000 & 1577 $\pm$328 &251$^\circ\pm$ 12 $^\circ$& 37$^\circ\pm$10 $^\circ$& 21$^\circ\pm$ 8 $^\circ$\\
& 1620 $\pm$327 &247$^\circ\pm$ 11 $^\circ$& 37$^\circ\pm$10 $^\circ$& 24$^\circ\pm$ 8 $^\circ$\\\hline
\label{tab:tab2}
\end{tabular}
\end{center}
\end{table*}
\begin{table*}
\caption[Same as Table 2 but excluding some galaxies]
{Same as Table~\ref{tab:tab2} but the analysis excludes the galaxies: Maffei 1, Maffei 2, Dwingeloo 1, IC342 and M81.}
\begin{center}
\begin{tabular}{@{}ccccc} \hline \\
Dist & $V_{tot}b_L/f(\Omega_{\rm m})$ & l & b & $\delta\theta$ \\
\kmps & \kmps & deg & deg & deg \\ \hline \\
1000 & 585 $\pm$254 &280$^\circ\pm$ 30 $^\circ$& 34$^\circ\pm$17 $^\circ$& 25$^\circ\pm$ 9 $^\circ$\\
& 181 $\pm$228 &360$^\circ\pm$108 $^\circ$& 24$^\circ\pm$29 $^\circ$& 67$^\circ\pm$37 $^\circ$\\\hline
2000 & 1258 $\pm$282 &263$^\circ\pm$ 13 $^\circ$& 38$^\circ\pm$11 $^\circ$& 15$^\circ\pm$ 9 $^\circ$\\
& 963 $\pm$261 &259$^\circ\pm$ 18 $^\circ$& 39$^\circ\pm$13 $^\circ$& 19$^\circ\pm$12 $^\circ$\\\hline
3000 & 1661 $\pm$285 &269$^\circ\pm$ 9 $^\circ$& 37$^\circ\pm$ 9 $^\circ$& 11$^\circ\pm$ 5 $^\circ$\\
& 1352 $\pm$266 &265$^\circ\pm$ 12 $^\circ$& 41$^\circ\pm$10 $^\circ$& 16$^\circ\pm$
8 $^\circ$\\\hline 4000 & 1824 $\pm$287 &272$^\circ\pm$ 8 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 10$^\circ\pm$ 4 $^\circ$\\ & 1571 $\pm$268 &269$^\circ\pm$ 9 $^\circ$& 37$^\circ\pm$ 9 $^\circ$& 12$^\circ\pm$ 5 $^\circ$\\\hline 5000 & 1891 $\pm$288 &272$^\circ\pm$ 8 $^\circ$& 33$^\circ\pm$ 7 $^\circ$& 8$^\circ\pm$ 3 $^\circ$\\ & 1559 $\pm$270 &268$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 10$^\circ\pm$ 5 $^\circ$\\\hline 6000 & 1678 $\pm$289 &268$^\circ\pm$ 8 $^\circ$& 31$^\circ\pm$ 8 $^\circ$& 9$^\circ\pm$ 4 $^\circ$\\ & 1504 $\pm$271 &266$^\circ\pm$ 9 $^\circ$& 32$^\circ\pm$ 8 $^\circ$& 10$^\circ\pm$ 5 $^\circ$\\\hline 7000 & 1713 $\pm$289 &264$^\circ\pm$ 8 $^\circ$& 33$^\circ\pm$ 8 $^\circ$& 11$^\circ\pm$ 6 $^\circ$\\ & 1551 $\pm$272 &263$^\circ\pm$ 9 $^\circ$& 33$^\circ\pm$ 8 $^\circ$& 12$^\circ\pm$ 6 $^\circ$\\\hline 8000 & 1721 $\pm$290 &264$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 11$^\circ\pm$ 7 $^\circ$\\ & 1611 $\pm$272 &264$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 12$^\circ\pm$ 7 $^\circ$\\\hline 9000 & 1704 $\pm$290 &264$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 12$^\circ\pm$ 7 $^\circ$\\ & 1614 $\pm$272 &264$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 12$^\circ\pm$ 7 $^\circ$\\\hline 10000 & 1691 $\pm$290 &262$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 7 $^\circ$\\ & 1634 $\pm$272 &262$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 7 $^\circ$\\\hline 11000 & 1691 $\pm$290 &262$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 7 $^\circ$\\ & 1658 $\pm$273 &262$^\circ\pm$ 9 $^\circ$& 37$^\circ\pm$ 8 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\\hline 12000 & 1690 $\pm$291 &262$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 7 $^\circ$\\ & 1658 $\pm$273 &261$^\circ\pm$ 9 $^\circ$& 37$^\circ\pm$ 8 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\\hline 13000 & 1665 $\pm$291 &261$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 9 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\ & 1663 $\pm$273 &261$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\\hline 14000 & 1671 $\pm$291 &260$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\ & 1668 $\pm$273 &260$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 14$^\circ\pm$ 6 $^\circ$\\\hline 15000 & 1652 $\pm$291 &260$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 9 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\ & 1670 $\pm$274 &261$^\circ\pm$ 9 $^\circ$& 36$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 6 $^\circ$\\\hline 16000 & 1653 $\pm$292 &261$^\circ\pm$ 9 $^\circ$& 34$^\circ\pm$ 9 $^\circ$& 13$^\circ\pm$ 7 $^\circ$\\ & 1683 $\pm$274 &261$^\circ\pm$ 8 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 6 $^\circ$\\\hline 17000 & 1647 $\pm$292 &261$^\circ\pm$ 9 $^\circ$& 34$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 7 $^\circ$\\ & 1684 $\pm$274 &261$^\circ\pm$ 8 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 6 $^\circ$\\\hline 18000 & 1619 $\pm$292 &260$^\circ\pm$ 9 $^\circ$& 34$^\circ\pm$ 9 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\ & 1672 $\pm$274 &261$^\circ\pm$ 9 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 6 $^\circ$\\\hline 19000 & 1606 $\pm$292 &260$^\circ\pm$ 9 $^\circ$& 34$^\circ\pm$ 9 $^\circ$& 14$^\circ\pm$ 7 $^\circ$\\ & 1672 $\pm$274 &262$^\circ\pm$ 8 $^\circ$& 35$^\circ\pm$ 8 $^\circ$& 13$^\circ\pm$ 6 $^\circ$\\\hline 20000 & 1594 $\pm$293 &261$^\circ\pm$ 9 $^\circ$& 34$^\circ\pm$ 9 $^\circ$& 13$^\circ\pm$ 7 $^\circ$\\ & 1665 $\pm$275 &262$^\circ\pm$ 8 $^\circ$& 34$^\circ\pm$ 8 $^\circ$& 12$^\circ\pm$ 6 $^\circ$\\\hline \label{tab:tab3} \end{tabular} 
\end{center}
\end{table*}
\begin{figure*}
\psfig{figure=Figures/velvsdist.ps,angle=0.,height=100mm, width=\textwidth,clip=}
\caption[The convergence of the LG acceleration in the LG frame (Mask 2)]
{{\bf Top}: the three components and the magnitude of the acceleration of the Local Group due to galaxies within a series of successively larger concentric spheres centred on the Local Group, in the {\bf Local Group frame}. The galaxies in the masked regions are interpolated from the adjacent regions (Method 2). The {\bf left} panel shows {\bf the number weighted} velocity and the {\bf right} panel {\bf the flux weighted} velocity. The growth of the estimated shot noise is also shown. {\bf Bottom}: convergence of the direction of the LG dipole, where the misalignment angle is measured between the LG and the CMB dipoles. The dotted lines denote 1$\sigma$ errors from shot noise. The left plot shows the direction of the number weighted LG dipole and the right plot the direction of the flux weighted LG dipole. Note, for the number weighted dipole, the dramatic increase in shot noise beyond 15000 \kmps, where the dipole's behaviour cannot be interpreted reliably.}
\label{fig:velvsdist}
\psfig{figure=Figures/velvsdistcmb.ps,angle=0.,height=100mm, width=\textwidth,clip=}
\caption[The convergence of the LG acceleration in the CMB frame (Mask 2)]
{Same as Figure~\ref{fig:velvsdist} but in the {\bf CMB frame}.}
\label{fig:velvsdistcmb}
\end{figure*}
\begin{figure*}
\psfig{figure=Figures/directions.ps,angle=0.,width=\textwidth,clip=}
\caption[]{The triangles show the dipole directions at 13000 \kmps derived in this paper. The direction of the number weighted 2MRS dipole in the LG frame is shown in magenta (LG-N.W., $l=231^\circ$, $b=42^\circ$). The red triangle shows the direction of the flux weighted 2MRS dipole in the LG frame (LG-F.W., $l=251^\circ$, $b=38^\circ$). The blue triangle shows the direction of the flux weighted 2MRS dipole in the LG frame excluding Maffei 1, Maffei 2, M81, IC342 and Dwingeloo 1 (LG-F.W., $l=261^\circ$, $b=35^\circ$). The red star shows the CMB dipole direction ($l=273^\circ$, $b=29^\circ$). The green triangle is the number weighted LG dipole in the CMB frame (CMB-N.W., $l=218^\circ$, $b=33^\circ$); the yellow triangle is the flux weighted LG dipole in the CMB frame (CMB-F.W., $l=245^\circ$, $b=39^\circ$). The green circle is the 2MASS dipole ($l=264^\circ$, $b=43^\circ$, Maller {\it et al.\ } 2003). The magenta square is the $IRAS$ 1.2-Jy dipole ($l=247^\circ$, $b=37^\circ$, Webster {\it et al.\ } 1997). The blue upside-down triangle is the $IRAS$ PSCz dipole ($l=253^\circ$, $b=26^\circ$, Rowan-Robinson {\it et al.\ } 2000). Contours are drawn at constant misalignment angles. Also shown are the Virgo Cluster ($l=280^\circ$, $b=75^\circ$), the Hydra Cluster ($l=270^\circ$, $b=27^\circ$), the Centaurus Cluster ($l=302^\circ$, $b=22^\circ$) and A3558 ($l=312^\circ$, $b=31^\circ$).}
\label{fig:directions}
\end{figure*}
\section{Discussion}
In this paper, we calculate the 2MRS dipole using the number and flux weighting schemes. The flux weighted dipole bypasses the effects of redshift-space distortions and of the choice of reference frame, giving very robust results. Our dipole estimates are dominated by the tug of war between the Great Attractor and the Perseus-Pisces superclusters and seemingly converge by 6000 \kmps.
The contribution from structure beyond these distances is negligible. The direction of the flux dipole ($l=251^\circ\pm$12$^\circ$, $b=37^\circ\pm$10$^\circ$) is in good agreement with the 2MASS dipole derived by Maller {\it et al.\ } (2003) ($l=264.5^\circ\pm$2$^\circ$, $b=43.5^\circ\pm$4$^\circ$). The difference in results is probably due to the fact that they use a higher latitude cutoff in the mask ($|b|<7^\circ$) and exclude all galaxies below this latitude. We confirm this by changing our treatment of the Zone of Avoidance to match theirs: we then find that the flux dipole lies very close to their dipole direction. Their limiting Kron magnitude is $K_s=13.57$, which corresponds to an effective depth of 200 $h^{-1}$ Mpc. As their sample is deep enough to pick out galaxies in the Shapley Supercluster, the comparison of their dipole value with ours suggests that the contribution to the LG dipole from structure further away than the maximum distance of our analysis is not significant. Following Maller {\it et al.\ } (2003), when we adopt $\Omega_{\rm m}=0.27$ we get $b_L=1.14\pm0.25$, in good agreement with their value of $b=1.06\pm0.17$, suggesting that the 2MRS galaxies are unbiased. We note that the 2MRS value for the linear bias is somewhat lower than expected, considering that the 2MRS has a high proportion of early-type galaxies, which are known to reside mostly in high density regions (e.g. Norberg {\it et al.\ } 2001; Zehavi {\it et al.\ } 2002). The values we derive for $\beta$ and $\omegam$ are consistent with the concordance $\Lambda$-CDM model values within their error bars. Figure 3 shows that the 2MRS samples the Great Attractor region better than the PSCz survey, but that the PSCz redshift distribution has a longer redshift tail than the 2MRS. Nevertheless, the PSCz dipole agrees well with that of the 2MRS. Rowan-Robinson {\it et al.\ } (2000) derive a value of $\beta=0.75^{+0.11}_{-0.08}$, which is higher than the value we derive in this paper. The PSCz sample is biased towards star-forming galaxies and thus under-samples the ellipticals, which lie in high density regions. By contrast, the 2MRS is biased towards early-type galaxies. This difference may be the reason why they get a higher value for $\beta$. The flux weighted dipole is in excellent agreement with the $IRAS$ 1.2-Jy dipole (Webster, Lahav \& Fisher 1997), which was obtained using a number weighted scheme but with an added filter that mitigates the shot noise and deconvolves the redshift distortions. Our number weighted dipole differs from their results. This is probably due to the fact that the 2MRS number weighted dipole is plagued with redshift distortions. Webster, Lahav \& Fisher (1997) obtain the real-space density field from that in redshift-space using a Wiener filter. In a forthcoming paper, we will use the same technique to address this issue. Similarly, the 2MRS number weighted dipole also differs from the PSCz dipole, which was calculated using a flow model for massive clusters. The analysis of the $IRAS$ QDOT galaxies combined with Abell clusters (Plionis, Coles \& Catelan 1993) and the X-ray cluster dipole (Kocevski, Mullis \& Ebeling 2004; Kocevski \& Ebeling 2005) imply a significant contribution to the LG velocity from the Shapley Supercluster. Kocevski \& Ebeling (2005) report a significant contribution to the LG dipole (56\%) from distances beyond 60 $h^{-1}$ Mpc.
The discrepancy between their results and ours is possibly due to the fact that the 2MRS is a better tracer of the galaxies at nearby distances, whereas the X-ray cluster data are better samplers of the matter distribution beyond 150 $h^{-1}$ Mpc. The misalignment angle between the LG and the CMB dipole is smallest at 5000 \kmps, where it drops to 12$^\circ\pm$7$^\circ$, and increases slightly at larger distances, presumably due to shot noise. This behaviour is also observed in the other dipole analyses (e.g. Webster, Lahav \& Fisher 1997; Rowan-Robinson {\it et al.\ } 2000). This is a strong indication that most of the LG velocity is due to the Great Attractor and the Perseus-Pisces superclusters. Of course, we still cannot rule out a significant contribution from Shapley, as we do not sample that far. However, it may be more important to ask what the velocity field in the Great Attractor region is; in other words, whether or not we observe a significant backside infall towards the Great Attractor. The smallest misalignment angle at 13000 \kmps is 21$^\circ\pm$8$^\circ$, found for the LG frame, flux weighted scheme using the second mask. This misalignment can be due to several effects:
\begin{itemize}
\item The analysis uses linear perturbation theory, which is correct only to first order in $\delta$. There may be contributions to the LG dipole from small scales, which would cause the gravity and velocity vectors to misalign even with perfect sampling. Ciecielag, Chodorowski \& Kudlicki (2001) show that these non-linear effects cause only small misalignments for the $IRAS$ PSCz survey. However, removing the five most luminous nearby galaxies moves the flux dipole to ($l=261^\circ\pm9^\circ$, $b=34^\circ\pm9^\circ$, $cz=20000$ \kmps), 8$^\circ$ closer to that of the CMB. This suggests that the non-linear effects might be very important in dipole determinations.
\item The sampling is not perfect, and the selection effects of the survey will increase the shot noise errors, especially at large distances, causing misalignments.
\item There may be uncertainties in the assumptions about galaxy formation and clustering. For example, the mass-to-light ratios might differ according to type and/or vary with luminosity, or the galaxy biasing might be non-linear and/or scale dependent.
\item There may be a significant contribution to the LG dipole from structure further away than the maximum distance of our analysis.
\item The direction of the LG dipole may be affected by nearby galaxies at low latitudes which are not sampled by the 2MRS. In the future, the masked regions will be filled with galaxies from other surveys, such as the ongoing HI Parkes Deep Zone of Avoidance Survey (Henning {\it et al.\ } 2004), as well as with galaxies from the 2MRS itself.
\end{itemize}
Our initial calculations of the expected LG acceleration (cf. Lahav, Kaiser \& Hoffman 1990; Juszkiewicz, Vittorio \& Wyse 1990) suggest that the misalignment of 21$^\circ\pm$8$^\circ$ is within 1$\sigma$ of the dipole probability distribution in a CDM universe with $\Omega_{\rm m}=0.3$. In a forthcoming paper, the cosmological parameters will be constrained more rigorously using a maximum likelihood analysis based on the spherical harmonics expansion (e.g. Fisher, Scharf \& Lahav 1994; Heavens \& Taylor 1995) of the 2MRS density field.
\section*{ACKNOWLEDGEMENTS}
We thank Sarah Bridle, Alan Heavens, Ariyeh Maller and Karen Masters for their useful comments.
PE would like to thank University College London for its hospitality during the completion of this work. OL acknowledges a PPARC Senior Research Fellowship. JPH, LM, CSK, NM, and TJ are supported by NSF grant AST-0406906, and EF's research is partially supported by the Smithsonian Institution. DHJ is supported as a Research Associate by Australian Research Council Discovery-Projects Grant (DP-0208876), administered by the Australian National University. This publication makes use of data products from the Two Micron All Sky Survey, which is a joint project of the University of Massachusetts and the Infrared Processing and Analysis Center/California Institute of Technology, funded by the National Aeronautics and Space Administration and the National Science Foundation. This research has also made use of the NASA/IPAC Extragalactic Database (NED), which is operated by the Jet Propulsion Laboratory, California Institute of Technology, under contract with the National Aeronautics and Space Administration, and of the SIMBAD database, operated at CDS, Strasbourg, France.
\section{#1}} \renewcommand{\theequation}{\thesection.\arabic{equation}} \newcommand{\app}[1]{\setcounter{section}{0} \setcounter{equation}{0} \renewcommand{\thesection}{\Alph{section}} \section{#1}} \newcommand{\begin{equation}}{\begin{equation}} \newcommand{\eqa}{\begin{eqnarray}} \newcommand{\end{equation}}{\end{equation}} \newcommand{\end{eqnarray}}{\end{eqnarray}} \newcommand{\nonumber \end{equation}}{\nonumber \end{equation}} \newcommand{\hspace{0.7cm}}{\hspace{0.7cm}} \def\widehat{A}{\widehat{A}} \def{\widehat{\varepsilon}}{{\widehat{\varepsilon}}} \def {\widehat\delta} { {\widehat\delta} } \def\widehat{\Omega}{\widehat{\Omega}} \def\widehat{V}{\widehat{V}} \def{\cal{T}}{{\cal{T}}} \def{\widehat{\widetilde{V}}}{{\widehat{\widetilde{V}}}} \def{\widehat{\widetilde{R}}}{{\widehat{\widetilde{R}}}} \def{\widehat{\widetilde{\Omega}}}{{\widehat{\widetilde{\Omega}}}} \def\widehat{{\tilde f}}{\widehat{{\tilde f}}} \def\widehat{f}{\widehat{f}} \def\widehat{g}{\widehat{g}} \def\widehat{\phi}{\widehat{\phi}} \def\widehat{\Phi}{\widehat{\Phi}} \def\widehat{\delta}{\widehat{\delta}} \def{\bar I}{{\bar I}} \def{\bar J}{{\bar J}} \def{\bar i}{{\bar i}} \def{\bar K}{{\bar K}} \def\widetilde{\Omega}{\widetilde{\Omega}} \def\widehat{\widetilde{\epsi}}{\widehat{\widetilde{\epsi}}} \def\spinst#1#2{{#1\brack#2}} \def\vskip .4cm{\vskip .4cm} \def\noindent{\noindent} \def\omega{\omega} \def\alpha{\alpha} \def\beta{\beta} \def\gamma{\gamma} \def\Gamma{\Gamma} \def\delta{\delta} \def{1 \over \lambda}{{1 \over \lambda}} \def{1\over {r-r^{-1}}}{{1\over {r-r^{-1}}}} \def\bar{\alpha}{\bar{\alpha}} \def\bar{\beta}{\bar{\beta}} \def\bar{\gamma}{\bar{\gamma}} \def\bar{\delta}{\bar{\delta}} \def\bar{a}{\bar{a}} \def\bar{A}{\bar{A}} \def\bar{B}{\bar{B}} \def{\bf C}{\bar{C}} \def\bar{D}{\bar{D}} \def\bar{a}{\bar{a}} \def\bar{c}{\bar{c}} \def\bar{d}{\bar{d}} \def\bar{b}{\bar{b}} \def\bar{e}{\bar{e}} \def\bar f{\bar{f}} \def\bar{g}{\bar{g}} \def\hat\xi{\hat\xi} \def\hat\Xi{\hat\Xi} \def\hat u{\hat u} \def\hat v{\hat v} \def\bar u{\bar u} \def\bar v{\bar v} \def\bar \xi{\bar \xi} \let \si\sigma \let \partial\partial \let \ka\kappa \def{\alpha}^{\prime}{{\alpha}^{\prime}} \def{\beta}^{\prime}{{\beta}^{\prime}} \def{\gamma}^{\prime}{{\gamma}^{\prime}} \def{\delta}^{\prime}{{\delta}^{\prime}} \def{\rho}^{\prime}{{\rho}^{\prime}} \def{\tau}^{\prime}{{\tau}^{\prime}} \def\rho ''{\rho ''} \def{\theta}^{\prime}{{\theta}^{\prime}} \def{i\over 2}{{i\over 2}} \def{1 \over 4}{{1 \over 4}} \def{1 \over 2}{{1 \over 2}} \def{1 \over 2}{{1 \over 2}} \def\varepsilon{\varepsilon} \def\wedge{\wedge} \def\theta{\theta} \def\delta{\delta} \defi_{\de {\bf y}}{i_{\delta {\bf y}}} \defl_{\de {\bf y}}{l_{\delta {\bf y}}} \def{\bf t}{{\bf t}} \def{\bf v}{{\bf v}} \def{\tilde G}{{\tilde G}} \def\bf {\de y}{\bf {\delta y}} \def\partial{\partial} \def{\partial \over {\partial x^+}}{{\partial \over {\partial x^+}}} \def{\partial \over {\partial x^-}}{{\partial \over {\partial x^-}}} \def{\partial \over {\partial x^i}}{{\partial \over {\partial x^i}}} \def\pdy#1{{\partial \over {\partial y^{#1}}}} \def\pdx#1{{\partial \over {\partial x^{#1}}}} \def\pdyx#1{{\partial \over {\partial (yx)^{#1}}}} \def\vskip .4cm{\vskip .4cm} \def${\cal M}_A$~{${\cal M}_A$~} \def${\cal M}_B$~{${\cal M}_B$~} \defg_{ij}{g_{ij^{\star}}} \defK\"ahler~{K\"ahler~} \def\noindent{\noindent} \defz^I{z^I} \defz^i{z^i} \def{\bar z}^{i^{\star}}{{\bar z}^{i^{\star}}} \def\bar z{\bar z} \def{\bar z}^J{{\bar z}^J} \defR_{i j^{\star} k l^{\star}}{R_{i j^{\star} k l^{\star}}} 
\defj^{\star}{j^{\star}} \defk^{\star}{k^{\star}} \defn^{\star}{n^{\star}} \defl^{\star}{l^{\star}} \defi^{\star}{i^{\star}} \defm^{\star}{m^{\star}} \def{\partial \over \partial X^I}{{\partial \over \partial X^I}} \def{\partial \over \partial L^I}{{\partial \over \partial L^I}} \def{\partial \over \partial L^J}{{\partial \over \partial L^J}} \def{\partial \over \partial X^J}{{\partial \over \partial X^J}} \def{\partial \over \partial z^i}{{\partial \over \partial z^i}} \def{\partial \over \partial z^k}{{\partial \over \partial z^k}} \def{\partial \over \partial z^j}{{\partial \over \partial z^j}} \def{\partial \over \partial y^A}{{\partial \over \partial y^A}} \def{\partial \over \partial y^N}{{\partial \over \partial y^N}} \def\bar Q{\bar Q} \def\bar F{\bar F} \def{\bf C}{\bar C} \def\bar T{\bar T} \def{\bar f}_{\is}{{\bar f}_{i^{\star}}} \def\bar f{\bar f} \defX^I{X^I} \def{\bar X}^I{{\bar X}^I} \defX^J{X^J} \defX^0{X^0} \defN_{IJ}{N_{IJ}} \defL^I(z){L^I(z)} \def{\bar L}^I(\bar z){{\bar L}^I(\bar z)} \def\omega{\omega} \def{\buildrel \circ \over \omega}{{\buildrel \circ \over \omega}} \def\alpha{\alpha} \def\gamma{\gamma} \def{i\over 2}{{i\over 2}} \def{1 \over 4}{{1 \over 4}} \def{1 \over 2}{{1 \over 2}} \def\varepsilon{\varepsilon} \def\boldsymbol {\epsilon}{{\bf \epsilon}} \def{\bar \epsi}{{\bar \varepsilon}} \def{\bar \psi}{{\bar \psi}} \def{\bar\theta}{{\bar\theta}} \def\partial_i{\partial_i} \def\partial_{\is}{\partial_{i^{\star}}} \def\bar\rho{\bar\rho} \def\bar\lambda{\bar\lambda} \def(G^{+}){(G^{+})} \def\partial_i{\partial_i} \def\partial_k{\partial_k} \def\partial_j{\partial_j} \def\partial_{\is}{\partial_{i^{\star}}} \def\wedge{\wedge} \def\theta{\theta} \def\delta{\delta} \def{C^A}_{BC}{{C^A}_{BC}} \def{C^C}_{AB}{{C^C}_{AB}} \defi_{\de {\bf y}}{i_{\delta {\bf y}}} \defl_{\de {\bf y}}{l_{\delta {\bf y}}} \def{\bf t}{{\bf t}} \def{\tilde G}{{\tilde G}} \def\bf {\de y}{\bf {\delta y}} \defq-Poincar\'e~{q-Poincar\'e~} \def\A#1#2{ A^{#1}_{~~~#2} } \def\R#1#2{ R^{#1}_{~~~#2} } \def\Rp#1#2{ (R^+)^{#1}_{~~~#2} } \def\Rpinv#1#2{ [(R^+)^{-1}]^{#1}_{~~~#2} } \def\Rm#1#2{ (R^-)^{#1}_{~~~#2} } \def\Rinv#1#2{ (R^{-1})^{#1}_{~~~#2} } \def\Rsecondinv#1#2{ (R^{\sim 1})^{#1}_{~~~#2} } \def\Rinvsecondinv#1#2{ ((R^{-1})^{\sim 1})^{#1}_{~~~#2} } \def\Rpm#1#2{(R^{\pm})^{#1}_{~~~#2} } \def\Rpminv#1#2{((R^{\pm})^{-1})^{#1}_{~~~#2} } \def{\cal R}{{\cal R}} \def\Rb#1#2{{ {\cal R}^{#1}_{~~~#2} }} \def\Rbp#1#2{{ ({\cal R}^+)^{#1}_{~~~#2} }} \def\Rbm#1#2{ ({\cal R}^-)^{#1}_{~~~#2} } \def\Rbinv#1#2{ ({\cal R}^{-1})^{#1}_{~~~#2} } \def\Rbpm#1#2{({\cal R}^{\pm})^{#1}_{~~~#2} } \def\Rbpminv#1#2{(({\cal R}^{\pm})^{-1})^{#1}_{~~~#2} } \defR^{\pm}{R^{\pm}} \defR^{+}{R^{+}} \defR^{-}{R^{-}} \def{\hat R}{{\hat R}} \def{\hat {\Rbo}}{{\hat {{\cal R}}}} \def\widehat{R}#1#2{ {\hat R}^{#1}_{~~~#2} } \def\Rbar#1#2{ {\bar R}^{#1}_{~~~#2} } \def\L#1#2{ \Lambda^{#1}_{~~~#2} } \def\Linv#1#2{ \Lambda^{-1~#1}_{~~~~~#2} } \def\Rbhat#1#2{ {\hat {\Rbo}}^{#1}_{~~~#2} } \def\Rhatinv#1#2{ ({\hat R}^{-1})^{#1}_{~~~#2} } \def\Rbhatinv#1#2{ ({\hat {\Rbo}}^{-1})^{#1}_{~~~#2} } \def\Z#1#2{ Z^{#1}_{~~~#2} } \def\Rt#1{ {\hat R}_{#1} } \def\Lambda{\Lambda} \def{\hat R}{{\hat R}} \def\ff#1#2#3{f_{#1~~~#3}^{~#2}} \def\MM#1#2#3{M^{#1~~~#3}_{~#2}} \def\cchi#1#2{\chi^{#1}_{~#2}} \def\ome#1#2{\omega_{#1}^{~#2}} \def\RRhat#1#2#3#4#5#6#7#8{\Lambda^{~#2~#4}_{#1~#3}|^{#5~#7}_{~#6~#8}} \def\RRhatinv#1#2#3#4#5#6#7#8{(\Lambda^{-1})^ {~#2~#4}_{#1~#3}|^{#5~#7}_{~#6~#8}} \def\LL#1#2#3#4#5#6#7#8{\Lambda^{~#2~#4}_{#1~#3}|^{#5~#7}_{~#6~#8}} 
\def\LLinv#1#2#3#4#5#6#7#8{(\Lambda^{-1})^ {~#2~#4}_{#1~#3}|^{#5~#7}_{~#6~#8}} \def\U#1#2#3#4#5#6#7#8{U^{~#2~#4}_{#1~#3}|^{#5~#7}_{~#6~#8}} \def{\bf C}{{\bf C}} \def\CC#1#2#3#4#5#6{{\bf C}_{~#2~#4}^{#1~#3}|_{#5}^{~#6}} \def\cc#1#2#3#4#5#6{C_{~#2~#4}^{#1~#3}|_{#5}^{~#6}} \def\C#1#2{ {\bf C}_{#1}^{~~~#2} } \def\c#1#2{ C_{~#1}^{#2} } \def\cl#1#2{ C_{~#1}^{#2} } \def\q#1{ {{q^{#1} - q^{-#1}} \over {q^{{1 \over 2}}-q^{-{1 \over 2}}}}} \def\Dmat#1#2{D^{#1}_{~#2}} \def\Dmatinv#1#2{(D^{-1})^{#1}_{~#2}} \def\Delta_R{\Delta_R} \def\Delta_L{\Delta_L} \def\f#1#2{ f^{#1}_{~~#2} } \def\F#1#2{ F^{#1}_{~~#2} } \def\T#1#2{ T^{#1}_{~~#2} } \def\Ti#1#2{ (T^{-1})^{#1}_{~~#2} } \def\Tp#1#2{ (T^{\prime})^{#1}_{~~#2} } \def\Th#1#2{ {\hat T}^{#1}_{~~#2} } \def T^{\prime} { T^{\prime} } \def\M#1#2{ M_{#1}^{~#2} } \defq^{-1}{q^{-1}} \defr^{-1}{r^{-1}} \defu^{-1}{u^{-1}} \defv^{-1}{v^{-1}} \defx^{-}{x^{-}} \defx^{+}{x^{+}} \deff_-{f_-} \deff_+{f_+} \deff_0{f_0} \def\Delta{\Delta} \def\Delta_{N+1}{\Delta_{N+1}} \def\kappa_{N+1}{\kappa_{N+1}} \def\epsi_{N+1}{\varepsilon_{N+1}} \def\Mat#1#2#3#4#5#6#7#8#9{\left( \matrix{ #1 & #2 & #3 \cr #4 & #5 & #6 \cr #7 & #8 & #9 \cr }\right) } \defA^{\prime}{A^{\prime}} \def\Delta^{\prime}{\Delta^{\prime}} \defI^{\prime}{I^{\prime}} \def\epsi^{\prime}{\varepsilon^{\prime}} \def\kappa^{\prime}{\kappa^{\prime}} \def\kappa^{\prime -1}{\kappa^{\prime -1}} \def\kappa^{\prime 2}{\kappa^{\prime 2}} \def\kappa^{-1}{\kappa^{-1}} \defg^{\prime}{g^{\prime}} \defq \rightarrow 1{q \rightarrow 1} \defr \rightarrow 1{r \rightarrow 1} \defq,r \rightarrow 1{q,r \rightarrow 1} \defF_{\mu\nu}{F_{\mu\nu}} \defA_{\mu}{A_{\mu}} \defA_{\nu}{A_{\nu}} \def\part_{\mu}{\partial_{\mu}} \def\part^{\mu}{\partial^{\mu}} \def\part_{\nu}{\partial_{\nu}} \defA_{\nu]}{A_{\nu]}} \defB_{\nu]}{B_{\nu]}} \defZ_{\nu]}{Z_{\nu]}} \def\part_{[\mu}{\partial_{[\mu}} \def$[SU(2) \times U(1)]_q~${$[SU(2) \times U(1)]_q~$} \def$SU_q(2)~${$SU_q(2)~$} \def$SU(2) \times U(1)~${$SU(2) \times U(1)~$} \defg_{ij}{g_{ij}} \defSL_q(2,{\bf C}){SL_q(2,{\bf C})} \defGL_{q,r}(N){GL_{q,r}(N)} \defIGL_{q,r}(N){IGL_{q,r}(N)} \defIGL_{q,r}(2){IGL_{q,r}(2)} \defGL_{q,r}(N+1){GL_{q,r}(N+1)} \defSL_{q,r}(N){SL_{q,r}(N)} \defU(gl_{q,r}(N)){U(gl_{q,r}(N))} \defU(gl_{q,r}(N+1)){U(gl_{q,r}(N+1))} \defU(igl_{q,r}(N)){U(igl_{q,r}(N))} \defR^*{R^*} \def\rr#1{R^*_{#1}} \def\Lpm#1#2{L^{\pm #1}_{~~~#2}} \def\Lmp#1#2{L^{\mp#1}_{~~~#2}} \defL^{\pm}{L^{\pm}} \defL^{\mp}{L^{\mp}} \defL^{+}{L^{+}} \defL^{-}{L^{-}} \def\Lp#1#2{L^{+ #1}_{~~~#2}} \def\Lm#1#2{L^{- #1}_{~~~#2}} \defg_{U(1)}{g_{U(1)}} \defg_{SU(2)}{g_{SU(2)}} \def {\rm tg} { {\rm tg} } \def$Fun(G)~${$Fun(G)~$} \def{}_{{\rm inv}}\Ga{{}_{{\rm inv}}\Gamma} \def\Ga_{{\rm inv}}{\Gamma_{{\rm inv}}} \def\stackrel{q \rightarrow 1}{\longrightarrow}{\stackrel{q \rightarrow 1}{\longrightarrow}} \def\stackrel{r \rightarrow 1}{\longrightarrow}{\stackrel{r \rightarrow 1}{\longrightarrow}} \def\stackrel{q=r \rightarrow 1}{\longrightarrow}{\stackrel{q=r \rightarrow 1}{\longrightarrow}} \def\viel#1#2{e^{#1}_{~~{#2}}} \def\rightarrow{\rightarrow} \def{\det}{{\det}} \def{\det}{{\det}} \def{\det} {{\det} } \def{\det} \T{A}{B}{{\det} \T{A}{B}} \def{\det} \T{a}{b}{{\det} \T{a}{b}} \defP{P} \defQ{Q} \def{\partial}{{\partial}} \def\pp#1#2{\Pi_{#1}^{(#2)}} \def{\cal D}{{\cal D}} \def{\cal R}{{\cal R}} \def\square{{\,\lower0.9pt\vbox{\hrule \hbox{\vrule height 0.2 cm \hskip 0.2 cm \vrule height 0.2 cm}\hrule}\,}} \def\bar \epsilon{{\bar \epsilon}} \def{\bar \theta}{{\bar \theta}} \def{\de \over {\de \Phi (x)}}{{\delta \over 
{\delta \Phi (x)}}} \def{\hat \Xi}{{\hat \Xi}} \def{\cal L}{{\cal L}} \def\Mprod{\exp [-{i \over 2} \theta^{\mu\nu} {\part_\mu} \otimes {\part_\nu}]} \def\Nprod{\exp [-{i \over 2} \theta^{\mu\nu} \hat {\part_\mu} \otimes \hat {\part_\nu}]} \def{\part_\mu}{{\partial_\mu}} \def{\part_\nu}{{\partial_\nu}} \def{\part^\nu}{{\partial^\nu}} \def\hat {\part_\mu}{\hat {\partial_\mu}} \def\hat {\part_\nu}{\hat {\partial_\nu}} \def{\de_{\de \Phi}}{{\delta_{\delta \Phi}}} \def\ddPhi{\delta_{\delta \Phi}} \def\viel#1#2{e_{#1}^{~#2}} \def\tilde X{\tilde X} \def\tilde F{\tilde F} \def\tilde G{\tilde G} \def\tilde n{\tilde n} \def\hat x{\hat x} \def\stackrel{\star}{,}{\stackrel{\star}{,}} \def{\rm f}{{\rm f}} \def{\rm R}{{\rm R}} \def{\rm T}{{\rm T}} \def\overline{\rf}^\alpha{\overline{{\rm f}}^\alpha} \def\overline{\rf}_\alpha{\overline{{\rm f}}_\alpha} \def\otimes_\star{\otimes_\star} \def\we_\star{\wedge_\star} \def\rf^\beta{{\rm f}^\beta} \def\rf_\beta{{\rm f}_\beta} \def\overline{\rR}^\al{\overline{{\rm R}}^\alpha} \def\overline{\rR}_\al{\overline{{\rm R}}_\alpha} \def\tilde \om{\tilde \omega} \def\widetilde{V}{\widetilde{V}} \def\tilde T{\tilde T} \def\widetilde{R}{\widetilde{R}} \def\tilde r{\tilde r} \def\widetilde{\epsi}{\widetilde{\varepsilon}} \def{\tilde f}{{\tilde f}} \def{\tilde h}{{\tilde h}} \def{\tilde J}{{\tilde J}} \def{\tilde K}{{\tilde K}} \def{\widetilde \phi}{{\widetilde \phi}} \def\bar \psi{\bar \psi} \def\bar \epsilon{\bar \epsilon} \def\bar \chi{\bar \chi} \def\bar \rho{\bar \rho} \def\bar \eta{\bar \eta} \def\bar \zeta{\bar \zeta} \def\Omega{\Omega} \def\overline{Q}{\overline{Q}} \def\bar n{\bar n} \def\bar r{\bar r} \def\bar s{\bar s} \def\dot n{\dot n} \def\dot r{\dot r} \def\dot s{\dot s} \def\overline \Sigma{\overline \Sigma} \def\widehat{R}{\widehat{R}} \def\widehat{P}{\widehat{P}} \def\widehat{Q}{\widehat{Q}} \def{\bf R}{{\bf R}} \def{\bf P}{{\bf P}} \def{\bf \Om}{{\bf \Omega}} \def{\bf 1}{{\bf 1}} \def\boldsymbol {\epsilon}{\boldsymbol {\epsilon}} \def\boldsymbol {\lambda}{\boldsymbol {\lambda}} \def{\bf \Phi}{{\bf \Phi}} \def{\bf \Gamma}{{\bf \Gamma}} \def{\bf C}{{\bf C}} \def{\bf G}{{\bf G}} \def{I\!\!L}{{I\!\!L}} \def\mathbb{1}{\mathbb{1}} \def\stackrel{\rightarrow}{\part}{\stackrel{\rightarrow}{\partial}} \def\stackrel{\leftarrow}{\part}{\stackrel{\leftarrow}{\partial}} \newcommand{\NP}[1]{Nucl.\ Phys.\ {\bf #1}} \newcommand{\PL}[1]{Phys.\ Lett.\ {\bf #1}} \newcommand{\NC}[1]{Nuovo Cim.\ {\bf #1}} \newcommand{\CMP}[1]{Comm.\ Math.\ Phys.\ {\bf #1}} \newcommand{\PR}[1]{Phys.\ Rev.\ {\bf #1}} \newcommand{\PRL}[1]{Phys.\ Rev.\ Lett.\ {\bf #1}} \newcommand{\MPL}[1]{Mod.\ Phys.\ Lett.\ {\bf #1}} \newcommand{\IJMP}[1]{Int.\ J.\ Mod.\ Phys.\ {\bf #1}} \newcommand{\JETP}[1]{Sov.\ Phys.\ JETP {\bf #1}} \newcommand{\TMP}[1]{Teor.\ Mat.\ Fiz.\ {\bf #1}} \begin{document} \begin{titlepage} \vskip 2em \begin{center} {\Large \bf Group manifold approach to supergravity} \\[3em] \vskip 0.5cm {\bf Leonardo Castellani} \medskip \vskip 0.5cm {\sl Dipartimento di Scienze e Innovazione Tecnologica \\Universit\`a del Piemonte Orientale, viale T. Michel 11, 15121 Alessandria, Italy\\ [.5em] INFN, Sezione di Torino, via P. Giuria 1, 10125 Torino, Italy\\ [.5em] Arnold-Regge Center, via P. Giuria 1, 10125 Torino, Italy }\\ [4em] \end{center} \begin{abstract} \vskip .4cm We present a short review of the group-geometric approach to supergravity theories, from the point of view of recent developments. 
The central idea is the unification of usual diffeomorphisms, gauge symmetries and supersymmetries into superdiffeomorphisms in a supergroup manifold. The example of $N=1$ supergravity in $d=4$ is discussed in detail, and used to illustrate all the steps in the construction of a group manifold action. In the Appendices we summarize basic notions of group manifold geometry, and of integration on supermanifolds.
\end{abstract}
\vskip 4cm \vskip .4cm \vskip .4cm
\noindent {\small Invited chapter for the ``Handbook of Quantum Gravity'', Eds. C. Bambi, L. Modesto and I.L. Shapiro, Springer, expected in 2023.}
\vskip 2cm
\noindent
\hrule
\vskip .2cm
\noindent {\small [email protected]}
\end{titlepage}
\newpage
\setcounter{page}{1}
\tableofcontents
\sect{Introduction}
Fundamental interactions are described by field theories with local invariances: the actions that govern their dynamics are invariant under field transformations involving parameters that are (arbitrary) functions of spacetime. This holds true both for gravity and for gauge theories, where the local symmetries are general coordinate transformations and gauge transformations, respectively. The essential difference between these two types of local transformations, in their infinitesimal versions, is that diffeomorphisms always contain a derivative of the field, which is absent in gauge transformations. As is well known, this is due to the fact that general coordinate transformations relate fields at different spacetime points, whereas gauge transformations relate fields at the same spacetime point. Nonetheless, it is possible to give a unified description of diffeomorphisms and gauge transformations. This we achieve in a group geometrical framework. The main idea is to consider as basic fields of the theory the {\sl components of the vielbein} one-form $\sigma^A=\sigma(z)^A_{~\Lambda} dz^\Lambda$ on the manifold of a Lie (super)group $G$, $A$ being an index in the $G$ Lie (super)algebra, and $z^\Lambda$ the coordinates of the group manifold. This vielbein satisfies the Cartan-Maurer (CM) equations
\begin{equation}
d \sigma^A + {1 \over 2} C^A_{BC} ~\sigma^B \wedge \sigma^C =0
\label{CM}
\end{equation}
where $C^A_{BC}$ are the structure constants of the $G$ Lie algebra. A brief account of group manifold geometry is given in Appendix A. The $G$ vielbein $\sigma^A (z)$ has a fixed dependence on the coordinates $z$, and therefore cannot be a dynamical object. We must consider a ``soft'' group manifold, diffeomorphic to $G$ and denoted by ${\tilde G}$, with a vielbein $\sigma^A$ no longer satisfying the CM equations. The amount of deformation from the original ``rigid'' group manifold is measured by the {\sl curvature} two-form:
\begin{equation}
R^A \equiv d \sigma^A + {1 \over 2} C^A_{BC} ~\sigma^B \wedge \sigma^C
\label{Gcurvature}
\end{equation}
Tangent vectors on $\tilde G$, dual to the vielbein $\sigma^A$, are denoted by $t_B$, so that $\sigma^A (t_B)=\delta^A_B$.
\vskip .4cm
Diffeomorphisms along tangent vectors $\varepsilon=\varepsilon^A t_A$ on $\tilde G$ are generated by the Lie derivative $\ell_\varepsilon$. When applied to the $\tilde G$ vielbein, the variation under diffeomorphisms takes the form:
\begin{equation}
\ell_\varepsilon \sigma^A = d \varepsilon^A + C^A_{BC} \sigma^B \varepsilon^C + \iota_\varepsilon R^A
\label{Lieder}
\end{equation}
where $\iota_\varepsilon$ is the contraction operator, see Appendix A. On the right-hand side one recognizes the $G$-covariant derivative of the infinitesimal parameter $\varepsilon^A$ plus a curvature term.
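Equation (\ref{Lieder}) follows directly from the Cartan formula $\ell_\varepsilon = \iota_\varepsilon d + d \iota_\varepsilon$. A short check (written here for the bosonic case, where the grading signs are trivial) uses $\iota_\varepsilon \sigma^A = \varepsilon^A$ and the definition (\ref{Gcurvature}):
\begin{eqnarray}
\ell_\varepsilon \sigma^A &=& d(\iota_\varepsilon \sigma^A) + \iota_\varepsilon (d\sigma^A) = d\varepsilon^A + \iota_\varepsilon \Big( R^A - {1 \over 2} C^A_{BC}\, \sigma^B \wedge \sigma^C \Big) \nonumber \\
&=& d\varepsilon^A - {1 \over 2} C^A_{BC} \big( \varepsilon^B \sigma^C - \sigma^B \varepsilon^C \big) + \iota_\varepsilon R^A = d \varepsilon^A + C^A_{BC}\, \sigma^B \varepsilon^C + \iota_\varepsilon R^A \nonumber
\end{eqnarray}
where the last equality uses the antisymmetry of $C^A_{BC}$ in its lower indices.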
When the curvature term vanishes, i.e. when $\iota_\varepsilon R^A=0$, the diffeomorphism takes the form of a {\it gauge transformation}, and the curvature is said to be {\it horizontal} along the $t_A$'s entering the sum in $\varepsilon=\varepsilon^A t_A$. Thus in group manifold geometry {\it gauge transformations} can be interpreted as {\it particular diffeomorphisms}, along the directions on which the curvatures are horizontal. This group geometric setting is particularly suited to supergravity theories, where local supersymmetry variations can be interpreted as diffeomorphisms in the super Poincar\'e group manifold, along the fermionic directions. It is then clear how to proceed to find theories invariant under local supersymmetry transformations: we must devise a procedure that yields actions invariant under superdiffeomorphisms. This is very similar in spirit to the superspace approach \cite{GGRS,WB}, where supergravity multiplets of dynamical (and auxiliary) fields are contained in a single superfield, depending on the superspace coordinates. However, the group manifold approach has important differences, as we explain in the coming Sections. The action is obtained with an algorithmic procedure, as the integral of a $d$-form ``living'' on the whole supergroup (soft) manifold $\tilde G$, but integrated on a $d$-dimensional bosonic {\it submanifold} of $\tilde G$. This leads to an ordinary spacetime action containing the dynamical fields (and possibly also the auxiliary fields) of a $d$-dimensional supergravity theory. This algorithm will be discussed in detail and applied to obtain the action of $N=1$, $d=4$ supergravity. The original references, where this approach was first proposed, are given in \cite{gm11}-\cite{gm14}. Reviews can be found in \cite{gm21}-\cite{gm25}, and \cite{EGH} is a standard reference for the use of differential forms in gravity and gauge theories. The paper is organized as follows. In Section 2 we recall the algebraic basis of $d=4$, $N=1$ supergravity as a theory on the (soft) superPoincar\'e manifold, and the passage to a spacetime action. Section 3 deals with the symmetries of the spacetime action, as inherited from the diffeomorphism invariances of the group manifold action. In Section 4 the variational principle is formulated for the group manifold action, and the equations of motion are derived. The building rules for (super)group manifold actions are discussed in Section 5, and applied to arrive unambiguously at the group manifold Lagrangian for $N=1$, $d=4$ supergravity. Some conclusions, and a selected list of applications and advantages of the group-geometric approach, are discussed in Section 6. Finally, the Appendices contain brief accounts of group manifold geometry, integration on supermanifolds and gamma matrix properties.
\sect{Supergravity from superPoincar\'e geometry}
\subsection{Soft superPoincar\'e manifold}
Supergravity in the first order vierbein formalism can be recast in a supergroup geometric setting as follows. Consider $G$ = superPoincar\'e group, and denote the vielbein on the $\tilde G$ manifold as $\sigma^A = (V^a,\omega^{ab},\psi^\alpha)$.
The index $A=(a,ab,\alpha)$ runs on the translations $P_a$, Lorentz rotations $M_{ab}$ and supersymmetry charges $\overline{Q}_\alpha$ of the superPoincar\'e Lie algebra: \eqa & & [P_a,P_b] =0 \label{PoincarePP} \\ & & [M_{ab},M_{cd}]= -{1 \over 2} (\eta_{ad} M_{bc} + \eta_{bc} M_{ad} -\eta_{ac} M_{bd} -\eta_{bd} M_{ac}) \label{PoincareMM} \\ & & [M_{ab},P_c]= -{1 \over 2} ( \eta_{bc} P_{a} - \eta_{ac} P_{b}) \label{PoincareMP} \\ & & [P_a,\overline{Q}_\alpha]=0 \\ & & [M_{ab},\overline{Q}_\beta]= -{1 \over 4} \overline{Q}_\alpha (\gamma_{ab})^\alpha_{~\beta} \label{sPoincareMQ} \\ & & \{ \overline{Q}_\alpha,\overline{Q}_\beta \} = -i (C\gamma^a)_{\alpha \beta} P_{a}, \label{sPoincareQQ} \end{eqnarray} $\eta$ being the flat Minkowski metric, and $C_{\alpha\beta}$ the charge conjugation matrix. The spinorial generator $\overline{Q}_\alpha \equiv Q^\beta C_{\beta\alpha}$ is a Majorana spinor, i.e. $Q^\beta C_{\beta\alpha} = Q^\dagger_\beta (\gamma_0)^\beta_{~\alpha}$. Thus the super-Poincar\'e manifold has 10 bosonic directions with coordinates $x^a$, $y^{ab}$, parametrizing translations and Lorentz rotations, and 4 fermionic directions with Grassmann coordinates $\theta^\alpha$, corresponding to the 4 supercharges $\overline{Q}_\alpha$, $\alpha=1,\ldots,4$. The components of the supervielbein of the $\tilde G$ = (soft) superPoincar\'e manifold are the vierbein $V^a$, the spin connection $\omega^{ab}$ and the gravitino $\psi^\alpha$, corresponding respectively to the generators $P_a$, $M_{ab}$ and $\overline{Q}_\alpha$. Using the structure constants of the Lie superalgebra, the curvature (\ref{Gcurvature}) becomes: \eqa & & R^{a}= dV^{a} - \omega^{a}_{~c} V^{c} - \frac{i }{2} \bar\psi\gamma^{a} \psi \equiv {\cal D} V^a - \frac{i }{2} \bar\psi\gamma^{a} \psi\label{RasuperPoincare}\\ & & R^{ab}=d \omega^{ab} - \omega^{a}_{~c} ~\omega^{cb} \label{RabsuperPoincare}\\ & & \rho= d \psi- \frac{1}{ 4} \omega^{ab} \gamma_{ab} \psi \equiv {\cal D} \psi \label{rhosuperPoincare} \end{eqnarray} defining respectively the supertorsion, the Lorentz curvature and the gravitino field strength. ${\cal D}$ is the Lorentz covariant exterior derivative. Wedge products between forms are understood when omitted. Taking the exterior derivative of these definitions yields the Bianchi identities: \eqa & & dR^a -\omega^a_{~b} R^b + R^a_{~b} V^b - i \bar \psi \gamma^a \rho \equiv {\cal D} R^a + R^a_{~b} V^b - i \bar \psi \gamma^a \rho= 0 \label{BianchiRasuperPoincare}\\ & & dR^{ab} - \omega^a_{~c} R^{cb} + \omega^b_{~c} R^{ca} \equiv {\cal D} R^{ab}=0 \label{BianchiRabsuperPoincare}\\ & & d\rho - {1 \over 4} \omega^{ab} \gamma_{ab} \rho + {1 \over 4} R^{ab} \gamma_{ab} \psi \equiv {\cal D} \rho + {1 \over 4} R^{ab} \gamma_{ab} \psi =0 \label{BianchirhosuperPoincare} \end{eqnarray} At this stage all the fields depend on all $\tilde G$ manifold coordinates, corresponding to the generators of the Lie superalgebra: thus $V^a=V^a (x,y,\theta)$, $\omega^{ab} = \omega^{ab}(x,y,\theta)$, $\psi^\alpha = \psi^\alpha (x,y,\theta)$, where the coordinates $x^a$, corresponding to the translations $P_a$, describe usual spacetime.
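\noindent To see concretely how these definitions encode the superalgebra, note for instance that the term $-\frac{i}{2} \bar\psi \gamma^a \psi$ in $R^a$ is the imprint of the anticommutator (\ref{sPoincareQQ}): inserting $C^a_{~\alpha\beta} = -i (C\gamma^a)_{\alpha\beta}$ into the general formula (\ref{Gcurvature}) gives (modulo the normalization conventions for the antisymmetric pair index $ab$)
\begin{equation}
{1 \over 2} C^a_{~\alpha\beta}\, \psi^\alpha \psi^\beta = -{i \over 2}\, \psi^\alpha (C\gamma^a)_{\alpha\beta}\, \psi^\beta = -{i \over 2} \bar\psi \gamma^a \psi
\end{equation}
while the commutator (\ref{PoincareMP}) produces the term $-\omega^a_{~b} V^b$ completing the Lorentz covariant derivative ${\cal D}V^a$.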
Moreover the one-forms $V^a$, $\omega^{ab}, \psi$ live on the whole $\tilde G$, and therefore can be expanded as: \eqa & & V^a = V^a_\mu (x,y,\theta) dx^\mu + V^a_{\mu\nu} (x,y,\theta) dy^{\mu\nu} + V^a_\alpha (x,y,\theta) d\theta^\alpha \\ & & \omega^{ab} = \omega^{ab}_\mu (x,y,\theta) dx^\mu + \omega^{ab}_{\mu\nu} (x,y,\theta)dy^{\mu\nu} +\omega^{ab}_{\alpha} (x,y,\theta)d\theta^{\alpha}\\ & & \psi^\alpha=\psi^\alpha_\mu (x,y,\theta) dx^\mu + \psi^\alpha_{\mu\nu} (x,y,\theta) dy^{\mu\nu} +\psi^\alpha_\beta (x,y,\theta) d\theta^\beta \end{eqnarray} \subsection{Group manifold action} The overabundance of field components and their dependence on the $y$ and $\theta$ coordinates can be tamed by defining an appropriate action principle. To end up with a geometrical theory in four spacetime dimensions, we first construct a 4-form Lagrangian $L$ made out of the $\tilde G$ vielbein $\sigma^A$ and its curvature $R^A$, according to a few building rules to be discussed in Section 5. The resulting Lagrangian for superPoincar\'e supergravity is given by: \begin{equation} L = R^{ab} V^{c} V^{d} \epsilon_{abcd} + 4 \bar\psi\gamma_{5} \gamma_{a} \rho V^{a} \end{equation} We then define an action by integrating this Lagrangian on a 4-dimensional submanifold $M^4$ of the $\tilde G$ manifold, spanned by the $x$ coordinates. Integration of a $d$-form $L$, living on a bigger $g$-dimensional space $\tilde G$, on a submanifold $M^d$ is carried out as follows: we multiply $L$ by the {\it Poincar\'e dual} of $M^d$, a (singular) closed $(g-d)$-form $\eta_{M^d}$ that localizes the Lagrangian on the submanifold $M^d$, and integrate the resulting $g$-form on the whole $\tilde G$. Thus the group manifold action has the general expression \begin{equation} S= \int_{\tilde G} L \wedge \eta_{M^d} \label{Gintegral} \end{equation} The fields of the theory are those contained in $L$, i.e. the $\tilde G$ vielbein components, and the embedding functions that define the $M^d$ submanifold of $\tilde G$, present in $\eta_{M^d}$. We will see later that the embedding functions do not enter the field equations obtained from the variation of (\ref{Gintegral}). This program makes use of standard integration theory when $\tilde G$ is a bosonic space, but requires some new ingredients when $\tilde G$ is a supermanifold, discussed in Appendix B. In our $d=4$ supergravity example the group manifold action is the integral on the 14-dimensional $\tilde G$ = soft superPoincar\'e manifold: \begin{equation} S= \int_{\tilde G} (R^{ab}V^c V^d \epsilon_{abcd} + 4 \bar\psi\gamma_{5} \gamma_{a} \rho V^{a})~ \eta_{M^4} \label{GintegralSP} \end{equation} \subsection{Spacetime action} A spacetime action, i.e. an action that is the integral on $M^4$ of a Lagrangian containing fields depending only on $x$, is obtained from (\ref{GintegralSP}) with a {\it particular choice} of $\eta_{M^4}$. Integration on the $y$ and $\theta$ coordinates then produces the spacetime action. This particular Poincar\'e dual is the product of two pieces: $\eta_{M^4}= \eta_y \wedge \eta_\theta$, where $\eta_y$ is a (singular) 10-form that localizes the Lagrangian on the $y=0$ hypersurface: \begin{equation} \eta_{y} = \delta (y^{12}) \delta (y^{13}) \cdots \delta (y^{34}) dy^{12} \wedge dy^{13} \wedge \cdots \wedge dy^{34} \label{eta6form} \end{equation} Integration on the $y$ coordinates reduces (\ref{GintegralSP}) to an integral on the superspace $M^{4|4}$ spanned by $x$ and $\theta$.
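\noindent A simple bosonic analogue illustrates the mechanism (a toy example, not part of the supergravity construction): embed the $x$-axis $M^1$ in $\tilde G = \mathbb{R}^2$ with coordinates $(x,y)$, so that the Poincar\'e dual is $\eta_{M^1} = \delta(y)\, dy$. For any 1-form $L = f(x,y)\, dx + g(x,y)\, dy$ one finds
\begin{equation}
\int_{\mathbb{R}^2} L \wedge \eta_{M^1} = \int_{\mathbb{R}^2} f(x,y)\, \delta(y)\, dx \wedge dy = \int_{M^1} f(x,0)\, dx
\end{equation}
the delta function removes the $y$ dependence, and the ``leg'' of $L$ along $dy$ drops out since $dy \wedge dy = 0$, exactly as in the superPoincar\'e case discussed next.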
The $y$ dependence of all fields in $L$ disappears because of the delta functions in $\eta_y$, and the ``legs'' of $L$ along $dy$ differentials are killed by the product of all independent $dy^{\mu\nu}$ in $\eta$. The other piece of $\eta_{M^4}$ (see Appendix B), after integration on $\theta$ coordinates, produces an integral on $M^4$ of a Lagrangian $4$-form, not depending any more on the $\theta$ and on the $d\theta$ differentials. Thus the action \eqa & & S_{spacetime}= \int_{\tilde G} L \wedge \eta_{M^4} = \int_{M^4} L_{y=0,dy=0,\theta=0,d\theta=0} \nonumber \\ & & ~~~~~~~~~~~~ = \int_{M^4} R^{ab}V^c V^d \epsilon_{abcd} + 4 \bar\psi\gamma_{5} \gamma_{a} \rho V^{a} \label{M4integral} \end{eqnarray} contains only the usual fields $V^a_\mu(x)$, $\omega^{ab}_\mu (x)$ and $\psi(x)$ of $N=1$ supergravity, and reproduces the first order supergravity action. \vskip .4cm \noindent {\bf Note 1:} $\eta_y$ is closed (because it contains ``functions'' depending on $y$ multiplied by all the $dy$ differentials) and not exact (because of the Dirac deltas $\delta(y)$), and thus belongs to a nontrivial de Rham cohomology class. In general deformations of the $M^4$ surface generated by diffeomorphisms leave the Poincar\'e dual $\eta$ in the same cohomology class, since the Lie derivative commutes with the exterior derivative. \vskip .4cm \noindent {\bf Note 2:} We will always assume that integration on the Lorentz coordinates has been carried out, so that all fields depend only on the $x$ and $\theta$ coordinates. Moreover all curvatures are taken to be horizontal in the Lorentz directions. As a consequence the theory lives in a superspace $M^{4|4}$ spanned by four bosonic coordinates $x^a$ and four fermionic coordinates $\theta^\alpha$. \vskip .4cm \noindent {\bf Note 3:} The spacetime action (\ref{M4integral}) and its invariance under the supersymmetry transformations (\ref{susy1})-(\ref{susy3}) were first found in ref. \cite{FFvN} in second order formalism and in \cite{DZ} in first order formalism, see also the standard references \cite{PvNreport,FVP} on supergravity. \sect{Symmetries} The action (\ref{GintegralSP}) is the integral on $\tilde G$ of a top form: it is clearly invariant under diffeomorphisms on $\tilde G$. But what we are really interested in are the symmetries of the spacetime action as given in (\ref{M4integral}), where the variations are carried out only on the $x$-dependent fields in $L|_{y=dy=0,\theta= d\theta=0}$. The only symmetries guaranteed a priori are the 4-dimensional spacetime diffeomorphisms, the spacetime action being an integral of a 4-form on $M^4$. Here resides most of the power of the group manifold formalism: if one considers the ``mother'' action (\ref{Gintegral}) on $\tilde G$, the guaranteed symmetries are {\it all} the diff.s on $\tilde G$, generated by the Lie derivative $\ell_\varepsilon$ along the tangent vectors $\varepsilon = \varepsilon^A t_A$ of $\tilde G$. But how do these symmetries transfer to the spacetime action? The variation of the group manifold action under diff.s generated by $\ell_\varepsilon$ is\footnote{Recall $\ell_\varepsilon = \iota_\varepsilon d + d \iota_\varepsilon$ so that $\ell_\varepsilon$(top form) = $d(\iota_\varepsilon$ top form)} \begin{equation} \delta S = \int_{\tilde G} \ell_\varepsilon (L \wedge \eta )= \int_{\tilde G} (\ell_\varepsilon L) \wedge \eta + L \wedge \ell_\varepsilon \eta =0 \end{equation} modulo boundary terms.
One has to vary the fields\footnote{Since $\ell_\varepsilon$ satisfies the Leibniz rule, $\ell_\varepsilon L$ can be computed by varying in turn all fields inside $L$.} in $L$ as well as the submanifold embedded in $\tilde G$: the sum of these two variations gives zero\footnote{In the following the vanishing of action variations will always be understood modulo boundary terms.} on the group manifold action $S$. But what we need in order to have a {\it spacetime} interpretation of all the symmetries of $S$ is really \begin{equation} \delta S = \int_{\tilde G} (\ell_\varepsilon L) \wedge \eta =0 \label{spacetimesymm} \end{equation} If this holds, varying the fields $\phi$ inside $L$ with the Lie derivative $\ell_\varepsilon$ as in (\ref{Lieder}), and then projecting on spacetime, yields spacetime variations \begin{equation} \delta \phi (x) = \ell_\varepsilon \phi (x,y,\theta) |_{x} \end{equation} that leave the spacetime action (\ref{M4integral}) invariant. We have denoted by $|_x$ the projection on spacetime due to the integration on the $y$ and $\theta$ coordinates in (\ref{spacetimesymm}). We call these variations {\it spacetime invariances}, since they leave the spacetime action invariant. They originate from the diff. invariance of the group manifold action, and give rise to symmetries of the spacetime action (\ref{M4integral}) only when (\ref{spacetimesymm}) holds. This happens if one of the following conditions is satisfied: \vskip .4cm \noindent $\bullet$ the Lie derivative on $\eta$ vanishes: \begin{equation} \ell_\varepsilon \eta = 0 \label{elloneta} \end{equation} \vskip .4cm \noindent $\bullet$ the spacetime projection of the Lie derivative of $L$ is exact: \begin{equation} ( \ell_\varepsilon L )|_{x} = d \alpha \label{ellonL} \end{equation} \noindent In this case the variation (\ref{spacetimesymm}) \begin{equation} \delta S = \int_{\tilde G} (\ell_\varepsilon L) \wedge \eta =\int_{M^4} (\ell_\varepsilon L )|_{x} \end{equation} vanishes after integration by parts. The requirement (\ref{ellonL}) is equivalent to \begin{equation} ( \iota_\varepsilon dL )|_{x} = d \alpha' \label{idonL} \end{equation} since $\ell_\varepsilon = \iota_\varepsilon d + d \iota_\varepsilon$. \vskip .4cm \noindent The Lagrangian $L$ depends on the $\tilde G$-vielbein $\sigma^A$ and its curvature $R^A$, so that also $dL$, after use of the Bianchi identities, is expressed in terms of $\sigma^A$ and $R^A$. Then condition (\ref{idonL}) translates into a {\it condition on the contractions} $\iota_\varepsilon R^A$, i.e. a condition on the curvature components. Let us see how this works for superPoincar\'e supergravity. \subsection{Symmetries of $d=4$ supergravity} The symmetries of the spacetime action (spacetime invariances) are those generated by a Lie derivative $\ell_\varepsilon$ such that $\iota_\varepsilon dL|_{x}= d\alpha'$, cf. (\ref{idonL}). We need to compute $dL$.
Using the Bianchi identities (\ref{BianchiRabsuperPoincare}) and (\ref{BianchirhosuperPoincare}), and the definition of the torsion $R^a$ in (\ref{RasuperPoincare}), we find: \begin{align} & dL= 2 R^{ab} R^c V^d \varepsilon_{abcd} + i R^{ab} \bar \psi \gamma^c \psi V^d \varepsilon_{abcd}+ 4 \bar \rho \gamma_5 \gamma_a \rho V^a + \nonumber \\ & ~~~~~~~ + \bar \psi \gamma_5 \gamma_c \gamma_{ab} \psi R^{ab} V^c -4 \bar \psi \gamma_5 \gamma_a \rho R^a - 2i \bar \psi \gamma_5 \gamma_a \rho \bar \psi \gamma^a \psi \label{dL1} \end{align} The gamma matrix identity \begin{equation} \gamma_c \gamma_{ab} = \eta_{ac} \gamma_b - \eta_{bc} \gamma_a +i \varepsilon_{abcd}\gamma_5 \gamma^d \end{equation} implies $\bar \psi \gamma_5 \gamma_c \gamma_{ab} \psi =i \varepsilon_{abcd} \bar \psi \gamma^d \psi$, so that the second and the fourth term in (\ref{dL1}) cancel. Moreover from the Fierz identity in Appendix C one deduces \begin{equation} \gamma_a \psi \bar \psi \gamma^a \psi =0 \label{fierz1} \end{equation} and since $\bar \psi \gamma_5 \gamma_a \rho = \bar \rho \gamma_5 \gamma_a \psi$ also the last term in (\ref{dL1}) vanishes due to (\ref{fierz1}). Therefore \begin{equation} dL= 2 R^{ab} R^c V^d \varepsilon_{abcd} + 4 \bar \rho \gamma_5 \gamma_a \rho V^a - 4 \bar \psi \gamma_5 \gamma_a \rho R^a \end{equation} \vskip .4cm \noindent {\bf Lorentz gauge transformations} \vskip .4cm It is immediate to see that if all curvatures are horizontal in the Lorentz directions (no ``legs'' along $\omega$) then indeed $\iota_{\varepsilon^{ab} t_{ab}}dL=0$, and Lorentz transformations are a spacetime invariance of the supergravity action. This is essentially due to the absence of bare $\omega^{ab}$ in $L$. The general diffeomorphism formula (\ref{Lieder}) then yields the usual Lorentz transformations \eqa & & \ell_{\varepsilon^{cd} t_{cd}} V^a = \varepsilon^a_{~b} V^b \label{LorentzonV2}\\ & & \ell_{\varepsilon^{cd} t_{cd}} \omega^{ab} = d \varepsilon^{ab} - \omega^a_{~c} \varepsilon^{cb} + \omega^b_{~c} \varepsilon^{ca}= {\cal D} \varepsilon^{ab} \label{Lorentzonom2} \\ & & \ell_{\varepsilon^{cd} t_{cd}} \psi = {1 \over 4} \varepsilon^{ab} \gamma_{ab} \psi \end{eqnarray} We can check directly the invariance of the action under these variations: all curvatures and vierbeins appearing in (\ref{M4integral}) transform homogeneously, and Lorentz indices are contracted with Lorentz invariant tensors. \vskip .4cm \noindent {\bf Spacetime diffeomorphisms} \vskip .4cm \noindent Ordinary diff.s along tangent vectors $\partial_\mu$ dual to $dx^\mu$ are invariances of the spacetime action, since (\ref{M4integral}) is an integral of a 4-form on a 4-dimensional manifold. \vskip .4cm \noindent {\bf Supersymmetry transformations} \vskip .4cm \noindent Diff.s along tangent vectors $t_\alpha$ dual to $\psi^\alpha$ are spacetime invariances provided $\iota_\epsilon dL|_{x}$ is a total derivative, with $\epsilon = \epsilon^\alpha t_\alpha$, that is to say \eqa & & \iota_\epsilon dL = 2 (\iota_\epsilon R^{ab}) R^c V^d \varepsilon_{abcd} + 2 R^{ab} (\iota_\epsilon R^c ) V^d \varepsilon_{abcd} + 8 \bar \rho \gamma_5 \gamma_a (\iota_\epsilon \rho) V^a \nonumber \\ & & - 4 \bar \epsilon \gamma_5 \gamma_a \rho R^a - 4 \bar \psi \gamma_5 \gamma_a (\iota_\epsilon \rho) R^a - 4 \bar \psi \gamma_5 \gamma_a \rho (\iota_\epsilon R^a) = tot.~ der. \label{idL2} \end{eqnarray} once projected on spacetime.
This is a condition for the contractions on the curvatures, and it is satisfied by: \eqa & & \iota_\epsilon R^a =0 \label{rh1}\\ & & \iota_\epsilon R^{ab} = - \varepsilon^{abef} \bar \rho_{ef} \gamma_5 \gamma_g \epsilon V^g - \varepsilon^{efg[a} \bar \rho_{ef} \gamma_5 \gamma_g \epsilon V^{b]} \equiv {\bar \theta}^{ab}_c \epsilon V^c \label{rh2}\\ & & \iota_\epsilon \rho =0 \label{rh3} \end{eqnarray} Thus we have supersymmetry invariance of the spacetime action if the curvatures have the following parametrization on a basis of 2-forms: \eqa & & R^a=R^a_{~bc} ~V^b V^c \label{param1}\\ & & R^{ab}= R^{ab}_{~~cd} V^c V^d + {\bar \theta}^{ab}_c ~\psi ~V^c \label{param2}\\ & & \rho= \rho_{ab}~ V^a V^b \label{param3} \end{eqnarray} where we have taken into account also horizontality in the Lorentz directions. The conditions (\ref{rh1})-(\ref{rh3}) are called ``rheonomic conditions'', and similarly (\ref{param1})-(\ref{param3}) are called ``rheonomic parametrizations'' of the curvatures. The diff.s along $\epsilon=\epsilon^\alpha t_\alpha$ (supersymmetry transformations) act on the fields according to the general formula (\ref{Lieder}), where the contractions on the curvatures are given in (\ref{rh1})-(\ref{rh3}): \eqa & & \ell_\epsilon V^a = i \bar \epsilon \gamma^a \psi \label{susy1}\\ & & \ell_\epsilon \omega^{ab} = {\bar \theta}^{ab}_c \epsilon V^c \label{susy2}\\ & & \ell_\epsilon \psi = {\cal D} \epsilon \equiv d \epsilon - {1 \over 4} \omega^{ab} \gamma_{ab} \epsilon \label{susy3} \end{eqnarray} with ${\bar \theta}^{ab}_c$ defined in (\ref{rh2}). \sect{Variational principle and field equations} The group manifold action (\ref{Gintegral}) is a functional of $L$ and of the embedded submanifold $M$, and therefore varying the action means varying both $L$ and $M$. Varying $M$ corresponds to varying $\eta_{M}$. Then the variational principle reads: \begin{equation} \label{Svariation} \delta S[L, M] = \int_{\tilde G} ( \delta L\wedge\eta_{M} + L\wedge\delta\eta_{M}) =0\,. \end{equation} Any (continuous) variation of $M$ can be obtained by acting on $\eta_M$ with a diffeomorphism generated by a Lie derivative $\ell_\xi$. An arbitrary variation is generated by an arbitrary vector $\xi$, and the variational principle becomes \begin{equation} \label{Svariation1} \delta S[L, M] = \int_{\tilde G} ( \delta L\wedge\eta_{M} + L\wedge\ell_\xi \eta_{M}) =0\,. \end{equation} Since field variations in $L$ and variations of $M$ are independent, the two terms in (\ref{Svariation1}) must vanish separately. From the vanishing of the first one we deduce \begin{equation} \int_{\tilde G} ( \delta \phi \wedge {\partial L \over \partial \phi} + d \delta \phi \wedge {\partial L \over \partial (d\phi) } ) \wedge \eta_M =0 \end{equation} where $L=L(\phi,d\phi)$ is considered a function of the 1-form fields $\phi$ and their ``velocities'' $d\phi$. A summation on all fields is understood. Integrating by parts and recalling $d \eta_M=0$ yields \begin{equation} \int_{\tilde G} \delta \phi \wedge ( {\partial L \over \partial \phi} + d {\partial L \over \partial (d\phi) } ) \wedge \eta_M =0 \end{equation} and since the $\delta \phi$ are arbitrary we find \begin{equation} ({\partial L \over \partial \phi} + d {\partial L \over \partial (d\phi)}) \wedge \eta_M=0 \label{ELequations0} \end{equation} This must hold for any $\eta_M$ (i.e.
for generic embedding functions): we arrive therefore at equations that hold on the whole $\tilde G$, and are the form version of the Euler-Lagrange equations: \begin{equation} {\partial L \over \partial \phi} + d {\partial L \over \partial (d\phi)} =0 \label{ELequations1} \end{equation} If $L$ is a $d$-form, these equations are $(d-1)$-forms. Their content can be examined by expanding them along a complete basis of $(d-1)$-forms in $\tilde G$. Requiring the vanishing of the second term in the variation (\ref{Svariation1}) does not imply further equations besides the Euler-Lagrange field equations (\ref{ELequations1}): indeed this term vanishes on the shell of solutions of the Euler-Lagrange equations. To prove it, notice that \begin{equation} \int_{\tilde G} L \wedge \ell_\xi \eta_M = - \int_{\tilde G} \ell_\xi L \wedge \eta_M =0 ~(on~shell) \label{onshell} \end{equation} because $\ell_\xi L$ is just a particular variation of $L$, under which the action remains stationary on-shell. Thus the group manifold variational principle leads to the field equations (\ref{ELequations1}), holding as $(d-1)$-form equations on the whole $\tilde G$. \vskip .4cm \noindent {\bf Note 1:} The variational principle {\it does not determine} the embedding of $M$ into $\tilde G$. \vskip .4cm \noindent {\bf Note 2:} The field equations (\ref{ELequations1}) are form equations, and therefore invariant under the action of a Lie derivative. More precisely, if $\phi$ is a solution of (\ref{ELequations1}), so is $\phi + \ell_\varepsilon \phi$: Lie derivatives generate symmetries of the field equations. \vskip .4cm \noindent Finally, we have the following \vskip .4cm \noindent {\bf Theorem:} $dL = 0 ~(on~shell) $ \vskip .4cm \noindent i.e. the Lagrangian, as a $d$-form on $\tilde G$, is closed on shell. To prove it recall that $\eta_M$ is closed, so that on shell we find, cf. (\ref{onshell}): \begin{equation} 0 = \int_{\tilde G} L \wedge \ell_\xi \eta_M = \int_{\tilde G} L \wedge d \iota_\xi \eta_M =- (-)^d \int_{\tilde G} dL \wedge \iota_\xi \eta_M \end{equation} $\xi$ being arbitrary, this implies $dL=0$ (on shell)\footnote{In fact, this is just Stokes' theorem applied to a region of $\tilde G$ bounded by two different hypersurfaces $M$ and $M'$.} $\square$ Let us apply the preceding discussion to the superPoincar\'e supergravity example. \subsection{Supergravity field equations} The variational equations (\ref{ELequations1}) for the group manifold action (\ref{GintegralSP}) read: \eqa & & 2 R^{c} V^{d} \epsilon_{abcd} = 0 \label{sPoincarefieldeqRa}\\ & & 2 R^{ab} V^{c} \epsilon_{abcd} + 4 \bar \psi \gamma_5 \gamma_d \rho = 0 \label{sPoincarefieldeqRab} \\ & & 8 \gamma_5 \gamma_a \rho V^a -4 \gamma_5 \gamma_a \psi R^a=0 \label{sPoincarefieldeqrho} \end{eqnarray} obtained by varying $\omega^{ab}$, $V^d$ and $\psi$ respectively. The analysis proceeds as follows: we first expand the curvatures on a basis of 2-forms\footnote{Assuming horizontality in the Lorentz directions. This amounts to considering configurations satisfying the Lorentz horizontality constraints on the curvatures.} \eqa & & R^a = R^a_{~bc} V^b V^c + {\bar \theta}^a_{~c} \psi V^c + \bar \psi K^a \psi \\ & & R^{ab} = R^{ab}_{~~cd} V^c V^d + {\bar \theta}^{ab}_{~~c} \psi V^c + \bar \psi K^{ab} \psi \\ & & \rho= \rho_{ab} V^a V^b + H_c \psi V^c + \Omega_{\alpha\beta} \psi^\alpha \psi^\beta \end{eqnarray} and then insert them into the field equations (\ref{sPoincarefieldeqRa})-(\ref{sPoincarefieldeqrho}).
These, being 3-form equations, can be expanded on the basis $\psi\psi\psi$, $\psi\psi V$, $\psi VV$, $VVV$. Their content is given below (the three lines correspond to the three eq.s of motion): \vskip .4cm \noindent$\psi\psi\psi$ sector: \eqa & &\Omega_{\alpha\beta} =0\\ & & 0=0\\ & & K^a=0 \end{eqnarray} $\psi\psi V$ sector: \eqa & & 2 \bar \psi K^{ab} \psi V^c \varepsilon_{abcd} + 4 \bar \psi \gamma_5 \gamma_d H_c \psi V^c=0 \label{psipsiV1}\\ & & ~~~~~~~~~~~~~~~~~~0=0\\ & & ~~~~~~~~~~~~~~~~~~{\bar \theta}^a_{~c}=0 \end{eqnarray} $\psi V V$ sector: \eqa & & 2 {\bar \theta}^{ab}_{~~e} \psi V^e V^c \varepsilon_{abcd} + 4 \bar \psi \gamma_5 \gamma_d \rho_{ab} V^a V^b =0 \label{psiVV1}\\ & & ~~~~~~~~~~~~~~~~~~0=0\\ & & \gamma_5 \gamma_a H_b \psi V^b V^a - 4 \gamma_5 \gamma_c \psi R^c_{~ab} V^a V^b =0 \label{psiVV3} \end{eqnarray} $V V V$ sector: \eqa & & R^a_{~bc}=0 \\ & & R^{ac} _{~~bc} - {1\over 2} \delta^a_b ~R^{cd}_{~~cd} =0 \\ & & \gamma^a \rho_{ab}=0 \end{eqnarray} Inserting $R^a_{~bc}=0$ into (\ref{psiVV3}) yields $H_c=0$, which used in (\ref{psipsiV1}) gives $K^{ab}=0$. Thus the only nontrivial relation in the ``outer'' projections is (\ref{psiVV1}), that determines $\theta^{ab}_{~~c}$ to be \begin{equation} \theta^{ab}_{~~c}=-\varepsilon^{abef} \bar \rho_{ef} \gamma_5\gamma_c - \delta^{[a}_c \varepsilon^{b]efg} \bar \rho_{ef} \gamma_5\gamma_g \label{thetabar1} \end{equation} in agreement with the $\theta^{ab}_{~~c}$ obtained from the condition (\ref{rh2}). Thus we arrive at the same curvature parametrizations (\ref{param1})-(\ref{param3}) obtained in Sect. 3.1 by requiring spacetime supersymmetry invariance. Finally, the $VVV$ sector reproduces the (super)torsion equation, and the propagation equations for the vierbein and the gravitino. \vskip .4cm \noindent{\bf Note:} From the torsion equation \begin{equation} 2 R^a_{\mu\nu} \equiv \partial_\mu V^a_\nu - \partial_\nu V^a_\mu - \omega^a_{~b,\mu} V^b_\nu + \omega^a_{~b,\nu} V^b_\mu - i \bar \psi_\mu \gamma^a \psi_\nu =0 \end{equation} we can express the spin connection in terms of $V$ and $\psi$, recovering the second order formalism: \eqa & & \omega_{ab,\mu} = {1 \over 2} V^\nu_a V^\rho_b \eta_{cd} ~(\partial_{[\mu} V_{\nu]}^c V_\rho^d - \partial_{[\mu} V_{\rho]}^c V_\nu^d + \partial_{[\nu} V_{\rho]}^c V_\mu^d) + \nonumber \\ & & ~~~~~~~~ + {i \over 4} V_a^\nu V_b^\rho (\bar \psi_\mu \gamma_\nu \psi_\rho+ \bar \psi_\nu \gamma_\rho \psi_\mu-\bar \psi_\rho \gamma_\mu \psi_\nu-(\nu \leftrightarrow \rho)) \label{omsecondorder} \end{eqnarray} \sect{Building rules} The group geometric approach provides a systematic set of building rules \cite{gm21} for constructing Lagrangians of supersymmetric theories: \vskip .4cm 1) Choose a Lie (super)algebra $G$, containing generators $P_a$ that can be associated to $d$ spacetime directions, and a Lorentz-like subalgebra $H$. Examples are the superPoincar\'e algebras in $d$ dimensions or their uncontracted versions (orthosymplectic superalgebras $OSp(N|2^{[d/2]})$). The fields of the theory are the vielbein components of the soft group manifold $\tilde G$. \vskip .4cm 2) Construct the most general $d$-form on $\tilde G$, by multiplying (with exterior products) 1-form vielbein components $\sigma^A$ and 2-form curvatures $R^A$, without bare Lorentz connection and contracting indices with $H$-invariant tensors, so that the resulting Lagrangian is a Lorentz scalar.
\vskip .4cm 3) Require that the variational equations admit the ``vacuum solution'' $R^A=0$, described by the vielbein of the rigid group manifold $G$. \vskip .4cm 4) The construction is greatly helped by the scaling properties of the fields, dictated by the structure of the Lie (super)algebra $G$, or equivalently by the Cartan-Maurer equations for the $G$ vielbein. Consider for example the superPoincar\'e algebra: it is invariant under the rescalings $P_a \rightarrow \lambda P_a, M_{ab} \rightarrow M_{ab}, \overline{Q}_\alpha \rightarrow \lambda^{1\over 2} \overline{Q}_\alpha$. Then the curvature definitions (\ref{RasuperPoincare})-(\ref{rhosuperPoincare}) are invariant under \begin{equation} V^a \rightarrow \lambda V^a, ~~~\omega^{ab} \rightarrow \omega^{ab}, ~~~\psi \rightarrow \lambda^{1\over 2} \psi \label{rescalings1} \end{equation} The field equations must be invariant under these rescalings, and therefore the action must scale homogeneously under (\ref{rescalings1}). Since the Einstein-Hilbert term scales as $\lambda^2$, all terms must scale in the same way, and this restricts the candidate terms in the Lagrangian. \vskip .4cm 5) Finally, requiring that all terms have the same parity as the Einstein-Hilbert term further narrows the list of candidates. \vskip .4cm \subsection{The Lagrangian for $d=4$ supergravity} Following the above rules, one arrives at the $d=4$ supergravity action (\ref{GintegralSP}). We recall here the steps of the procedure \cite{gm21}. The most general Lagrangian 4-form satisfying Rule 1 can at most contain two curvatures, and is therefore of the type: \begin{equation} L = R^A R^B \nu_{AB} + R^A \nu_A + \Lambda \end{equation} with\footnote{Repeated indices are contracted with the Minkowski flat metric.} \begin{equation} R^A R^B \nu_{AB}=c_1 R^{ab} R^{cd} \varepsilon_{abcd}+c_2 R^{ab}R^{ab} +c_3R^a R^a +c_4 \bar\rho \rho+c_5 \bar\rho \gamma_5 \rho \label{quadratic} \end{equation} \noindent The first two are total derivatives, and are related to the Euler characteristic and to the Pontryagin number of $M^4$. The last three can be reduced to linear terms in the curvatures plus total derivatives. Actually scaling invariance eliminates all the terms in (\ref{quadratic}) except $R^a R^a$, since the Einstein term scales as $\lambda^2$. The torsion-squared term can be reduced to a linear term since \begin{equation} R^a R^a = ({\cal D} V^a - {i \over 2} {\bar \psi} \gamma^a \psi)R^a=d(V^a R^a)+ V^a(-R^{ab}V^b+i{\bar \psi}\gamma^a\rho)-{i \over 2} {\bar \psi} \gamma^a \psi R^a \end{equation} \noindent by virtue of the Bianchi identity (\ref{BianchiRasuperPoincare}). This leaves us with a Lagrangian of the form: \begin{equation} L=\Lambda+\nu_{ab} R^{ab} + \nu_a R^a + {\bar\nu} \rho \end{equation} \noindent where \eqa & &\Lambda=\alpha_1 \varepsilon_{abcd} V^aV^bV^cV^d+i\alpha_2\varepsilon_{abcd}{\bar \psi} \gamma^{ab} \psi V^cV^d + i\alpha_3 {\bar \psi} \gamma^{ab} \psi V^aV^b \\ & &\nu_{ab}= \beta_1 \varepsilon_{abcd} V^c V^d + \beta_2 V^a V^b + i \beta_3 {\bar \psi} \gamma_{ab} \psi + i \beta_4 \varepsilon_{abcd} {\bar \psi} \gamma^{cd} \psi \\ & &\nu_a=i \eta_1 {\bar \psi} \gamma_a \psi\\ & &\nu=\delta_1 \gamma_5 \gamma_a \psi V^a + i \delta_2 \gamma_a \psi V^a \end{eqnarray} \noindent are the most general Lorentz covariant terms. Notice that the only nonvanishing $\psi\psi$ currents are ${\bar \psi} \gamma^a \psi$ and ${\bar \psi} \gamma^{ab} \psi$.
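\noindent This last statement can be checked with the standard Majorana flip properties (cf. the gamma matrix properties in Appendix C): for anticommuting Majorana spinor 0-forms one has $\bar\lambda \Gamma \chi = t_\Gamma \, \bar\chi \Gamma \lambda$, with $t_\Gamma=+1$ for $\Gamma \in \{1,\, \gamma_5,\, \gamma_5\gamma_a\}$ and $t_\Gamma=-1$ for $\Gamma \in \{\gamma_a,\, \gamma_{ab}\}$. For the spinor {\it one-form} $\psi$ the exchange of the two factors produces an extra minus sign (the components are both Grassmann-odd and form-odd), so that $\bar\psi \Gamma \psi = -t_\Gamma\, \bar\psi \Gamma \psi$, which vanishes identically unless $t_\Gamma = -1$, i.e. precisely for $\Gamma = \gamma_a$ and $\Gamma = \gamma_{ab}$.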
Correct $\lambda^2$ scaling of $L$ drastically reduces the possible terms: $\alpha_1=\alpha_2=\alpha_3=\beta_3=\beta_4=0$. Moreover parity implies $\beta_2=\eta_1= \delta_2=0$ (all terms must have the same parity as the Einstein term $R^{ab} V^c V^d \varepsilon_{abcd}$, i.e. must be pseudoscalars). Thus we finally have: \begin{equation} L=\beta_1 \varepsilon_{abcd} R^{ab} V^c V^d + \delta_1 {\bar \psi} \gamma_5 \gamma_a \rho V^a \label{Lagrangian2} \end{equation} \noindent The requirement 3) that the vacuum be a solution of the field equations fixes the last parameter $a=\delta_1/\beta_1$. Indeed the field equations obtained from the Lagrangian (\ref{Lagrangian2}) by varying $V^a$, $\omega^{ab}$ and $\psi$ are respectively: \eqa & & 2 R^{ab} V^c \varepsilon_{abcd} + a {\bar \psi} \gamma_5 \gamma_d \rho = 0 \\ & & 2{\cal D} V^c V^d \varepsilon_{abcd} + {1 \over 4} a {\bar \psi} \gamma_5 \gamma_d \gamma_{ab} \psi V^d =0 \label{fieldeq2} \\ & & 2a\gamma_5\gamma_a \rho V^a - a \gamma_5 \gamma_a \psi R^a = 0 \end{eqnarray} \noindent To find the first is immediate; for the second we only have to recall that varying $\omega^{ab}$ in $R^{ab}$ yields $\delta R^{ab}={\cal D}(\delta \omega^{ab})$, and that by integrating by parts the Lorentz covariant derivative ${\cal D}$ can be transferred on $V^a$. Finally for the gravitino variation we have \eqa & & {1 \over a} \delta {\cal L} = (\delta {\bar \psi}) \gamma_5 \gamma_a {\cal D} \psi V^a + {\bar \psi} \gamma_5 \gamma_a {\cal D} (\delta \psi) V^a =\\ & & =(\delta {\bar \psi}) \gamma_5 \gamma_a {\cal D} \psi V^a+ {\bar \psi} \gamma_5 \gamma_a \delta\psi {\cal D} V^a+ \delta{\bar \psi} \gamma_5 \gamma_a {\cal D} \psi V^a=\\ & & = 2 (\delta {\bar \psi}) \gamma_5 \gamma_a {\cal D} \psi V^a - \delta {\bar \psi} \gamma_5 \gamma_a \psi ( R^a + {i \over 2} {\bar \psi} \gamma^a \psi ) =\\ & &=(\delta {\bar \psi}) (2 \gamma_5 \gamma_a {\cal D} \psi V^a-\gamma_5 \gamma_a \psi R^a) \end{eqnarray} \noindent by virtue of ${\bar \psi} \gamma_5 \gamma_a (\delta\psi)=-(\delta {\bar \psi}) \gamma_5 \gamma_a \psi$ and the Fierz identity \begin{equation} \gamma_a \psi {\bar \psi} \gamma^a \psi=0 \end{equation} \noindent Note that using the gamma-algebra identity: \begin{equation} \gamma_5 \gamma_d \gamma_{ab}=2\gamma_5 \delta_{d[a} \gamma_{b]}-i \varepsilon_{abcd} \gamma^c \end{equation} \noindent the variational equation (\ref{fieldeq2}) can be recast in the form: \begin{equation} 2R^cV^d\varepsilon_{abcd}+{i \over 4} (4-a) {\bar \psi} \gamma_5 \gamma_d \gamma_{ab} \psi V^d =0 \end{equation} \noindent so that the vacuum, defined by vanishing curvatures, is a solution of the field equations only if $a=4$. \vskip .4cm In conclusion: applying the building rules with $G$ = superPoincar\'e yields the $N=1$, $d=4$ supergravity action (\ref{GintegralSP}). \sect{Conclusions} In this review we have focused mostly on the logic of the group manifold approach, applied to a single example, i.e. $N=1$, $d=4$ supergravity. Comprehensive discussions on the applications of the method for the construction of supergravity theories in diverse dimensions can be found in the recent reviews \cite{gm24,gm25}. \vskip .4cm \noindent We list here some of the advantages/motivations: \vskip .4cm \noindent - all fields have a group-geometric origin, even if they are not all gauge fields. \noindent - all symmetries have a common origin as diffeomorphisms on $\tilde G$.
\noindent - there is a systematic procedure based on group geometry to construct actions, invariant under diffeomorphisms, and under gauge symmetries closing on a subgroup of $G$. \noindent - supersymmetry is formulated in a very natural way as a diffeomorphism in Grassmann directions of a supermanifold. \noindent - closer contact is maintained with the usual component actions, whereas in the superfield formalism the actions look quite different. In fact the group manifold action interpolates between the component and the superfield actions of the same supergravity theory, see \cite{if1}-\cite{if3}. \noindent - in the group manifold formulation of $d=6$ supergravity \cite{d6SG} and $d=10$ supergravity \cite{d10SG} the self-duality conditions for the 3-form (in $d=6$) and 5-form (in $d=10$) curvatures follow from the field equations in the respective superspaces, and do not need to be imposed as external constraints. \vskip .4cm \noindent Finally, we recall some conceptual advances due to the group-geometric treatment of supergravity: \vskip .4cm \noindent - the generalization to $p$-form potentials, necessary to treat supergravity theories with $p$-form fields, in the framework of Free Differential Algebras (FDA) \cite{sullivan,DFd11,gm21,DFTvN,FDAnew1}, and their dual formulation \cite{FDAdual1}-\cite{FDAdual4}. \noindent - the bridge between superspace and group manifold methods provided by superintegration, developed in ref.s \cite{if1}-\cite{if3}. \noindent - a covariant hamiltonian formalism, initially proposed in \cite{CCF1}-\cite{CCF3}, based on the definition of field momenta as derivatives of the Lagrangian with respect to the exterior derivative of the fields, not involving a preferred direction (time). Recent developments \cite{CD,SGcovariantH} include the construction of all canonical symmetry generators for $N=1$, $d=4$ supergravity \cite{SGcovariantH}. This covariant hamiltonian formalism can also be generalized to a noncommutative (twisted) setting \cite{LCtwistedH}, describing noncommutative twisted (super)gravity \cite{AC1,AC2}. \section*{Acknowledgements} In writing this review we have benefited from discussions with Carlo Alberto Cremonini, Riccardo D'Auria and Pietro Antonio Grassi. We acknowledge partial support from INFN, CSN4, Iniziativa Specifica GSS. This research has financial support from Universit\`a del Piemonte Orientale.
\section{Algorithms} \label{sec:algorithms} In this section, we discuss computational aspects of deciding our bisimulation. Since $\sim$ is a relation over distributions over the system's state space, it is uncountably infinite even for simple finite systems, which in principle makes it intricate to decide. Fortunately, the bisimulation relation has a linear structure, and this allows us to employ methods of linear algebra to work with it effectively. Moreover, important classes of continuous-space systems can be dealt with, since their structure can be exploited. We exemplify this on a subset of deterministic stochastic automata, for which we are able to provide an algorithm to decide bisimilarity. \subsection{Finite systems -- greatest fixpoints} \label{sec:algorithms-finite} Let us fix a PA $(\mathit{S}, \mathit{L}, \longrightarrow)$. We apply the standard approach by starting with $\mathcal{D}(\mathit{S}) \times \mathcal{D}(\mathit{S})$ and pruning the relation until we reach the fixpoint $\sim$. In order to represent $\sim$ using linear algebra, we identify a distribution $\mu$ with a vector $(\mu(s_1),\ldots,\mu(s_{|\mathit{S}|}))\in\mathbb{R}^{|\mathit{S}|}$. Although the space of distributions is uncountable, we construct an implicit representation of $\sim$ by a system of equations written as columns in a matrix $E$. \begin{definition} A matrix $E$ with $|\mathit{S}|$ rows is a \emph{bisimulation matrix} if for some bisimulation $R$, for any distributions $\mu,\nu$ $$\mu\, R\,\nu \;\;\;\text{iff}\;\;\; (\mu-\nu)E=0.$$ \end{definition} \noindent For a bisimulation matrix $E$, an equivalence class of $\mu$ is then the set $(\mu+\{\rho\mid\rho E=0\})\cap\mathcal{D}(\mathit{S})$, the set of distributions that are equal modulo $E$. \begin{example}\label{ex:bisim-matrix} The bisimulation matrix $E$ below encodes that several conditions must hold for two distributions $\mu,\nu$ to be bisimilar. Among others, if we multiply $\mu-\nu$ with e.g.\ the second column, we must get $0$. This translates to $(\mu(v)-\nu(v))\cdot1=0$, i.e. $\mu(v)=\nu(v)$. Hence for bisimilar distributions, the measure of $v$ has to be the same. This proves that $u\not\sim v$ (here we identify states and their Dirac distributions). Similarly, we can prove that $\;t \;\sim\; \frac{1}{2} t' + \frac{1}{2} t''$. Indeed, if we multiply the corresponding difference vector $(0,0,1,-\frac12,-\frac12,0,0)$ with any column of the matrix, we obtain $0$.
\begin{tikzpicture}[outer sep=0.1em,->, xscale=0.9, yscale=1.2, state/.style={draw,circle,minimum size=1.6em,inner sep=0.1em}] \begin{scope} \node[state] (s) at (0,0.5) {$s$}; \node[state] (t) at (1,0.5) {$t$}; \node[state] (u) at (2,1) {$u$}; \node[state] (v) at (2,0) {$v$}; \path[->] (s) edge node[above] {$a$} (t) (t) edge node[above]{\textonehalf} node[below]{$a$}(u) (t) edge node[below]{\textonehalf} (v) (u) edge[loop above,looseness=4] node[right]{$b$} (u) (v) edge[loop above,looseness=4] node[right]{$c$} (v) ; \node[state] (s') at (4,0.5) {$s'$}; \node[state] (t1) at (3,1) {$t'$}; \node[state] (t2) at (3,0) {$t''$}; \path[->] (s') edge node[below] {$a$} node[above]{\textonehalf} (t1) (s') edge node[below]{\textonehalf} (t2) (t1) edge node[below] {$a$} (u) (t2) edge node[below] {$a$} (v) ; \end{scope} \begin{scope}[xshift=20em,yshift=16,scale=0.8, every node/.style={scale=0.8},] \node [font=\small,gray] at(-2.3,.05){ $\begin{array}{c} {s:} \\ {s':} \\ {\footnotesize t:} \\ {\footnotesize t':} \\ {\footnotesize t'':} \\ {\footnotesize u:} \\ {\footnotesize v:} \end{array}$}; \node [font=\small] at(0,0){ $\left(\begin{array}{ccccc} 1&0&0&0&0\\ 1&0&0&0&0\\ 1&0&0&\text{\textonehalf}&\text{\textonehalf}\\ 1&0&0&0&1\\ 1&0&0&1&0\\ 1&0&1&0&0\\ 1&1&0&0&0 \end{array}\right)$}; \end{scope} \end{tikzpicture} \end{example} Note that the unit matrix is always a bisimulation matrix, not relating anything with anything but itself. For which bisimulations do there exist bisimulation matrices? We say a relation $R$ over distributions is \emph{linear} if $\mu R \nu$ and $\mu' R \nu'$ imply $\big(p\mu+(1-p)\mu'\big)\;R\;\big( p\nu+(1-p)\nu'\big)$ for any $p\in[0,1]$. \begin{lemma}\label{lem:existence} For every linear bisimulation there exists a corresponding bisimulation matrix. \end{lemma} Since $\sim$ is linear (see the appendix), there is a bisimulation matrix corresponding to $\sim$. It is the least restrictive bisimulation matrix $E$ (note that all bisimulation matrices with the least possible dimension have identical solution space), and we call it a \emph{minimal bisimulation matrix}. We show that the necessary and sufficient condition for $E$ to be a bisimulation matrix is \emph{stability} with respect to transitions. \begin{definition} For a $|\mathit{S}|\times|\mathit{S}|$ matrix $P$, we say that a matrix $E$ with $|\mathit{S}|$ rows is \emph{$P$-stable} if for every $\rho\in\mathbb{R}^{|\mathit{S}|}$, \begin{align} \rho E=0 \implies \rho P E=0 \end{align} \end{definition} \noindent We first briefly explain the stability in a simpler setting. \subsubsection{Action-deterministic systems.} Let us consider PA where in each state, there is at most one transition. For each $a\in\mathit{L}$, we let $P_a=(p_{ij})$ denote the transition matrix such that for all $i,j$, if there is a (unique) transition $s_i\patransa{a} \mu$, we set $p_{ij}$ to $\mu(s_j)$, otherwise to $0$. Then $\mu$ evolves under $a$ into $\mu P_a$. Denote $\vec{1}=(1,\ldots,1)^\top$. \begin{proposition}\label{prop-alg-determ} In an action-deterministic PA, $E$ containing $\vec 1$ is a bisimulation matrix iff it is $P_a$-stable for all $a\in\mathit{L}$. \end{proposition} To get a minimal bisimulation matrix $E$, we start with a single vector $\vec{1}$, which stands for the equation saying that the overall probability mass in bisimilar distributions is the same.
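This construction is easy to prototype. The following sketch (Python with \texttt{numpy}; the state order $s,s',t,t',t'',u,v$ and the transition matrices are transcribed from Example~\ref{ex:bisim-matrix}, everything else is illustrative) closes $\{\vec 1\}$ under left multiplication by the matrices $P_a$ and then checks the claims of the example. The column set it produces may differ from the matrix displayed there, but it spans the same space, which is all that matters for the induced relation.
\begin{verbatim}
import numpy as np

# State order as in Example 1: s, s', t, t', t'', u, v.
# One transition matrix per action; row i says where the
# probability mass of state i moves (the PA is action-deterministic).
Pa = np.array([[0, 0, 1, 0,  0,  0,  0 ],   # s   -a-> t
               [0, 0, 0, .5, .5, 0,  0 ],   # s'  -a-> 1/2 t' + 1/2 t''
               [0, 0, 0, 0,  0,  .5, .5],   # t   -a-> 1/2 u  + 1/2 v
               [0, 0, 0, 0,  0,  1,  0 ],   # t'  -a-> u
               [0, 0, 0, 0,  0,  0,  1 ],   # t'' -a-> v
               [0, 0, 0, 0,  0,  0,  0 ],   # u: no a-transition
               [0, 0, 0, 0,  0,  0,  0 ]])  # v: no a-transition
Pb = np.zeros((7, 7)); Pb[5, 5] = 1         # u -b-> u
Pc = np.zeros((7, 7)); Pc[6, 6] = 1         # v -c-> v

cols = [np.ones(7)]                          # start from the vector 1
changed = True
while changed:                               # close under all P_a
    changed = False
    for P in (Pa, Pb, Pc):
        for c in list(cols):
            new = P @ c                      # new candidate equation
            if np.linalg.matrix_rank(np.column_stack(cols + [new])) > len(cols):
                cols.append(new)             # keep it if independent
                changed = True
E = np.column_stack(cols)                    # a bisimulation matrix

d = np.array([0, 0, 1, -.5, -.5, 0, 0])      # t - (1/2 t' + 1/2 t'')
assert np.allclose(d @ E, 0)                 # hence t ~ 1/2 t' + 1/2 t''
du_v = np.eye(7)[5] - np.eye(7)[6]           # u - v
assert not np.allclose(du_v @ E, 0)          # hence u and v are not bisimilar
\end{verbatim}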
The iteration then runs as follows: we repeatedly multiply all vectors we have by all the matrices $P_a$ and add each resulting vector to the collection if it is linearly independent of the current collection, until there are no changes. In Example \ref{ex:bisim-matrix}, the second column of $E$ is obtained as $P_c\vec1$, the fourth one as $P_a(P_c\vec1)$ and so on. The set of all columns of $E$ is thus given by the described iteration $$\{P_a\mid a\in\mathit{L}\}^*\vec{1}$$ modulo linear dependency. Since the matrices $P_a$ have $|\mathit{S}|$ rows, at most $|\mathit{S}|$ linearly independent vectors can be collected, so the fixpoint is reached within $|\mathit{S}|$ iterations, yielding $1\leq d\leq|\mathit{S}|$ equations. Each class then forms an $(|\mathit{S}|-d)$-dimensional affine subspace intersected with the set of probability distributions $\mathcal{D}(\mathit{S})$. This is also the principal idea behind the algorithm of~\cite{DBLP:journals/siamcomp/Tzeng92} and~\cite{DBLP:journals/ijfcs/DoyenHR08}. \subsubsection{Non-deterministic systems.} In general, for transitions under $A$, we have to consider $c_i^A$ non-deterministic choices in each $s_i$ among all the outgoing transitions under some $a\in A$. We use variables $w_i^j$ denoting the probability that the $j$-th transition, say $(s_i,a_i^j,\mu_i^j)$, is taken by the scheduler/player\footnote{ We use the standard notion of Spoiler-Duplicator bisimulation game (see e.g.~\cite{Sangiorgi:2011:ATB:2103601}) where in $\{\mu_0,\mu_1\}$ Spoiler chooses $i\in\{0,1\}$, $A\subseteq\mathit{L}$, and $\mu_i \tran{A} \mu_i'$, Duplicator has to reply with $\mu_{1-i}\tran{A}\mu_{1-i}'$ such that $\mu_i(\mathit{S}_A) = \mu_{1-i}(\mathit{S}_A)$, and the game continues in $\{\mu_0',\mu_1'\}$. Spoiler wins iff at some point Duplicator cannot reply. } in $s_i$. We sum up the choices into a ``non-deterministic'' transition matrix $P_A^W$ with parameters $W$ whose $i$th row equals $\sum_{j=1}^{c_i^A} w^j_i \mu_i^j$. It describes where the probability mass moves from $s_i$ under $A$ depending on the collection $W$ of the probabilities the player gives each choice. By $\mathcal W_A$ we denote the set of all such $W$. A simple generalization of the approach above would be to consider $\{P_A^W\mid A\subseteq\mathit{L}, W\in\mathcal W_A\}^* \vec{1}$. However, firstly, the set of these matrices is uncountable whenever there are at least two transitions to choose from. Secondly, not all $P_A^W$ may be used, as the following example shows. \begin{example} In each bisimulation class in the following example, the probabilities of $s_1 + s_2$, $s_3$, and $s_4$ are constant, as can also be seen from the bisimulation matrix $E$, similarly to Example \ref{ex:bisim-matrix}. Further, $E$ can be obtained as $(\vec{1}\;\, P_c \vec{1} \;\, P_b \vec{1})$. Observe that $E$ is $P_{\{a\}}^{W}$-stable for $W$ that maximizes the probability of going into the ``class'' $s_3$ (both $s_1$ and $s_2$ go to $s_3$, i.e. $w_1^1 = w_2^1 = 1$); similarly for the ``class''~$s_4$.
\noindent \begin{center} \begin{tikzpicture}[x=2.5cm,y=1.2cm,outer sep=1mm,yscale=0.8, state/.style={circle,draw,minimum size=1.6em,inner sep=0.1em}, trans/.style={font=\scriptsize,->}] \node[state] (s) at (0,0) {$s_1$}; \node[state] (t) at (0,-1) {$s_2$}; \node[state] (u) at (0.6,0) {$s_3$}; \node[state] (v) at (0.6,-1) {$s_4$}; \path[trans] (s) edge node[pos=0.3,trans,above=-2]{$a$} (u); \path[trans] (s) edge node[pos=0.3,trans,above=-2]{$a$} (v); \path[trans] (t) edge node[pos=0.3,trans,below]{$a$} (u); \path[trans] (t) edge node[pos=0.3,trans,below]{$a$} (v); \path[trans] (u) edge[loop right,looseness=5] node[trans,below]{$b$} (u); \path[trans] (v) edge[loop right,looseness=5] node[trans,above]{$c$} (v); \begin{scope}[scale=0.8, every node/.style={scale=0.8},] \node at(2.5,-0.7){ $P_{\{a\}}^W=\left(\begin{array}{cccc} 0&0&w_1^1&w_1^2\\ 0&0&w_2^1&w_2^2\\ 0&0&0&0\\ 0&0&0&0 \end{array}\right)$}; \node at(4.4,-0.7){ $E=\left(\begin{array}{ccc} 1&0&0\\ 1&0&0\\ 1&0&1\\ 1&1&0 \end{array}\right)$}; \end{scope} \end{tikzpicture} \end{center} However, for $W$ with $w_1^1\neq w_2^1$, e.g.\ $s_1$ goes to $s_3$ and $s_2$ goes with equal probability to $s_3$ and $s_4$ ($w_1^1=1, w_2^1=w_2^2 = \frac{1}{2}$), we obtain from $P_{\{a\}}^{W}E$ a new independent vector $(0,0.5,0,0)^\top$ enforcing a partition finer than $\sim$. This does not mean that Spoiler wins the game when choosing such mixed $W$ in some $\mu$; it only means that Duplicator needs to choose a \emph{different} $W'$ in a bisimilar $\nu$ in order to have $\mu P_A^W \sim \nu P_A^{W'}$ for the successors. \end{example} A fundamental observation is that we get the correct bisimulation when Spoiler is restricted to finitely many ``extremal'' choices and Duplicator is restricted for such extremal $W$ to respond only with the very same $W$. To this end, consider $M_A^W=P_A^W E$ where $E$ is the current matrix, with each of its $e$ columns representing an equation. Intuitively, the $i$th row of $M_A^W$ describes how much of $s_i$ is moved to various classes when a step is taken. Denote the linear forms in $M_A^W$ over $W$ by $m_{ij}$. Since the players can randomize, mixing the choices of which transition to take, the set of vectors $\{(m_{i1}(w_i^1,\ldots,w_i^{c_i}),\ldots,m_{ie}(w_i^1,\ldots,w_i^{c_i}))\mid w_i^1,\ldots,w_i^{c_i}\geq 0,\sum_{j=1}^{c_i}w_i^j=1\}$ forms a convex polytope denoted by $C_i$. Each vector in $C_i$ is thus the $i$th row of the matrix $M_A^W$ where some concrete weights $w_i^j$ are ``plugged in''. This way $C_i$ describes all the possible choices in $s_i$ and their effect on where the probability mass is moved. Denote vertices (extremal points) of a convex polytope $P$ by $\mathcal E(P)$. Then $\mathcal E(C_i)$ correspond to pure (non-randomizing) choices that are ``extremal'' w.r.t.~$E$. Note that now if $s_j\sim s_k$ then $C_j=C_k$, or equivalently $\mathcal E(C_j)=\mathcal E(C_k)$. Indeed, for every choice in $s_j$ there needs to be a matching choice in $s_k$ and vice versa. However, since we consider bisimulation between generally non-Dirac distributions, we need to combine these extremal choices. We define the set $\mathcal E(C)\subseteq\prod_{i=1}^{|\mathit{S}|}\mathcal E(C_i)$ to contain a tuple $c=(c_1\ \cdots\ c_{|\mathit{S}|})$ iff the $c_i$'s are ``extremal in (some) same direction'', i.e.
$\sum_{i=1}^{|\mathit{S}|} c_i$ is a vertex (extremal choice) of the polytope generated by points $\{\sum_{i=1}^{|\mathit{S}|}c_i'\mid\forall i: c_i'\in C_i\}$. Each $c\in\mathcal E(C)$ is a tuple of vertices, and thus corresponds to particular choices, denoted by $W(c)$. \begin{proposition}\label{prop-alg-nondeterm} Let $E$ be a matrix containing $\vec 1$. It is a bisimulation matrix iff it is $P_A^{W(c)}$-stable for all $A\subseteq\mathit{L}$ and $c\in\mathcal E(C)$. \end{proposition} \begin{algorithm}[ht] \vspace*{-1em} \SetAlgoLined \DontPrintSemicolon \SetKwInOut{Parameter}{parameter}\SetKwInOut{Input}{Input}\SetKwInOut{Output}{Output} \SetKwData{C}{C}\SetKwData{D}{D}\SetKwData{MX}{M}\SetKwData{f}{f} \Input{Probabilistic automaton $(\mathit{S},\mathit{L},\tran{})$} \Output{A minimal bisimulation matrix $E$} \BlankLine \ForEach{$A\subseteq\mathit{L}$} {compute $P_A^W$ \hfill\texttt{// }\textsf{non-deterministic transition matrix~~~~~}} $E\gets(\vec{1})$\; \Repeat{$E$ does not change} { \ForEach{$A\subseteq\mathit{L}$} { $M_A^W\gets P_A^WE$ \hfill~~~~~~\texttt{// }\textsf{polytope of all choices}\; compute $\mathcal E(C)$ from $M_A^W$ \hfill\texttt{// }\textsf{vertices, i.e. extremal choices}\; \ForEach{$c\in\mathcal E(C)$}{ $M_A^{W(c)}\gets M_A^W$ with values $W(c)$ plugged in\; $E_{new}\gets$ columns of $M_A^{W(c)}$ linearly independent of columns of $E$\; $E\gets(E\ E_{new})$\; } } } \BlankLine \caption{Bisimulation on probabilistic automata} \label{alg-fin} \end{algorithm} \begin{theorem}\label{thm:algorithm-finite} Algorithm~\ref{alg-fin} computes a minimal bisimulation matrix. \end{theorem} The running time is exponential. We leave open the question whether linear programming or other methods \cite{DBLP:conf/fsttcs/HermannsT12} can yield $E$ in polynomial time. The algorithm can easily be turned into one computing other bisimulation notions from the literature, for which there were no algorithms so far, see Section \ref{sec:rw}. \subsection{Continuous-time systems -- least fixpoints} \label{sec:algorithms-infinite} Turning our attention to continuous systems, we finally sketch an algorithm for deciding bisimulation $\sim$ over a subclass of stochastic automata; this constitutes the first algorithm to compute a bisimulation on the uncountably large semantical object. We need to adopt two restrictions. First, we consider only \emph{deterministic} SA, where the probability that two edges become enabled at the same time is zero (when initiated in any location). Second, to simplify the exposition, we restrict all distributions occurring to exponential distributions. Notably, even for this class, our bisimulation is strictly coarser than the one induced by standard bisimulations~\cite{pepa,HHM,ymca} for continuous-time Markov chains. At the end of the section we discuss possibilities for extending the class of supported distributions. Both restrictions can be checked effectively on SA. \begin{theorem}\label{thm:tableau} Let $\mathcal{S} = (\mathcal{Q},\mathcal{C},\mathcal{A},\rightarrow,\kappa, F)$ be a deterministic SA over exponential distributions. There is an algorithm to decide in time polynomial in $|\mathcal{S}|$ and exponential in $|\mathcal{C}|$ whether $q_1 \sim q_2$ for any locations $q_1,q_2$.
\end{theorem} The rest of the section deals with the proof. We fix $\mathcal{S} = (\mathcal{Q},\mathcal{C},\mathcal{A},\rightarrow,\kappa, F)$ and $q_1,q_2 \in \mathcal{Q}$. First, we straightforwardly abstract the NLMP semantics $\mathbf{P}_\mathcal{S}$ by a NLMP $\hat{\pts}$ over state space $\hat{\states} = \mathcal{Q} \times (\mathbb{R}_{\ge 0} \cup \{-\})^\mathcal{C}$ where all negative values of clocks are expressed by one element $-$. Let $\xi$ denote the obvious mapping of distributions $\mathcal{D}(S)$ onto $\mathcal{D}(\hat{\states})$. Then $\xi$ preserves bisimulation since two states $s_1,s_2$ that differ only in negative values satisfy $\xi(\tau_a(s_1)) = \xi(\tau_a(s_2))$ for all $a\in\mathit{L}$. \begin{lemma}\label{lem:abs} For any distributions $\mu, \nu$ on $S$ we have $\mu \sim \nu$ iff $\xi(\mu) \sim \xi(\nu)$. \end{lemma} Second, similarly to the embedded Markov chain of a CTMC, we further abstract the NLMP $\hat{\pts}$ by a \emph{finite} deterministic PA $\bar{\PA} = (\bar{S}, \mathcal{A}, \longrightarrow)$ such that each state of $\bar{\PA}$ is a distribution over the uncountable state space $\hat{\states}$. \begin{itemize} \item The set $\bar{S}$ is the set of states reachable via the transition relation defined below from the distributions $\mu_1, \mu_2$ corresponding to $q_1$, $q_2$ (see Definition~\ref{def:bisim-sta}). \item Let us fix a state $\mu \in \bar{S}$ (note that $\mu\in\mathcal{D}(\hat{\states})$) and an action $a\in\mathcal{A}$ such that in the NLMP $\hat{\pts}$ an $a$-transition occurs with positive probability, i.e. $\mu \patransa{A_a} \nu$ for some $\nu$ and for $A_a = \{a\}\times \mathbb{R}_{\ge 0}$. Thanks to restricting to deterministic SA, $\hat{\pts}$ is also deterministic and such a distribution $\nu$ is uniquely defined. We set $(\mu,a,M) \in \; \longrightarrow$ where $M$ is the discrete distribution that assigns probability $p_{q,f}$ to state $\nu_{q,f}$ for each $q\in\mathcal{Q}$ and $f: \mathcal{C} \to \{-,+\}$, where $p_{q,f} = \nu(\hat{\states}_{q,f})$, $\nu_{q,f}$ is the conditional distribution $\nu_{q,f}(X) := \nu(X \cap \hat{\states}_{q,f})/\nu(\hat{\states}_{q,f})$ for any measurable $X \subseteq \hat{\states}$, and $\hat{\states}_{q,f} = \{(q',v)\in\hat{\states} \mid q' = q, \text{$v(c) \geq 0$ iff $f(c) = +$ for each $c\in\mathcal{C}$}\}$ is the set of states with location $q$ and where the sign of clock values matches $f$. \end{itemize} For exponential distributions all the reachable states $\nu \in \bar{S}$ correspond to some location $q$ where the subset $X\subseteq \mathcal{C}$ is newly sampled, hence we obtain: \begin{lemma}\label{lem:expo-finite} For a deterministic SA over exponential distributions, $|\bar{S}| \leq |\mathcal{Q}|2^{|\mathcal{C}|}$. \end{lemma} Instead of a greatest fixpoint computation as employed for the discrete algorithm, we take a complementary approach and prove or disprove bisimilarity by a least fixpoint procedure. We start with the initial pair of distributions (states in $\bar{\PA}$) which generates further requirements that we impose on the relation and try to satisfy them.
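\noindent The finiteness in Lemma~\ref{lem:expo-finite} rests on the memoryless property of the exponential distribution (a standard fact, recalled here for convenience): if $X \sim \mathrm{Exp}(\lambda)$ then
\begin{equation}
\Pr[X > s + t \mid X > s] = \frac{e^{-\lambda(s+t)}}{e^{-\lambda s}} = e^{-\lambda t} = \Pr[X > t],
\end{equation}
so conditioning a clock on not having expired yet reproduces the original exponential distribution. Consequently, each reachable state of $\bar{\PA}$ is determined by a location and the set of newly sampled clocks, rather than by arbitrary residual distributions.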
We work with a \emph{tableau}, a rooted tree where each node is either an \emph{inner node} with a pair of discrete probability distributions over states of $\bar{\PA}$ as a label, a \emph{repeated node} with a label that already appears somewhere between the node and the root, or a \emph{failure node} denoted by $\square$, and the children of each inner node are obtained by one \emph{rule} from $\{\tableau{Step}, \tableau{Lin} \}$. A tableau not containing $\square$ is \emph{successful}. \begin{description} \item[\tableau{Step}] For a node $\mu \sim \nu$ where $\mu$ and $\nu$ have \emph{compatible timing}, we add for each label $a\in\mathit{L}$ one child node $\mu_a \sim \nu_a$ where $\mu_a$ and $\nu_a$ are the unique distributions such that $\mu \patransa{a} \mu_a$ and $\nu \patransa{a} \nu_a$. Otherwise, we add one failure node. We say that $\mu$ and $\nu$ have compatible timing if for all actions $a\in\mathcal{A}$ we have $\mu(\mathit{S}_{A_a}) = \nu(\mathit{S}_{A_a})$, and if for all actions $a\in\mathcal{A}$ with $\mu(\mathit{S}_{A_a}) >0$ we have that $\mu$ restricted to $\mathit{S}_{A_a}$ is equivalent to $\nu$ restricted to $\mathit{S}_{A_a}$. \item[\tableau{Lin}] For a node $\mu \sim \nu$ linearly dependent on the set of remaining nodes in the tableau, we add one child (repeat) node $\mu \sim \nu$. Here, we understand each node $\mu \sim \nu$ as the vector $\mu - \nu$ in the $|\bar{S}|$-dimensional vector space. \end{description} \noindent Note that compatibility of timing is easy to check. Furthermore, the set of rules is correct and complete w.r.t. bisimulation in $\hat{\pts}$. \begin{lemma}\label{prop:correctness} There is a successful tableau from $\mu \sim \nu$ iff $\mu \sim \nu$ in $\hat{\pts}$. Moreover, the set of nodes of a successful tableau is a subset of a bisimulation. \end{lemma} \noindent We get Theorem~\ref{thm:tableau} since $q_1 \sim q_2$ iff $\xi(\mu_1) \sim \xi(\mu_2)$ in $\hat{\pts}$ and since, thanks to \tableau{Lin}: \begin{lemma}\label{lem:finite-tableau} There is a successful tableau from $\mu \sim \nu$ iff there is a finite successful tableau from $\mu \sim \nu$ of size polynomial in $|\bar{S}|$. \end{lemma} \begin{example}\label{ex:tableau} Let us demonstrate the rules by a simple example. Consider the following stochastic automaton $\mathcal{S}$ on the left.
\vspace{-0.7em} \begin{center} \begin{tikzpicture}[x=2.5cm,y=1.2cm,outer sep=1mm, state/.style={draw,circle, inner sep =0.4em, text centered}, trans/.style={font=\scriptsize}, prob/.style={font=\scriptsize} ] \begin{scope} \node[state] (s) at (0,0) {$q$}; \node[state] (t) at (0.8,0) {$u$}; \node[state] (u) at (1.7,0) {$v$}; \node[above=0,prob] at (s.north) {$x:=\mathrm{Exp}(1/2)$}; \node[above=-8,prob] at (s.north) {$y:=\mathrm{Exp}(1/2)$}; \node[above=-8,prob] at (t.north) {$x:=\mathrm{Exp}(1)$}; \node[above=-8,prob] at (u.north) {$x:=\mathrm{Exp}(1)$}; \path[->] (s) edge[loop left,in=205,out=165,looseness=5] node[prob,below=-2,pos=0.7]{$x = 0$} node[prob,below=-3,pos=0.1]{$a$} (s); \path[->] (s) edge[] node[prob,above=-4] {$a$} node[prob,below=-4] {$y=0$} (t); \path[->] (t) edge[loop right,in=25,out=345,looseness=5] node[prob,below=-2,pos=0.3]{$x = 0$} node[prob,above=-3,pos=0.1]{$a$} (t); \path[->] (u) edge[loop right,in=25,out=345,looseness=5] node[prob,below=-2,pos=0.3]{$x = 0$} node[prob,above=-3,pos=0.1]{$a$} (u); \draw[dotted,thick] (2.4,0.5) -- (2.4,-0.3); \end{scope} \begin{scope}[xshift=200] \node[state,inner sep =0.2em] (s) at (0,0) {$\mu_q$}; \node[state,inner sep =0.2em] (t) at (0.75,0) {$\mu_u$}; \node[state,inner sep =0.2em] (u) at (1.25,0) {$\mu_v$}; \node[above left=-8,prob] at (s.north west) {}; \node[above right=-8,prob] at (t.north) {}; \node[above right=-8,prob] at (u.north) {}; \path[->] (s) edge[loop,in=60,out=0,looseness=5] node[above left=-4,pos=0.1,prob]{$a$} node[pos=0.2,name=x,inner sep=0,outer sep=0]{} node[above=-3,pos=0.8,prob] {$0.5$} (s); \path[->] (x) edge[bend left] node[prob,above=-4] {$0.5$} (t); \path[->] (t) edge[loop above,looseness=5] node[prob,right=-4,pos=0.1]{$a$} (t); \path[->] (u) edge[loop above,looseness=5] node[prob,right=-4,pos=0.1]{$a$} (u); \end{scope} \end{tikzpicture} \end{center} \vspace{-0.7em} Thanks to the exponential distributions, $\bar{\PA}$ on the right also has only three states, where $\mu_q = q \otimes \mathrm{Exp}(1/2) \otimes \mathrm{Exp}(1/2)$ is the product of two exponential distributions with rate $1/2$, $\mu_u = u\otimes \mathrm{Exp}(1)$, and $\mu_v = v \otimes \mathrm{Exp}(1)$. Note that for both clocks $x$ and $y$, the probability of getting to zero first is $0.5$. \vspace{0.2em} \begin{center} \begin{tikzpicture}[thin,scale=0.8, every node/.style={scale=0.8}] \begin{scope}[xshift=0em,yshift=-2em] \node (a1) at (0,0) {$ 1 \cdot \mu_u \; \sim \; 1 \cdot \mu_v $}; \draw (a1.south east) -- (a1.south west); \node [label,right] at (a1.south east) {\tableau{Step}}; \node (a2) at (0,-0.7) {$ 1 \cdot \mu_u \; \sim \; 1 \cdot \mu_v $}; \end{scope} \begin{scope}[xshift=20em] \node (a1) at (0,0) {$ 1 \cdot \mu_q + 0 \cdot \mu_u \; \sim \; 1 \cdot \mu_v $}; \node (a2) at (0,-0.7) {$ \frac{1}{2} \cdot \mu_q + \frac{1}{2} \cdot \mu_u \; \sim \; 1 \cdot \mu_v $}; \node (a3) at (0,-1.4) {$ \frac{1}{4} \cdot \mu_q + \frac{3}{4} \cdot \mu_u \; \sim \; 1 \cdot \mu_v $}; \node (a4) at (0,-2.1) {$\cdots $}; \draw (a1.south east) -- (a1.south west); \node [label,right] at (a1.south east) {\tableau{Step}}; \draw (a2.south east) -- (a2.south west); \node [label,right] at (a2.south east) {\tableau{Step}}; \draw (a3.south east) -- (a3.south west); \node [label,right] at (a3.south east) {\tableau{Step}}; \end{scope} \end{tikzpicture} \end{center} \vspace{-1em} \noindent The finite tableau on the left is successful since it ends in a repeated node, thus it proves $u \sim v$. The infinite tableau on the right is also successful and proves $q\sim v$.
When using only the rule \tableau{Step}, it is necessarily infinite as no node ever repeats. The rule \tableau{Lin} provides the means to truncate such infinite sequences. Observe that the third node in the tableau on the right above is linearly dependent on its ancestors. \end{example} \begin{remark} Our approach can be turned into a complete proof system for bisimulation on models with \emph{expolynomial} distributions\footnote{With density that is positive on an interval $[\ell,u)$ for $\ell \in \Nset_0$, $u \in \mathbb{N} \cup \{\infty\}$ given piecewise by expressions of the form $\sum_{i=0}^I \sum_{j=0}^J a_{ij} x^i e^{-\lambda_{ij}x}$ for $a_{ij},\lambda_{ij} \in \mathbb{R} \cup \{\infty\}$. This class contains many important distributions, such as the exponential or uniform distributions, and enables efficient approximation of others.}. Thanks to their properties, the states of the discrete transition system $\bar{\PA}$ can be expressed symbolically. In fact, we conjecture that the resulting semi-algorithm can be strengthened to a decision algorithm for this expressive class of models. As this is technically demanding, it is out of the scope of this paper. \end{remark} \section{Bisimulation coalgebraically} \subsection{Short introduction to coalgebras} Definitions of bisimulations can be given in terms of relations and we did so. However, for two reasons we also give a coalgebraic definition that induces our relational definition. Firstly, due to the general framework, our definition covers a spectrum of bisimulations depending on the interpretation of the coalgebra and is applicable to more complex systems, automatically yielding the corresponding bisimulation definitions. Secondly, any ad-hoc features of a simple coalgebraic definition are more visible and can be clearly identified, whereas it is difficult to distinguish which of two similar relational definitions is more natural. As we assume no previous knowledge of categorical notions we give a brief introduction to coalgebras in the spirit of \cite{Sangiorgi:2011:ATB:2103601}. A \emph{functor} $F$ (on sets) assigns to each set $X$ a set $F(X)$, and to each set function $f:X\to Y$ a set function $F(f):F(X)\to F(Y)$ such that two natural conditions are satisfied: (i) the identity function on $X$ is mapped to the identity function on $F(X)$ and (ii) a composition $f\circ g$ is mapped to the composition $F(f)\circ F(g)$.
\begin{example} The powerset functor $\mathcal P(-)$ maps a set $X$ to the set $\mathcal P(X)$ of its subsets and a function $f:X\to Y$ to $\mathcal P(f):\mathcal P(X)\to\mathcal P(Y)$ by $U\mapsto \{f(x)\mid x\in U\}$. Similarly, for a fixed set $\mathit{L}$, the operator $(-)^\mathit{L}$ mapping $X$ to the set $X^\mathit{L}$ of functions $\mathit{L}\to X$ is a functor, where the image of $f:X\to Y$ is $F(f):X^\mathit{L}\to Y^\mathit{L}$ given by mapping $u:\mathit{L}\to X$ to $f\circ u:\mathit{L}\to Y$. \end{example} For a functor $F$, an \emph{$F$-coalgebra} is a pair of the carrier set (or state space) $S$ and the operation function $\mathrm{next}:S\to F(S)$. Intuitively, the function $\mathrm{next}$ describes the transition to the next step. \begin{example} A transition system $(S,\rightarrow)$ with $\mathord{\rightarrow}\subseteq S\times S$ can be understood as a $\mathcal P(-)$-coalgebra by setting $\mathrm{next}(s)=\{s'\mid s\tran{} s'\}$. And vice versa, every $\mathcal P(-)$-coalgebra gives rise to a transition system. A labelled transition system $(S,\mathit{L},\rightarrow)$ with the set of labels $\mathit{L}$ and $\mathord{\rightarrow}\subseteq S\times \mathit{L}\times S$ can be seen as a $(\mathcal P(-))^\mathit{L}$-coalgebra with $\mathrm{next}:S\to (\mathcal P(S))^\mathit{L}$ given by $\mathrm{next}(s)(a) = \{s'\mid s\tran{a}s'\}$. \end{example} A \emph{bisimulation} on an $F$-coalgebra $(S,\mathrm{next})$ is an $F$-coalgebra $(R,\overline\mathrm{next})$ with $R\subseteq S\times S$ such that the two projections $\pi_1:R\to S$ and $\pi_2:R\to S$ make the following diagram commute:\footnote{I.e.\ $\mathrm{next}\circ\pi_1=F(\pi_1)\circ\overline\mathrm{next}$ and $\mathrm{next}\circ\pi_2=F(\pi_2)\circ\overline\mathrm{next}$.} $$ \begin{tikzpicture}[x=2.5cm,y=1.2cm] \node (S1) at (-1,0) {$S$}; \node (FS1) at (-1,-1) {$F(S)$}; \node (S2) at (1,0) {$S$}; \node (FS2) at (1,-1) {$F(S)$}; \node (R) at (0,0) {$R$}; \node (FR) at (0,-1) {$F(R)$}; \path[->] (S1) edge node[left] {$\mathrm{next}$} (FS1) (S2) edge node[right] {$\mathrm{next}$} (FS2) (R) edge node[left] {${\overline\mathrm{next}}$} (FR); \path[->] (R) edge node[above] {$\pi_1$} (S1) (R) edge node[above] {$\pi_2$} (S2) (FR) edge node[below] {$F(\pi_1)$} (FS1) (FR) edge node[below] {$F(\pi_2)$} (FS2); \end{tikzpicture}$$ \begin{example} For LTS, the coalgebraic bisimulation coincides with the classical one of Park and Milner \cite{DBLP:books/daglib/0067019}, where a symmetric $R$ is a bisimulation if for every $sRt$ and $s\tran{a}s'$ there is $t\tran{a}t'_{s,a,s',t}$ with $s'Rt'_{s,a,s',t}$. Indeed, given a classical bisimulation $R$, one can define $\overline\mathrm{next}(\langle s, t\rangle)(a)$ to contain for every $s\tran{a}s'$ the matching pair $\langle s',t'_{s,a,s',t}\rangle$ and symmetrically for $t$. Since all these pairs are from $R$, $(R,\overline\mathrm{next})$ is indeed a coalgebra. Further, the projection $F(\pi_1)$ of $\overline\mathrm{next}(\langle s, t\rangle)$ assigns to each $a\in \mathit{L}$ all and nothing but the successors of $s$ under $a$, symmetrically for $t$, hence the diagram commutes.
Conversely, given a coalgebraic bisimulation $(R,\overline\mathrm{next})$, the commuting of $\pi_1$ guarantees that $\overline\mathrm{next}(\langle s, t\rangle)(a)$ yields (in its first projections) all and nothing but the successors of $s$ under $a$. Hence, for each $s\tran{a}s'$ there must be $\langle s',t'\rangle\in\overline\mathrm{next}(\langle s, t\rangle)(a)\subseteq R$, moreover, with $t\tran{a}t'$ due to $\pi_2$ commuting. \end{example} \medskip As we have seen, the coalgebraic definition coincides with the relational one for non-probabilistic systems. One can use the same theory for finite probabilistic systems, too. Let $\mathcal{D}(X)$ denote the set of simple distributions, i.e.\ functions $f:X\to[0,1]$ such that $f$ is non-zero for only finitely many elements $x_1,\ldots,x_n$ and $\sum_{i=1}^n f(x_i)=1$. Note that $\mathcal{D}(-)$ can be understood as a functor. \begin{example} We can interpret $(\mathcal{D}(-)\cup\{\bullet\})^\mathit{L}$-coalgebras as finite Markov decision processes $(S,\mathit{L},Pr)$ with $Pr:S\times \mathit{L}\to\mathcal{D}(S)\cup\{\bullet\}$ that under each action either proceed to a distribution on successors (as opposed to a non-deterministic choice in LTS) or do not have the action available (the special element $\bullet$). The corresponding coalgebraic bisimulation can be shown to coincide with the classical one of Larsen and Skou~\cite{DBLP:conf/popl/LarsenS89}, where an equivalence relation $R$ is a bisimulation if $\sum_{u\in U}Pr(t,a)(u)=\sum_{u\in U}Pr(t',a)(u)$ for every $a\in \mathit{L}$, classes $T,U$ of $R$ and $t,t'\in T$. \end{example} In contrast, uncountable probabilistic systems are more intricate. Let $\mathcal{D}(X)$ now denote the set of \emph{probability measures} over $X$ (equipped with a $\sigma$-algebra clear from context). Again, defining $\mathcal{D}(f)(\mu)=\mu\circ f^{-1}$ makes $\mathcal{D}(-)$ into a functor. \begin{example} We can interpret $\mathcal{D}(-)$-coalgebras as Markov chains with general (possibly uncountable) state space. However, as already mentioned in Section~\ref{sec:intro}, it is intricate to define the corresponding coalgebraic bisimulation so that it coincides with the relational definition. \end{example} \begin{example} PA correspond to $(\mathcal P(\mathcal{D}(-)))^\mathit{L}$-coalgebras. \end{example} \subsection{Bisimulation on distributions coalgebraically} The bisimulation we proposed is induced by a different view of probabilistic systems. Namely, we consider distributions (or measures) $\mathcal{D}(\mathit{S})$ over the state space $\mathit{S}$ to form the carrier of the coalgebra. A transition then changes this distribution. For instance, a Markov chain can be seen this way as a coalgebra of the identity functor. Therefore, in order to capture the distributional semantics of NLMP and other continuous systems, we define a functor\footnote{On functions, we define the functor by $\spadesuit(f)(n)(A)=(id\times\mathcal P(f))(n(A))$. Here $\mathcal P(\mathit{L})$ denotes only the measurable sets of labels.} \begin{align*} \boxed{([0,1] \times \mathcal P(-))^{\mathcal P(\mathit{L})} } \tag{$\spadesuit$} \end{align*} The vital part is not only $[0,1]$, but also the use of measurable sets of labels instead of individual labels. We can view a NLMP $\mathbf{P} = (\mathit{S}, \mathit{L}, \{\tau_a \mid a\in\mathit{L}\})$ as a $\spadesuit$-coalgebra with carrier set $\mathcal{D}(\mathit{S})$.
The coalgebra assigns to $\mu \in \mathcal{D}(\mathit{S})$ and to a set of labels $A \in \sigmafield(\labels)$ the pair $(p,M)$ such that \begin{itemize} \item $p= \mu(\mathit{S}_A)$ is the measure of states that can read some $a\in A$ where $\mathit{S}_A = \{ s\in\mathit{S} \mid \exists a \in A. \tau_a(s) \neq \emptyset \}$; \item $M = \emptyset$ if $\mu(\mathit{S}_A) = 0$, and $M$ is the set of convex combinations\footnote{The set of convex combinations is lifted to a measurable set $Z$ of measures over $\mathit{S}$ as the set $\{X \mapsto \int_{\mu\in Z} \mu(X) \nu(d\,\mu) \mid \text{$\nu$ is a measure over $Z$}\}$.} over $\{\mu_\rho \mid \text{measurable }\rho:\mathit{S}_A\to\bigcup_{a\in A}\tau_a \}$, otherwise, where $$ \mu_\rho(X) = \frac{1}{\mu(\mathit{S}_A)} \cdot \int_{s\in\mathit{S}} \rho(s)(X)\ \mu(ds) \quad \forall X\in\sigmafield(\states).$$ \end{itemize} In other words, $M$ is obtained by restricting $\mu$ to the states that can read $A$ and weighting all possible combinations of their transitions. \begin{lemma}\label{lem:inf-coin} The union of all $\spadesuit$-bisimulations coincides with $\sim$. \end{lemma} \begin{proof} First, we prove that whenever there is a $\spadesuit$-bisimulation $(R,\overline\mathrm{next})$ with $(\mu,\nu)\in R$ then $\mu\sim\nu$ by proving that $R\cup R^{-1}$ is a bisimulation relation. Let $A\subseteq\mathit{L}$ and $\mu R \nu$ or $\nu R \mu$, w.l.o.g.\ the former (the latter follows symmetrically). \begin{enumerate} \item The first condition of the relational bisimulation follows by \begin{align*} \mu(S_A)&=\pi_1(\mathrm{next}(\mu)(A))\\ &=\pi_1(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(A))\\ &=\pi_1(\spadesuit\pi_1\circ\overline\mathrm{next}\langle\mu,\nu\rangle(A))\\ &=\pi_1((id\times\mathcal P\pi_1)(\overline\mathrm{next}\langle\mu,\nu\rangle(A)))\\ &=id(\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle(A)))\\ &=\pi_1((id\times\mathcal P\pi_2)(\overline\mathrm{next}\langle\mu,\nu\rangle(A)))\\ &=\pi_1(\spadesuit\pi_2\circ\overline\mathrm{next}\langle\mu,\nu\rangle(A))\\ &=\pi_1(\mathrm{next}\circ\pi_2\langle\mu,\nu\rangle(A))\\ &=\pi_1(\mathrm{next}(\nu)(A))\\ &=\nu(S_A) \end{align*} \item For the second condition of the relational bisimulation, let $\mu\tran{A}\mu'$. Since \begin{align*} \mu'&\in\pi_2(\mathrm{next}(\mu)(A))\\ &=\pi_2(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(A))\\ &=\pi_2(\spadesuit\pi_1\circ\overline\mathrm{next}\langle\mu,\nu\rangle(A))\\ &=\pi_2((id\times\mathcal P\pi_1)\Big(\overline\mathrm{next}\langle\mu,\nu\rangle(A)\Big))\\ &=\mathcal P\pi_1(\pi_2\Big(\overline\mathrm{next}(\langle\mu,\nu\rangle)(A)\Big)) \end{align*} there is $\nu'$ with $$\langle\mu',\nu'\rangle\in \pi_2\Big(\overline\mathrm{next}(\langle\mu,\nu\rangle)(A)\Big)$$ Since $R$ is a coalgebra, we have $\langle\mu',\nu'\rangle\in R$, i.e.\ $\mu'R\nu'$. \end{enumerate} \medskip Second, given $R=\mathord{\sim}$, we define $\overline\mathrm{next}$ making it into a coalgebra such that the bisimulation diagram commutes. Let $\mathrm{succ}_A(\mu)=\{\mu'\mid\mu\tran{A}\mu'\}$ denote the set of all $A$-successors of $\mu$. For $\mu R\nu$, we set $$\overline\mathrm{next}(\langle\mu,\nu\rangle)(A)=(\mu(S_A),\{\langle\mu',\nu'\rangle\in R\cap \mathrm{succ}_A(\mu)\times\mathrm{succ}_A(\nu)\})$$ Since we imposed $\langle\mu',\nu'\rangle\in R$, $(R,\overline\mathrm{next})$ is a $\spadesuit$-coalgebra. Further, we prove that the bisimulation diagram commutes.
Firstly, \begin{align*} \mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(A)&=(\mu(S_A),\mathrm{succ}_A(\mu))\\ \mathrm{next}\circ\pi_2\langle\mu,\nu\rangle(A)&=(\nu(S_A),\mathrm{succ}_A(\nu)) \end{align*} Therefore, $$\pi_1(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(A))= \mu(S_A)=\pi_1(\spadesuit\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle)(A))$$ and $$\pi_1(\mathrm{next}\circ\pi_2\langle\mu,\nu\rangle(A))=\nu(S_A)=\mu(S_A)= \pi_1(\spadesuit\pi_2(\overline\mathrm{next}\langle\mu,\nu\rangle)(A))$$ since $\mu(S_A)=\nu(S_A)$ due to $\mu\sim\nu$ and the first relational bisimulation condition. Secondly, \begin{align*} \pi_2(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(A))&= \mathrm{succ}_A(\mu)\stackrel{(1)}=\pi_2(\spadesuit\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle)(A))\\ \pi_2(\mathrm{next}\circ\pi_2\langle\mu,\nu\rangle(A))&=\mathrm{succ}_A(\nu) \stackrel{(2)}=\pi_2(\spadesuit\pi_2(\overline\mathrm{next}\langle\mu,\nu\rangle)(A)) \end{align*} After we show $(1)$ and $(2)$, we know both components of $\spadesuit\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle)(A)$ are the same as those of $\mathrm{next}(\pi_1\langle\mu,\nu\rangle)(A)$, and similarly for $\spadesuit\pi_2$, hence the diagram commutes. As to $(1)$, $\supseteq$ follows directly from the definition of $\overline\mathrm{next}$ above. For $\subseteq$, for every $\mu'\in\mathrm{succ}_A(\mu)$ there is $\nu'\in\mathrm{succ}_A(\nu)$ with $\mu'R\nu'$ due to the second relational bisimulation condition. Thus $\langle\mu',\nu'\rangle\in \pi_2(\overline\mathrm{next}\langle\mu,\nu\rangle(A))$ and hence $\mu'\in\pi_2(\spadesuit\pi_1 (\overline\mathrm{next}\langle\mu,\nu\rangle)(A))$. $(2)$ follows from a symmetric argument and $R$ being symmetric. \qed \end{proof} \subsection{Related bisimulations} For \emph{discrete} systems, one could define a functor for finite probabilistic systems with non-determinism by \begin{align*} { ([0,1] \times \mathcal P(-))^\mathit{L} \tag{$\heartsuit$}} \end{align*} Now a PA $(\mathit{S}, \mathit{L}, \longrightarrow)$ is a $\heartsuit$-coalgebra with the carrier set $\mathcal{D}(\mathit{S})$. Indeed, the coalgebra assigns to a distribution $\mu$ and a label $a$ the pair $(p,M)$ where \begin{itemize} \item $p= \mu(\mathit{S}_a)$ is the measure of states that can read $a$; \item $M = \emptyset$ if $\mu(\mathit{S}_a) = 0$, and $M$ is the set of convex combinations over $\{\frac{1}{\mu(\mathit{S}_a)}\sum_{s \in\mathit{S}_a} \nu_s \cdot \mu(s) \mid \forall s\in\mathit{S}_a.s \patransa{a} \nu_s\}$, otherwise. We write $\mu \patransa{a} \mu'$ for every $\mu'\in M$. \end{itemize} \begin{remark} The union of all $\heartsuit$-bisimulations coincides with the bisimulation of \cite{DBLP:journals/corr/FengZ13}, denoted by $\sim_{\heartsuit}$. \end{remark} Although we can use $\heartsuit$ to capture the distribution semantics of PA as above, we could as well use it differently: if we defined that a label that cannot be read in the current state is \emph{ignored} instead of halting, the successor distribution would be defined by making a step from states that can read the label and staying elsewhere. (This approach is discussed in the next section.) \medskip Moreover, we could easily extend the functor to systems with real rewards (as in~\cite{DBLP:conf/ijcai/CastroPP09}) simply by adding $\mathbb{R}$ to get $\mathbb{R}\times([0,1] \times \mathcal P(-))^\mathit{L} $ for rewards on states or $([0,1] \times \mathcal P(\mathbb{R}\times -))^\mathit{L} $ on transitions etc. Similarly, for systems without inner non-determinism, such as Rabin automata, we could simplify the functor to $([0,1] \times -)^\mathit{L}$.
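To make the $\heartsuit$-structure and the difference between halting and ignoring concrete, consider the following assumed toy instance (not taken from the main body): a PA with states $s,t$ where $s \patransa{a} \nu$ is the only $a$-transition and $t$ has no $a$-transition, together with the distribution $\mu = \frac{1}{2}\delta_s + \frac{1}{2}\delta_t$, writing $\delta_s$ for the Dirac distribution on $s$. The definition above yields
$$\mathrm{next}(\mu)(a) \;=\; \big(\tfrac{1}{2},\ \{\nu\}\big),$$
i.e. exactly half of the probability mass can read $a$ and the unique successor distribution is $\nu$; under the ignoring interpretation the successor would instead be $\frac{1}{2}\nu + \frac{1}{2}\delta_t$, with the mass in $t$ staying put.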
The only important and novel part of the functor is $[0,1]$ stating the overall probability mass that performs the step. (This is also the only difference from non-probabilistic coalgebraic functors.) In all the cases, the generic $\heartsuit$-bisimulation keeps the same shape. What changes is the induced relational bisimulation. \section{Applications} \label{sec:applications-app} In the following subsections, we justify the proposed bisimulation yielded by $\spadesuit$ by reviewing its application areas and comparing it to other bisimulations in these areas. \subsection{Bisimulation in compositional modelling of distributed systems} Probabilistic automata are apt for compositional modelling of communicating parallel systems. This way, the whole system is built bottom-up by connecting smaller components into larger ones via the parallel composition operator. To tackle the state space explosion, minimisation algorithms can be applied throughout the process after each composition. Computing the quotient according to a bisimulation serves well as a minimisation algorithm if the bisimulation is a congruence w.r.t. parallel composition. This condition is satisfied by the (also distribution-based) strong bisimulation recently defined by Hennessy~\cite{DBLP:dblp_journals/fac/Hennessy12}, denoted by $\sim_\mathit{Hen}$. This is not the case with $\sim$ as shown in the following example. \begin{example} According to our definition, $u \sim v$ because $\frac{1}{2} u_h + \frac{1}{2} u_t \sim v'$. In contrast, $u \not\sim_\mathit{Hen} v$. Therefore, $\sim_\mathit{Hen}$ is strictly finer than $\sim$. Actually, $\sim_\mathit{Hen}$ coincides (on Dirac distributions) with the standard probabilistic bisimulation of Larsen and Skou \cite{DBLP:conf/popl/LarsenS89}, which distinguishes $u$ and $v$ as well. \begin{tikzpicture}[outer sep=0.1em,->, state/.style={draw,circle,minimum size=1.6em,inner sep=0.1em}] \begin{scope}[xscale=1.2] \node (sm1) at (0.1,0) [state] {$v$}; \node (s0) at (1,0) [state] {$v'$}; \node (s0a) at (1.5,0) [inner sep=0, outer sep=0,minimum width=0] {}; \node (s1) at (2,0.6) [state] {}; \node (s2) at (2,-0.6) [state] {}; \draw [<-] (sm1) -- +(-0.5,0); \draw (sm1) to node[auto,pos=0.6] {$a$} (s0); \path[-] (s0) edge node[below=-1,pos=0.3]{$a$} (s0a); \draw (s0a) to node[above left=-4,pos=0.7] {$\frac{1}{2}$} (s1); \draw (s0a) to node[below left=-4,pos=0.7] {$\frac{1}{2}$} (s2); \draw [loop right,looseness=5] (s1) to node[auto] {$h$} (s1); \draw [loop right,looseness=5] (s2) to node[auto] {$t$} (s2); \end{scope} \begin{scope}[xscale=1.2,xshift=-10em] \node (s0) at (0,0) [state] {$u$}; \node (s0a) at (0.5,0) [inner sep=0, outer sep=0,minimum width=0] {}; \node (s1) at (1,0.6) [state] {$u_h$}; \node (s2) at (1,-0.6) [state] {$u_t$}; \node (s3) at (1.9,0.6) [state] {}; \node (s4) at (1.9,-0.6) [state] {}; \draw [<-] (s0) -- +(-0.5,0); \path[-] (s0) edge node[below=-1,pos=0.3]{$a$} (s0a); \draw (s0a) to node[above left=-4,pos=0.7] {$\frac{1}{2}$} (s1); \draw (s0a) to node[below left=-4,pos=0.7] {$\frac{1}{2}$} (s2); \draw (s1) to node[auto,pos=0.6] {$a$} (s3); \draw (s2) to node[auto,pos=0.6,swap] {$a$} (s4); \draw [loop right,looseness=5] (s3) to node[auto] {$h$} (s3); \draw [loop right,looseness=5] (s4) to node[auto] {$t$} (s4); \end{scope} \end{tikzpicture} Let $\parallel_A$ denote the CSP-style full synchronization on labels from $A$ and interleaving on $\mathit{L} \setminus A$. Then $\sim$ is not a congruence w.r.t.
$\parallel_A$ as $u \parallel_\mathit{L} s \not\sim v \parallel_\mathit{L} s$ for $s$ depicted below. \vspace{-0.5em} \begin{center} \begin{tikzpicture}[outer sep=0.1em,->, state/.style={draw,circle,minimum size=1.6em, inner sep=0.1em}] \begin{scope}[xscale=1.2] \node (sm1) at (0.1,0) [state] {$s$}; \node (s0) at (1,0) [state] {$s'$}; \node (s1) at (2,0.6) [state] {}; \node (s2) at (2,-0.6) [state] {}; \draw [<-] (sm1) -- +(-0.5,0); \draw (sm1) to node[auto,pos=0.6] {$a$} (s0); \draw (s0) to node[above left=-4,pos=0.7] {$a$} (s1); \draw (s0) to node[below left=-4,pos=0.7] {$a$} (s2); \draw [loop right,looseness=5] (s1) to node[auto] {$h$} (s1); \draw [loop right,looseness=5] (s2) to node[auto] {$t$} (s2); \end{scope} \end{tikzpicture} \end{center} \vspace{-0.3em} \end{example} This is actually a classical example, due to~\cite{Segala:1996:MVR:239648}, modelling a process $u$ (or $v$) generating a secret by tossing a coin and the process $s$ guessing the secret. If $s$ guesses correctly, they synchronize forever on $h$ or $t$; otherwise, they halt. In $u \parallel_\mathit{L} s$, the non-determinism can be resolved by a \emph{scheduler} in such a way that the guesser makes a correct guess with probability $1$, which is not possible in $v \parallel_\mathit{L} s$ because the secret is generated later. This is overly pessimistic in the context of \emph{distributed systems} where the guesser observes only the communication with the tosser and not its state. Namely, the systems $u \parallel_\mathit{L} s$ and $v \parallel_\mathit{L} s$ exhibit the same behaviour (correct guess with probability at most $1/2$) if the non-determinism is resolved by \emph{distributed} schedulers~\cite{DBLP:conf/concur/AlfaroHJ01,Che06,DBLP:conf/formats/GiroD07}. This means that the non-determinism in each component of the composition is resolved independently of the state of the other component.
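A short calculation makes the bound for distributed schedulers explicit; for simplicity we assume that the guesser's behaviour is summarized by a single (possibly randomized) decision, namely the probability $p$ of guessing $h$. Since a distributed scheduler cannot observe the outcome of the toss, the guess is independent of it, and the probability of a correct guess is
$$\frac{1}{2}\,p + \frac{1}{2}\,(1-p) \;=\; \frac{1}{2} \qquad \text{for any } p\in[0,1],$$
in contrast with an arbitrary scheduler, which in $u \parallel_\mathit{L} s$ can pick the correct branch in $s'$ after observing the state of the tosser.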
\subsection{Bisimulation for partially observable MDPs} In the distributed setting it is natural to assume that the state space of each component is \emph{fully} unobservable from outside. This is a special case of \emph{partially} observable systems, such as partially observable Markov decision processes (POMDP). POMDPs have a wide range of applications in robotic control, automated planning, dialogue systems, medical diagnosis, and many other areas~\cite{DBLP:journals/aamas/ShaniPK13}. In the analysis of POMDPs, the distributions over states, called \emph{beliefs}, arise naturally and yield a continuous-space (fully observable) belief MDP. Therefore, probabilistic bisimulations over beliefs have already been studied~\cite{DBLP:conf/ijcai/CastroPP09,DBLP:conf/nfm/JansenNZ12}. However, no connection of this particular case to general probabilistic bisimulation has been studied. There are various (equivalent) definitions of POMDP; we use one close to computational game theory~\cite{DBLP:conf/mfcs/ChatterjeeDH10}. \begin{definition} A \emph{partially observable Markov decision process (POMDP)} is a tuple $\mathcal{M} = (\mathit{S},\delta, \mathcal{O})$ where $\mathit{S}$ is a set of states, $\delta \subseteq \mathit{S} \times \mathcal{D}(\mathit{S})$ is a transition relation, and $\mathcal{O} \subseteq 2^\mathit{S}$ is a set of observations that partition the state space. \end{definition} This formalism is also known as \emph{labelled Markov decision processes}~\cite{DBLP:journals/ijfcs/DoyenHR08}, where state labels correspond to observations. Such a state-labelled system $\mathcal{M} = (\mathit{S},\delta, \mathcal{O})$ can be easily translated to an action-labelled PA $D_{\mathcal{M}} = (\mathit{S},\mathcal{O}, \longrightarrow)$ where $s \patransa{o} \mu$ if $s \in o$ and $(s,\mu) \in \delta$. This way, we can define $\mu \sim \mu'$ in $\mathcal{M}$ if $\mu \sim \mu'$ in $D_\mathcal{M}$. Hence, in Section~\ref{sec:algorithms-finite}, we give the first algorithm for computing bisimulations over beliefs in finite POMDP. Previously, there was only an algorithm~\cite{DBLP:conf/nfm/JansenNZ12} for computing bisimulations on distributions of Markov chains with partial observation. \subsection{Bisimulation for large-population models} In the sense of~\cite{DBLP:journals/deds/GastG11,DBLP:journals/tcs/McCaigNS11,may1974biological,jovanovic1988anonymous}, we can understand PA as a description of one \emph{agent} in a large homogeneous population, for example a \emph{chemical compound}, a \emph{node of a computer grid}, or a \emph{customer of a chain store}. The distribution perspective is a natural one -- the distribution specifies the ratios of agents currently in the individual states. For a Markov chain, this gives a deterministic process over the continuous space of distributions.
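To spell out the deterministic process mentioned above (in assumed matrix notation, writing $P$ for the transition matrix of the Markov chain), the population distribution evolves by
$$\mu_{t+1}(s') \;=\; \sum_{s\in\mathit{S}} \mu_t(s)\, P(s,s'), \qquad \text{i.e.} \quad \mu_{t+1} = \mu_t P,$$
so the fraction of agents in each state at time $t+1$ is completely determined by the fractions at time $t$.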
The non-determinism of PA also has a natural interpretation. Labels given to this large population of PAs correspond to global control actions~\cite{DBLP:journals/tac/GastGB12,DBLP:journals/deds/GastG11} such as \emph{manipulation of the chemical solution}, a \emph{broadcast within the grid}, or a \emph{marketing campaign of the chain store}. Agents react to this control action if currently in a state with a transition under this label, otherwise they ignore it. Multiple transitions under this label correspond to multiple ways in which the agent may react. \begin{example} Let us illustrate the idea by an example of three models of customers of a chain store with half of the population in state $1$ and half of the population in state $3$. \begin{center} \begin{tikzpicture}[outer sep=0.1em,->,yscale=0.9, state/.style={draw,circle, minimum size=1.7em, inner sep=0.1em, text centered}, trans/.style={font=\scriptsize\itshape}] \begin{scope}[xscale=1,yscale=0.8] \node (sm0) at (1.75,0) [state] {2}; \node (sm1) at (0,0) [state] {1}; \node (s1) at (2.5,-2) [state] {4}; \node (s2) at (1,-2) [state] {3}; \draw (sm0) to node[trans,auto,pos=0.7] {yoghurt ad} (s1); \draw (s1) [loop below,looseness=5] to node[trans,auto] {buy y.} (s1); \draw (sm0) to node[trans,auto,swap,pos=0.7] {m\"{u}ssli ad} (s2); \draw (s2) [loop below, looseness=5] to node[trans,auto] {buy m.} (s2); \draw [-] (3.8,0.5) -- (3.8,-3.2); \end{scope} \begin{scope}[xscale=1,yscale=0.8,xshift=15em] \node (sm0) at (1.75,0) [state] {2}; \node (sm1) at (0,0) [state] {1}; \node (s1) at (1.75,-2) [state] {4}; \node (s2) at (0,-2) [state] {3}; \draw (sm0) to node[trans,auto,swap] {yoghurt ad} (s1); \draw (s1) [loop below,looseness=5] to node[trans,auto] {buy y.} (s1); \draw (sm1) to node[trans,auto,swap] {m\"{u}ssli ad} (s2); \draw (s2) [loop below, looseness=5] to node[trans,auto] {buy m.} (s2); \draw [-] (2.3,0.5) -- (2.3,-3.2); \end{scope} \begin{scope}[xscale=1,yscale=0.8,xshift=23.5em] \node (sm0) at (2,0) [state] {2}; \node (sm1) at (0,0) [state] {1}; \node (s1) at (2.75,-1.5) [state] {4}; \node (s2) at (1.25,-1.5) [state] {3}; \node (s3) at (2,-3) [state] {5}; \draw (sm0) to node[trans,right,pos=0.6] {yoghurt ad} (s1); \draw (s1) [loop right,looseness=3] to node[trans] {buy y.} (s1); \draw (sm0) to node[trans,left,swap,pos=0.6] {m\"{u}ssli ad} (s2); \draw (s2) [loop left, looseness=3] to node[trans,left=-2] {buy m.} (s2); \draw (s1) to node[trans,auto,pos=0.1] {m\"{u}ssli ad} (s3); \draw (s2) to node[trans,auto,pos=0.1,swap] {yoghurt ad} (s3); \draw (s3) [loop right,looseness=3] to node[trans] {buy y.} (s3); \draw (s3) [loop left, looseness=3] to node[trans,left=-2] {buy m.} (s3); \end{scope} \end{tikzpicture} \end{center} It is natural to assume that these three models can be distinguished. Indeed, none of the populations are bisimilar according to our definition. Note, however, that the related distribution-based bisimulation of~\cite{DBLP:journals/corr/FengZ13} that allows only singletons $A$ in Definition~\ref{def:infinite-bisim} does not distinguish the first and the second population. Their definition actually extends the bisimulation of~\cite{DBLP:journals/ijfcs/DoyenHR08} defined on input-enabled models; they naturally transform general probabilistic automata to input-enabled ones by directing the missing transitions into a newly added sink state. Observe that the similarly natural alternative approach of adding self-loops does not distinguish the second and the third population.
\end{example}
\section{Technical details and proofs from Section~\ref{sec:applications}} Let us first formalize in more detail the concepts we relate to in the main body. \subsection{Continuous-time Markov chains} \begin{definition} A CTMC $\mathcal{C}$ is a tuple $(S,Q)$ where $S$ is a finite set of states, and $Q: S\times S \to \mathbb{R}_{\ge 0}$ is a rate matrix such that $Q(s,s') = 0$ denotes that there is no transition from $s$ to $s'$.
\end{definition} \subsubsection{Parallel composition} For two CTMC $(S_1,Q_1)$ and $(S_2,Q_2)$ with initial states $s_1$ and $s_2$ we define their (full interleaving) parallel composition $\mathcal{C}_1 \parallel_{CT} \mathcal{C}_2$ as $(S_1\times S_2, Q')$ with the initial state $(s_1,s_2)$ where $$Q'((s_1,s_2),(s'_1,s'_2)) = \begin{cases} Q_1(s_1,s'_1) & \text{if $s_2 = s'_2$,} \\ Q_2(s_2,s'_2) & \text{if $s_1 = s'_1$,} \\ 0 & \text{otherwise.} \end{cases} $$ \subsubsection{Embedding} Finally, for each CTMC $\mathcal{C} = (S,Q)$ with initial state $s_0 \in S$, we define a stochastic automaton $SA(\mathcal{C}) = (S, S\times S, \{\ell\}, \rightarrow, \kappa, F)$ over a single action $\ell$ with initial location $s_0$ where \begin{itemize} \item $(s,\ell,\{(s,s')\},s') \,\in\; \rightarrow$ for any $s,s' \in S$, \item $\kappa(s) = \{(s,s') \mid Q(s,s') > 0 \}$, \item $F((s_1,s_2)) = \mathrm{Exp}(Q(s_1,s_2))$. \end{itemize} \subsection{Stochastic automata} \subsubsection{Semantics $\mathbf{P}_\mathcal{S}$ of stochastic automata} Let $\mathcal{S} = (\mathcal{Q},\mathcal{C},\mathcal{A},\rightarrow,\kappa, F)$ be a stochastic automaton with initial location $q_0$. We define the semantical NLMP $\mathbf{P}_\mathcal{S} = (\mathcal{Q} \times \mathbb{R}^\mathcal{C}, \mathcal{A} \times \mathbb{R}_{\ge 0}, \{\tau_{(a,t)} \mid (a,t)\in\mathcal{A} \times \mathbb{R}_{\ge 0}\})$. A state $(q,\xi)$ denotes being in location $q$ where each clock $c$ has value $\xi(c)$. The NLMP $\mathbf{P}_\mathcal{S}$ is initiated according to an initial measure $\mu$ over the state space of $\mathbf{P}_\mathcal{S}$ such that \begin{itemize} \item the marginal in the first component is Dirac on $q_0$; \item the marginal for any $c \not\in \kappa(q_0)$ is Dirac on $0$; \item the marginals for each $c \in \kappa(q_0)$ have CDF $F(c)$, and their product is equal to the joint distribution of $\kappa(q_0)$. \end{itemize} In $(q,\xi)$, a label of the form $(a,t)$ is available if $E_a \neq \emptyset$ where $E_a$ is the set of edges that have action $a$ and become available after the idling time $t$. We set $\tau_{(a,t)}((q,\xi)) = \{\mu_e \mid e \in E_a \}$ where $\mu_e$ for an edge $e = (q,a,C,q')$ is the probability measure over states with (similarly to the initial measure above) \begin{enumerate} \item the marginal in the first component being Dirac on $q'$; \item the marginal for any $c \not\in \kappa(q')$ being Dirac on $\xi(c) - t$; \item the marginals for each $c \in \kappa(q')$ having CDF $F(c)$, and their product being equal to the joint distribution of $\kappa(q')$. \end{enumerate} Intuitively, it (1) moves to $q'$, (2) decreases values of clocks by $t$, and (3) sets clocks of $\kappa(q')$ to independent random values.
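As a small sanity check of the embedding (using the single action $\ell$ and the notation assumed above), consider a CTMC with two states $s_0,s_1$ and a single transition of rate $\lambda$, i.e. $Q(s_0,s_1)=\lambda$ and all other entries $0$. Then $SA(\mathcal{C})$ has locations $s_0,s_1$, the clock $c=(s_0,s_1)$ with $F(c) = \mathrm{Exp}(\lambda)$, $\kappa(s_0)=\{c\}$, $\kappa(s_1)=\emptyset$, and in particular the edge $(s_0,\ell,\{c\},s_1)$. Entering $s_0$ samples $c$ according to $\mathrm{Exp}(\lambda)$ and the edge becomes enabled exactly when $c$ expires, so the sojourn time in $s_0$ is exponentially distributed with rate $\lambda$, matching the CTMC.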
\subsubsection{Parallel composition} Further, for two SA $\mathcal{S}_1 = (\mathcal{Q}_1,\mathcal{C}_1,\mathcal{A}_1,\rightarrow_1,\kappa_1, F_1)$ and $\mathcal{S}_2 = (\mathcal{Q}_2,\mathcal{C}_2,\mathcal{A}_2,\rightarrow_2,\kappa_2, F_2)$ with initial locations $q_1$ and $q_2$ we define their full interleaving parallel composition $\mathcal{S}_1 \parallel_{S\!A} \mathcal{S}_2$ as the tuple $(\mathcal{Q}_1 \times \mathcal{Q}_2 \times \{0,1,2\},\mathcal{C}_1 \cup \mathcal{C}_2,\mathcal{A}_1 \cup \mathcal{A}_2,\rightarrow,\kappa, F)$ with initial location $(q_1,q_2,0)$, where the third component of a location denotes which of the two SA moved in the last step and where \begin{itemize} \item $\rightarrow$ is the smallest relation satisfying \begin{itemize} \item $(q, a, X,q') \in \rightarrow_1$ implies $((q,q_2,b),a, X, (q',q_2,1)) \in \rightarrow$ for any $q_2 \in \mathcal{Q}_2$, $a \in \mathcal{A}_1$, and $b\in \{0,1,2\}$ and \item $(q, a, X,q') \in \rightarrow_2$ implies $((q_1,q,b),a, X, (q_1,q',2)) \in \rightarrow$ for any $q_1 \in \mathcal{Q}_1$, $a \in \mathcal{A}_2$, and $b\in\{0,1,2\}$; \end{itemize} \item $\kappa((q_1,q_2,b)) = \kappa_b(q_b)$ if $b \in \{1,2\}$ and $\kappa((q_1,q_2,0)) = \kappa_1(q_1) \cup \kappa_2(q_2)$, \item $F$ assigns $F_1(c)$ to $c \in \mathcal{C}_1$ and $F_2(c)$ to $c \in \mathcal{C}_2$. \end{itemize} \subsection{Proof of Theorem~\ref{thm:sta-commute}} Let us recall the theorem. \begin{reftheorem}{thm:sta-commute} Let $SA(\mathcal{C})$ denote the stochastic automaton corresponding to a CTMC $\mathcal{C}$. For any CTMC $\mathcal{C}_1, \mathcal{C}_2$, we have $$SA(\mathcal{C}_1) \parallel_{S\!A} SA(\mathcal{C}_2) \;\; \sim \;\; SA(\mathcal{C}_1 \parallel_{CT} \mathcal{C}_2).$$ \end{reftheorem} \begin{proof} It is easy to see that to each location $(s_1,s_2)$ in the system on the right there are three locations of the form $(s_1,s_2,b)$ in the system on the left, that differ only in the third component $b$, i.e. they \begin{itemize} \item have the same set of edges, \item have the same set $Pos(s_1,s_2)$ of clocks that are positive in each location, \end{itemize} and differ only in the sets of clocks $\kappa$ to be re-sampled. We show that $(s_1,s_2) \sim (s_1,s_2,b)$ for any $b \in \{0,1,2\}$ by applying the arguments from the algorithm in Section~\ref{sec:algorithms-infinite}. Let $\bar{\PA}_L$ and $\bar{\PA}_R$ denote the finite systems from Lemma~\ref{lem:expo-finite} obtained from the systems on the left and on the right, respectively. The distribution of clocks in each location $q = (s_1,s_2)$ or $q = (s_1,s_2,b)$ is $q \otimes \bigotimes_{c\in Pos(s_1,s_2)} \mathrm{Exp}(\lambda_c)$. Hence, to each state on the right, there are at most 3 reachable states on the left with the same clock distributions. Thanks to the same edges and same clock distributions, these three states are indistinguishable by the \tableau{Step} rule.\qed \end{proof} \section{Proofs from Section~\ref{sec:algorithms}} \subsection{Discrete systems} We use the notation $\mu\oplus_p\nu$ to denote $(1-p)\mu+p\nu$. Further, for a (not necessarily probabilistic) measure $\mu=(\mu(s_1),\ldots,\mu(s_{|\mathit{S}|}))$ we denote $|\mu|=\sum_{i=1}^{|\mathit{S}|}\mu(s_i)$. Thus, for any probability distribution $\mu$, $|\mu|=1$. \begin{reflemma}{lem:existence} For every linear bisimulation there exists a corresponding bisimulation matrix.
\end{reflemma} \begin{proof} Let $R$ be a linear bisimulation and $\Gamma$ an arbitrary equivalence class of $R$. Due to linearity, $\Gamma$ is closed under convex combinations. Consider the affine closure $\bar\Gamma$ of $\Gamma$, i.e. the smallest superset of $\Gamma$ that is closed under affine combinations. Then (i) $\bar\Gamma$ is an affine subspace, and (ii) $\bar\Gamma\cap\mathcal{D}(\mathit{S})=\Gamma$. This holds for every class of $R$. Hence $\{\bar\Gamma\mid\Gamma \text{ is an equivalence class of } R\}$ decomposes $\mathbb R^{|\mathit{S}|}$ and all $\bar\Gamma$ have the same difference space $\bar\Delta:=\{\mu-\nu\mid \mu,\nu\in\bar\Gamma\}$ (independent of the choice of $\Gamma$). Since $\bar\Delta$ is a linear subspace, there is a matrix $E$ such that $\rho\in\bar\Delta$ iff $\rho E=0$. For every $\mu R \nu$ we thus have $(\mu-\nu)E=0$. In the other direction, let $\mu\in\Gamma$ and $\nu$ be an arbitrary distribution such that $(\mu-\nu)E=0$. We thus have $\mu-\nu\in\bar\Delta$. Since $\mu\in\bar\Gamma$ we thus get $\nu\in\bar\Gamma$. Since $\nu\in\mathcal{D}(\mathit{S})$, we finally obtain $\nu\in \Gamma$ and thus $\mu R \nu$. \qed \end{proof} \begin{lemma}\label{lem:linearity} $\sim$ is linear. \end{lemma} \begin{proof} We prove that $\mu_1\sim\nu_1$ and $\mu_2\sim\nu_2$ imply $\mu_1\oplus_p\mu_2\sim\nu_1\oplus_p\nu_2$ for any $p\in[0,1]$. This follows easily from the Spoiler-Duplicator game. Indeed, let Duplicator have a winning response to every Spoiler's strategy both in $\mu_1\sim\nu_1$ and $\mu_2\sim\nu_2$. Let now $p\in[0,1]$. Any Spoiler's strategy on $\mu_1\oplus_p\mu_2\sim\nu_1\oplus_p\nu_2$ (w.l.o.g.\ attacking on the left under $A$) can be decomposed into a part acting on $(1-p)\mu_1$ resulting in $\Big((1-p)\mu_1(S_A),(1-p)\mu_1'\Big)$ and a part acting on $p\mu_2$ resulting in $\Big(p\mu_2(S_A),p\mu_2'\Big)$. Duplicator has a winning response $\nu_1'$ to the former (when applied to the whole $\mu_1$) and also $\nu_2'$ to the latter (when applied to the whole $\mu_2$). Duplicator can now mix his responses, resulting in $\nu_1'\oplus_p\nu_2'$, which is clearly a choice conforming to the rules, since $(\nu_1\oplus_p\nu_2)(S_A)=(1-p)\nu_1(S_A)+p\nu_2(S_A)=(1-p)\mu_1(S_A)+p\mu_2(S_A)=(\mu_1\oplus_p\mu_2)(S_A)$, and also winning, as the resulting pair is again a convex combination of the individual resulting pairs.\qed \end{proof} Thus minimal bisimulation matrices always exist. \begin{corollary} There is a minimal bisimulation matrix, i.e.\ a matrix $E$ such that for any $\mu,\nu\in\mathcal{D}(\mathit{S})$, we have $\mu\sim\nu$ iff $(\mu-\nu)E=0$. \end{corollary} We are searching for the least restrictive system $E$ satisfying stability. Therefore, we can compute $\sim$, i.e.\ the greatest fixpoint of the bisimulation requirement of stability, as the least fixpoint of the partitioning procedure of adding equations. Indeed, recall that all bisimulation matrices with the least possible dimension have the same solution space. \begin{refproposition}{prop-alg-determ} In an action deterministic PA, $E$ containing $\vec 1$ is a bisimulation matrix iff it is $P_a$-stable for all $a\in\mathit{L}$. \end{refproposition} \begin{proof} Firstly, we prove that for any $a\in\mathit{L}$, any bisimulation matrix $E$ is $P_a$-stable. Let $\rho$ be such that $\rho E=0$. Let us write $\rho=\mu-\nu$ where the entries of $\mu$ and $\nu$ are non-negative. Since $E$ contains $\vec1$, we have $|\mu|=|\nu|$; moreover, assume for the moment that both equal $1$.
\begin{refproposition}{prop-alg-determ}
In an action-deterministic PA, $E$ containing $\vec 1$ is a bisimulation matrix iff it is $P_a$-stable for all $a\in\mathit{L}$.
\end{refproposition}
\begin{proof}
Firstly, we prove that any bisimulation matrix $E$ is $P_a$-stable for any $a\in\mathit{L}$. Let $\rho$ be such that $\rho E=0$, and let us write $\rho=\mu-\nu$ where the entries of $\mu$ and $\nu$ are non-negative. Since $E$ contains $\vec1$, we have $|\mu|=|\nu|$; assume for the moment that they equal $1$. Then $\rho$ is a difference of two distributions $\mu-\nu$. Since $E$ is a bisimulation matrix, we have $\mu\sim\nu$. Therefore, if Spoiler attacks under $a$, we have $\mu P_a\sim\nu P_a$. Therefore, $(\mu P_a-\nu P_a)E=0$, equivalently $\rho P_aE=0$. In the general case, where $|\mu|=|\nu|$ is not equal to $1$, we can regard them as scalar multiples of distributions, normalize them, and use the same reasoning (with the exception of $\mu=\nu=\vec0$, in which case the claim holds trivially for $\rho=\vec0$).

Secondly, let $E$ contain $\vec1$ and be $P_a$-stable for all $a\in\mathit{L}$. We show that the relation $R$ defined by $\mu R\nu$ iff $(\mu-\nu)E=0$ is a bisimulation. Consider first singleton sets $A=\{a\}\subseteq\mathit{L}$. The first bisimulation condition for $a\in\mathit{L}$ follows from $(\mu-\nu)P_a\vec1=0$; the second one follows since $(\mu-\nu)E=0$ implies $(\mu P_a-\nu P_a)E=(\mu-\nu)P_aE=0$ by stability. For a general $A\subseteq\mathit{L}$, the bisimulation condition does not generate any new requirements due to the action determinism: since $S_A$ is a disjoint union of the sets $S_a$ for $a\in A$, the properties follow from the properties of singletons. \qed
\end{proof}
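The characterization suggests a direct way to compute a minimal bisimulation matrix in the action-deterministic case: close the column space spanned by $\vec 1$ under multiplication by each $P_a$. The following Python sketch (our own illustration using \texttt{numpy}; the names and encoding are ours, not the paper's implementation) does exactly this and then decides $\mu\sim\nu$; it reproduces the toy instance above.
\begin{verbatim}
import numpy as np

def column_basis(M, tol=1e-9):
    """Orthonormal basis of the column space of M (via SVD)."""
    u, s, _ = np.linalg.svd(M, full_matrices=False)
    return u[:, s > tol]

def bisimulation_matrix(Ps, tol=1e-9):
    """Minimal bisimulation matrix of an action-deterministic PA.

    Ps: list of n x n (sub)stochastic matrices P_a, one per label.
    Returns E such that mu ~ nu iff (mu - nu) @ E == 0, as a basis
    of the smallest P_a-stable space containing the ones vector.
    """
    n = Ps[0].shape[0]
    E = np.ones((n, 1))
    while True:
        closure = np.hstack([E] + [P @ E for P in Ps])
        E2 = column_basis(closure, tol)
        if E2.shape[1] == E.shape[1]:  # no new columns: P_a-stable
            return E2
        E = E2

def bisimilar(mu, nu, E, tol=1e-9):
    return bool(np.all(np.abs((mu - nu) @ E) < tol))

# The toy PA above: E has full rank 3, so only equal
# distributions are bisimilar.
P_a = np.array([[0., 1, 0], [0, 0, 1], [0, 0, 1]])
P_b = np.array([[0., 0, 0], [0, 0, 0], [0, 0, 1]])
assert bisimulation_matrix([P_a, P_b]).shape[1] == 3
\end{verbatim}
The loop terminates after at most $|\mathit{S}|$ rounds, since the rank grows in every round in which the space is not yet stable.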
We recall that the elements of $\mathcal E(\vec C)$ are tuples of corners of the $C_i$'s that are ``extremal in the same direction.'' Formally, we say a point $p$ is \emph{extremal in direction $d$} (in a polytope $P$) if $d$ is a normal vector of a separating hyperplane containing only $p$ from the whole $P$ and such that $p+d$ lies in the other half-space than $P$. Intuitively, elements of $\mathcal E(\vec C)$ are those tuples of corners that form corners of ``combinations'' of the $C_i$'s. Formally, denote by $\vec C$ the $|\mathit{S}|$-dimensional vector of the $C_i$'s. For a distribution $\mu$, the ``$\mu$-combination of the polytopes $C_i$'' is the polytope
$$\mu\vec C^\top = \{\sum_{i=1}^{|\mathit{S}|} \mu(s_i){c_i} \mid \forall i: {c_i}\in C_i\}.$$
The corners $\mathcal E(\mu\vec C^\top)$ are then exactly $\{\mu c^\top\mid c\in \mathcal E(\vec C)\}$. Further, we say that a choice is \emph{extremal} if it can be written as $W(c)$ for some extremal $c$, i.e. $c\in \mathcal E(\vec C)$. Note that these points are mapped to pure strategies and achieve \emph{Pareto extremal values} when applied to any distribution, i.e. $\mu c^\top$ is a corner of $\mu \vec C^\top$ for every distribution $\mu$.

\begin{refproposition}{prop-alg-nondeterm}
$E$ containing $\vec 1$ is a bisimulation matrix iff the matrix is $P_A^{W(c)}$-stable for all $A\subseteq\mathit{L}$ and $c\in\mathcal E(\vec C)$.
\end{refproposition}
\begin{proof}
Observe that if $\mu\sim\nu$ then $\mu\vec C^\top$ and $\nu \vec C^\top$ are the same polytopes. Indeed, for every choice on one side there must be a choice on the other side matching in all components. Conversely, if $\mu\vec C^\top\neq\nu \vec C^\top$ then $\mu\not\sim\nu$, as Spoiler can choose a vector that cannot be matched by Duplicator. Note that the equality of the polytopes $\mu\vec C^\top$ and $\nu \vec C^\top$ can be tested by the equality of the sets of their extremal points, and the extremal points are exactly the points $\mu c^\top$ and $\nu c^\top$ for $c\in \mathcal E(\vec C)$.

Hence we prove the following two facts:
\begin{enumerate}
\item[(1)] the extremal choices, i.e.\ the choices $W(c)$ with $c\in\mathcal E(\vec C)$, are sufficient for Spoiler, and
\item[(2)] if Spoiler plays an extremal choice $W(c)$ with $c\in\mathcal E(\vec C)$, then the same choice $W(c)$ is an optimal reply of Duplicator, for any distributions $\mu$ and $\nu$.
\end{enumerate}
As to (1), intuitively, if two polytopes are different, there must be a corner of one not contained in the other, by convexity of the polytopes. Formally, for given $\mu\not\sim\nu$, we have $\mu\vec C^\top\neq\nu \vec C^\top$, and an optimal choice of Spoiler is a $W(c)$ such that $\mu c^\top\notin \nu \vec C^\top$ (or the other way round, $\nu c^\top\notin \mu \vec C^\top$). Such a choice can be made so that $\mu P_A^{W(c)}$ is Pareto extremal, hence a corner of $\mu\vec C^\top$.

As to (2), intuitively, if the two polytopes are the same and Spoiler checks whether a corner $c_1$ of one is also a corner of the other, Duplicator has to answer with a corner $c_2$ that is extremal in the same direction as $c_1$. Formally, let $\mu\sim\nu$, let $W(s)$ be an extremal choice of Spoiler on $\mu$, and let $W(d)$ be an optimal (winning) response of Duplicator on $\nu$, supposed, for a contradiction, different from $W(s)$. Since $s$ is extremal in some direction $v$ for which $d$ is not, and since $W(s)$ achieves on $\mu$ the same as $W(d)$ on $\nu$, there is a choice $W(d')$ where $d'$ is extremal in direction $v$ and thus achieves a strictly better Pareto value on $\nu$ than $d$, hence also strictly better (in direction $v$) than $W(s)$ on $\mu$. Now if Spoiler moved from $\nu$ by $W(d)$, a matching response would be $W(s)$. On the other hand, if Spoiler moved from $\nu$ by $W(d')$, this choice strictly dominates $W(d)$ on $\nu$ (in direction $v$) and thus all choices on $\mu$ (in direction $v$), as $s$ is extremal in direction $v$. Hence there is no matching response for Duplicator, a contradiction.
\medskip

As a result of (1) and (2), the bisimulation matrix requirement can be simplified. In the game fashion it reads: for all $A\subseteq\mathit{L}$
\begin{multline*}
(\mu - \nu)E=0 \implies\forall W_S\in\mathcal W: \exists W_D\in\mathcal W:\\\mu P_A^{W_S}\vec1=\nu P_A^{W_D}\vec1 \wedge (\mu P_A^{W_S} - \nu P_A^{W_D})E=0
\end{multline*}
Now we can transform it into: for all $A\subseteq\mathit{L}$
\begin{multline*}
(\mu - \nu)E=0 \implies\forall c\in\mathcal E(\vec C):\\ (\mu P_A^{W(c)}-\nu P_A^{W(c)})\vec1=0 \wedge (\mu P_A^{W(c)} - \nu P_A^{W(c)})E=0
\end{multline*}
and since $\vec1$ is a column of $E$, we can write it equivalently as: for all $A\subseteq\mathit{L}$
$$(\mu - \nu)E=0 \implies \forall c\in\mathcal E(\vec C): (\mu - \nu) P_A^{W(c)} E=0,$$
which is nothing but $P_A^{W(c)}$-stability for all $A\subseteq\mathit{L}$ and $c\in\mathcal E(\vec C)$. (We deal with a $\rho$ that is not a difference of two distributions by scaling, as in Proposition~\ref{prop-alg-determ}.) \qed
\end{proof}

\begin{corollary}
Any matrix that is $P_A^{W(c)}$-stable for all $A\subseteq\mathit{L}$ and $c\in\mathcal E(\vec C)$, contains $\vec 1$, and has minimal rank is a minimal bisimulation matrix.
\end{corollary}

\begin{reftheorem}{thm:algorithm-finite}
Algorithm~\ref{alg-fin} computes a minimal bisimulation matrix in exponential time.
\end{reftheorem}
\begin{proof}
The proof follows from the previous corollary and the fact that the algorithm only adds columns required by stability on the current partitioning. Concerning the complexity, each step is polynomial except for computing and iterating over the exponentially many extremal choices and the exponentially many sets of labels. The extremal points $\mathcal E(\vec C)$ can be computed easily: firstly, we identify for which directions the corners of each $C_i$ are extremal; the elements of $\mathcal E(\vec C)$ are then the combinations of corners extremal in the same direction. Therefore, we only need to compute the common partitioning of the directions according to extremality w.r.t.\ each corner.\qed
\end{proof}

\subsection{Continuous-time systems}
\label{app:infinite}
Let us repeat the main theorem of the subsection.
\begin{reftheorem}{thm:tableau}
Let $\mathcal{S} = (\mathcal{Q},\mathcal{C},\mathcal{A},\rightarrow,\kappa, F)$ be a deterministic SA over exponential distributions. There is an algorithm to decide in time polynomial in $|\mathcal{S}|$ and exponential in $|\mathcal{C}|$ whether $q_1 \sim q_2$ for any locations $q_1,q_2$.
\end{reftheorem}
\noindent The proof follows easily from the following lemmata.
\begin{reflemma}{lem:abs}
For any distributions $\mu, \nu$ on $S$ we have $\mu \sim \nu$ iff $\xi(\mu) \sim \xi(\nu)$.
\end{reflemma}
\begin{proof}
$\Rightarrow$: Let us take the maximal bisimulation in $\mathbf{P}_\mathcal{S}$ and map it by $\xi$; it is easy to see that the image is still a bisimulation, since the operations $\xi$ and $\patransa{A}$ commute for any $A\subseteq \mathit{L}$: for any distribution $\mu$, we have $\mu(\mathit{S}_A) = \xi(\mu)(\mathit{S}_A)$, and the unique distributions $\mu',\mu''$ such that $\mu \patransa{A} \mu'$ and $\xi(\mu) \patransa{A} \mu''$ satisfy $\mu'' = \xi(\mu')$.

$\Leftarrow$: Let us take $\mu$, $\nu$ such that $\mu \not\sim\nu$. Then there is a finite sequence of sets of labels $A_1, \ldots, A_n$ such that after applying this sequence, one of the conditions in Definition~\ref{def:infinite-bisim} is not satisfied. Again, as the operations $\xi$ and $\patransa{A}$ commute for any $A\subseteq \mathit{L}$, we get that also $\xi(\mu) \not\sim \xi(\nu)$. \qed
\end{proof}

\begin{reflemma}{lem:expo-finite}
For a deterministic SA over exponential distributions, $|\bar{S}| \leq |\mathcal{Q}|2^{|\mathcal{C}|}$.
\end{reflemma}
\begin{proof}
It is easy to check that for all states of the form $q \otimes \bigotimes_{c\in X} Exp(\lambda_c)$ with $X \subseteq \mathcal{C}$, any successor in $\bar{\PA}$ has the same form. Let us fix a state of such a form $q \otimes \bigotimes_{c \in X} Exp(\lambda_c)$ and
\begin{itemize}
\item an edge $(q, a, X', q')$ such that $X \cap X' = \{c'\}$ (i.e. exactly one clock from the trigger set is still positive). The successor state is of the form $q' \otimes \bigotimes_{c \in (X\setminus \{c'\}) \cup \kappa(q')} Exp(\lambda_c)$. Indeed, the residual value of a positive clock with value $Z \sim Exp(\lambda)$, conditioned on another independent clock with value $Y$ expiring first, is still exponentially distributed with rate $\lambda$: $P[Z - Y > t \mid Z > Y] = e^{-\lambda t}$ by memorylessness, as $P[Z > Y + t] = \mathbb{E}\big[e^{-\lambda (Y+t)}\big] = e^{-\lambda t}\, P[Z > Y]$.
\item an edge of the general form $(q, a, X', q')$ such that $X \cap X' \neq \emptyset$ (i.e. some clocks from the trigger set are still positive) can be split into a diamond of edges through intermediate states in which the clocks from $X \cap X'$ run down to zero one by one; each of the intermediate states is of the specified form.\qed
\end{itemize}
\end{proof}

\begin{reflemma}{prop:correctness}
There is a successful tableau from $\mu \sim \nu$ iff $\mu \sim \nu$ in $\hat{\pts}$. Moreover, the set of nodes of a successful tableau is a subset of a bisimulation.
\end{reflemma}
\begin{proof}
$\Leftarrow$: We can build an infinite successful tableau using only the rule \tableau{Step}. Note that the rule exactly follows the transition relation of $\hat{\pts}$ (it only regards the distribution as a discrete convex combination of finitely many distributions -- states of $\bar{\PA}$). Hence, by applying the rule \tableau{Step} from bisimilar distributions, we can obtain only tableau nodes corresponding to bisimilar distributions, never reaching a failure node.

$\Rightarrow$: First, observe that if there is a successful tableau $T$ from a node $\mu \sim \nu$, there is also a successful (possibly infinite) tableau $T'$ using only the rule \tableau{Step}. This is easy to observe, since whenever there is an application of the \tableau{Lin} rule, one can instead iteratively apply the \tableau{Step} rule infinitely many times (since one can express the current node as a linear combination of nodes from which one can apply the \tableau{Step} rule; and the same inductively holds for each such successor node). Note that by this construction, the set of nodes of $T$ is a subset of the set of nodes of $T'$. We show that for any node $\mu_1 \sim \mu_2$ in $T'$ we have $\mu_1 \sim \mu_2$ in $\hat{\pts}$. Let us fix such a node $\mu_1 \sim \mu_2$ and let $R$ be the relation such that $\mu_1' R \mu_2'$ if $\mu_1' \sim \mu_2'$ is an ancestor of the node $\mu_1 \sim \mu_2$ (including the node itself). Since the rule \tableau{Step} closely follows the definition of bisimulation, it is easy to see that $R$ is a bisimulation. As $R$ also contains $(\mu_1,\mu_2)$, we have $\mu_1 \sim \mu_2$. \qed
\end{proof}

\begin{reflemma}{lem:finite-tableau}
There is a successful tableau from $\mu \sim \nu$ iff there is a finite successful tableau from $\mu \sim \nu$ of size polynomial in $|\bar{S}|$.
\end{reflemma}
\begin{proof}
The implication $\Leftarrow$ is trivial. As regards $\Rightarrow$, let us assume that there is a successful tableau from $\mu \sim \nu$. As each node in the tableau corresponds to a vector of dimension $|\bar{S}|$, the maximal size of a set of linearly independent nodes is $|\bar{S}|$. By applying the rule \tableau{Lin} whenever possible, we can prune the tableau to linear size. \qed
\end{proof}

Note that we not only have a polynomial bound on the size of a successful tableau, we also have a deterministic polynomial-time procedure to construct such a tableau. We build the tableau in an arbitrary fixed order (such as breadth-first); for each node, we first check whether the \tableau{Lin} rule can be applied, and if not, we apply the \tableau{Step} rule. This concludes the proof of Theorem~\ref{thm:tableau}.
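The following Python sketch (our own rendering; the encoding of the finite abstraction $\bar{S}$ by an indicator vector per label set and a matrix of the unique successor distributions is an illustrative choice, not fixed by the paper) makes this procedure concrete for a deterministic system.
\begin{verbatim}
import numpy as np
from collections import deque

def tableau_bisim(mu0, nu0, label_sets, members, succ, tol=1e-9):
    """Decide mu0 ~ nu0 by the tableau method over n states.

    members[A]: 0/1 vector marking S_A (states with a label in A)
    succ[A]:    n x n matrix whose row s is the unique successor
                distribution nu_s of s in S_A (other rows unused)
    """
    basis = np.zeros((0, len(mu0)))       # span of expanded nodes
    queue = deque([(np.asarray(mu0), np.asarray(nu0))])
    while queue:
        mu, nu = queue.popleft()
        rho = (mu - nu).reshape(1, -1)
        # rule (Lin): prune if rho lies in the span of old nodes
        if basis.shape[0] and \
           np.linalg.matrix_rank(np.vstack([basis, rho]), tol) \
           == np.linalg.matrix_rank(basis, tol):
            continue
        basis = np.vstack([basis, rho])
        # rule (Step): masses must match; push unique successors
        for A in label_sets:
            m1, m2 = mu @ members[A], nu @ members[A]
            if abs(m1 - m2) > tol:
                return False              # failure node reached
            if m1 > tol:
                queue.append(((mu * members[A]) @ succ[A] / m1,
                              (nu * members[A]) @ succ[A] / m2))
    return True                           # successful tableau
\end{verbatim}
Since the rank of \texttt{basis} grows with every expanded node and is bounded by the dimension, at most $|\bar{S}|$ nodes are ever expanded, matching the bound of Lemma~\ref{lem:finite-tableau}.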
\section{Applications}
\label{sec:applications}
\label{sec:applications!}
We now argue by means of some concrete application domains that the distribution view on bisimulation yields a fruitful notion.

\subsection{Memoryless vs.\ memoryful continuous time.}
\label{sec:applications-sa}
First, we reconsider the motivating discussion from Section~\ref{sec:intro}, revolving around the difference between continuous time represented by real-valued clocks and time represented by memoryless stochastic delays. For this we introduce a simple model of \emph{stochastic automata}~\cite{DBLP:journals/iandc/DArgenioK05}.

\begin{definition}
A \emph{stochastic automaton (SA)} is a tuple $\mathcal{S} = (\mathcal{Q},\mathcal{C},\mathcal{A},\rightarrow,\kappa, F)$ where $\mathcal{Q}$ is a set of locations, $\mathcal{C}$ is a set of clocks, $\mathcal{A}$ is a set of actions, $\rightarrow \;\subseteq \mathcal{Q} \times \mathcal{A} \times 2^\mathcal{C} \times \mathcal{Q}$ is a set of edges, $\kappa: \mathcal{Q} \to 2^\mathcal{C}$ is a clock setting function, and $F$ assigns to each clock its distribution over $\mathbb{R}_{\ge 0}$.
\end{definition}

Avoiding technical details, $\mathcal{S}$ has the following NLMP semantics $\mathbf{P}_\mathcal{S}$ with state space $S = \mathcal{Q} \times \mathbb{R}^\mathcal{C}$, assuming it is initialized in some location $q_0$: When a location $q$ is entered, for each clock $c \in \kappa(q)$ a positive \emph{value} is chosen randomly according to the distribution $F(c)$ and stored in the state space. Intuitively, the automaton idles in location $q$, with all clock values decreasing at the same speed, until some edge $(q,a,X,q')$ becomes \emph{enabled}, i.e. all clocks from $X$ have value $\leq 0$. After this \emph{idling time} $t$, the action $a$ is taken and the automaton enters the next location $q'$. If an edge is enabled on entering a location, it is taken immediately, i.e. $t=0$. If more than one edge becomes enabled simultaneously, one of them is chosen non-deterministically. The formal definition is given in the appendix.
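To make the race semantics concrete, here is a small Monte-Carlo sketch (our own illustration) of the two automata $q$ and $q'$ from Section~\ref{sec:intro}: clocks $x\sim\mathrm{Exp}(1)$ and $y\sim\mathrm{Exp}(2)$ race, and after the first transition the losing clock either keeps its residual value (model $q$) or is freshly resampled (model $q'$). By memorylessness, the observable timing coincides, which the simulation confirms empirically.
\begin{verbatim}
import random

def run(resample, n=100_000, seed=0):
    """Sample the two successive transition times of q (keep the
    residual clock value) and q' (resample the losing clock)."""
    rnd = random.Random(seed)
    times = []
    for _ in range(n):
        x, y = rnd.expovariate(1.0), rnd.expovariate(2.0)
        t1 = min(x, y)                       # first transition
        losing_rate = 2.0 if x < y else 1.0  # rate of the loser
        t2 = (rnd.expovariate(losing_rate)   # q': fresh sample
              if resample else abs(x - y))   # q : residual value
        times.append((t1, t2))
    return times

# Both variants agree up to sampling error
# (e.g. the mean of the second delay is 5/6 in both):
for flag in (False, True):
    ts = run(flag)
    print(sum(t2 for _, t2 in ts) / len(ts))
\end{verbatim}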
We are now in the position to harvest Definition~\ref{def:infinite-bisim} to arrive at the novel bisimulation for stochastic automata.

\begin{definition}\label{def:bisim-sta}
We say that locations $q_1,q_2$ of an SA $\mathcal{S}$ are \emph{probabilistic bisimilar}, denoted $q_1 \sim q_2$, if $\mu_1 \sim \mu_2$ in $\mathbf{P}_\mathcal{S}$, where each $\mu_i$ is the product distribution given by the location being $q_i$, any $c \not\in \kappa(q_i)$ being $0$, and any $c \in \kappa(q_i)$ being independently set to a random value according to $F(c)$.
\end{definition}

This bisimulation identifies $q$ and $q'$ from Section~\ref{sec:intro}, unlike any previous bisimulation on SA~\cite{DBLP:journals/iandc/DArgenioK05}. In Section~\ref{sec:algorithms-infinite} we discuss how to compute this bisimulation, despite its continuous state space. Recall that the model initialized by $q$ is obtained by first translating two simple CTMC and then applying the natural TA interleaving semantics, while the model of $q'$ is obtained by first applying the equally natural CTMC interleaving semantics prior to translation. The bisimilarity of these two models generalizes to the whole universe of CTMC and SA:

\begin{theorem}\label{thm:sta-commute}
Let $SA(\mathcal{C})$ denote the stochastic automaton corresponding to a CTMC $\mathcal{C}$. For any CTMC $\mathcal{C}_1, \mathcal{C}_2$, we have
$$SA(\mathcal{C}_1) \parallel_{S\!A} SA(\mathcal{C}_2) \;\; \sim \;\; SA(\mathcal{C}_1 \parallel_{CT} \mathcal{C}_2).$$
\end{theorem}

Here, $\parallel_{S\!A}$ and $\parallel_{CT}$ denote the interleaving parallel composition of SA~\cite{DBLP:journals/iandc/DArgenioK05a} (echoing TA parallel composition) and of CTMC~\cite{pepa,HHM} (the Kronecker sum of their matrix representations), respectively.
\subsection{Bisimulation for partial-observation MDP (POMDP).}
A POMDP is a quadruple $\mathcal{M}=(\mathit{S},\mathit{L},\delta, \mathcal{O})$ where (as in an MDP) $\mathit{S}$ is a set of states, $\mathit{L}$ is a set of actions, and $\delta: \mathit{S} \times \mathit{L} \to \mathcal{D}(\mathit{S})$ is a transition function. Furthermore, $\mathcal{O} \subseteq 2^\mathit{S}$ partitions the state space. The choice of actions is resolved by a policy, yielding a Markov chain. Unlike in an MDP, such a choice is not based on the knowledge of the current state, only on knowing that the current state belongs to an \emph{observation} $o \in \mathcal{O}$. POMDPs have a wide range of applications in robotic control, automated planning, dialogue systems, medical diagnosis, and many other areas~\cite{DBLP:journals/aamas/ShaniPK13}.

In the analysis of POMDP, distributions over states, called \emph{beliefs}, arise naturally. They allow for transforming the POMDP $\mathcal{M}$ into a fully observable NLMP $D_{\mathcal{M}} = (\mathit{S},\mathcal{O}, \longrightarrow)$ with continuous space, by setting $(s, o, \mu) \in \longrightarrow$ if $s \in o$ and $\delta(s,a) = \mu$ for some $a\in \mathit{L}$. Although probabilistic bisimulations over beliefs have already been considered~\cite{DBLP:conf/ijcai/CastroPP09,DBLP:conf/nfm/JansenNZ12}, no connection of this particular case to general probabilistic bisimulation has been studied. We set $\mu \sim \mu'$ in $\mathcal{M}$ if $\mu \sim \mu'$ in $D_\mathcal{M}$. In Section~\ref{sec:algorithms-finite}, we shall provide an algorithm for computing bisimulations over beliefs in finite POMDP. Previously, there was only an algorithm~\cite{DBLP:conf/nfm/JansenNZ12} for computing bisimulations on distributions of Markov \emph{chains} with partial observation.
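A minimal sketch of this transformation for a finite POMDP (the encoding and names are our own, chosen for illustration):
\begin{verbatim}
def belief_nlmp(states, actions, delta, observations):
    """Turn a finite POMDP into the NLMP D_M over beliefs:
    labels are observations o, and the distribution delta[s][a]
    is available in s under label o whenever s lies in o.

    delta[s][a]: dict mapping successor states to probabilities
    observations: disjoint sets of states covering `states`
    """
    tau = {s: {} for s in states}
    for i, o in enumerate(observations):
        for s in o:
            tau[s][i] = [delta[s][a] for a in actions]
    return tau
\end{verbatim}
A belief then steps under an observation label according to the distribution lifting of Section~\ref{sec:bisim}.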
\subsection{Further applications.}
Probabilistic automata are especially apt for compositional modelling of \emph{distributed systems}. The only information a component in a distributed system has about the current state of another component stems from their mutual communication. Therefore, each component can also be viewed from the outside as a partial-observation system. Thus, also in this context, distribution bisimulation is a natural concept. Furthermore, we can understand a PA as a description, in the sense of~\cite{DBLP:journals/deds/GastG11,may1974biological}, of a representative \emph{agent} in a large homogeneous \emph{population}. The distribution view then naturally represents the ratios of agents currently being in the individual states, and labels given to this large population of PAs correspond to global control actions~\cite{DBLP:journals/deds/GastG11}. For more details on applications, see the appendix.

\section{Probabilistic bisimulation on distributions}
\label{sec:bisim}
A (potentially uncountable) set $S$ is a \emph{measurable space} if it is equipped with a $\sigma$-algebra, which we denote by $\Sigma(S)$. The elements of $\Sigma(S)$ are called \emph{measurable sets}. For a measurable space $S$, let $\mathcal{M}(S)$ denote the set of \emph{probability measures} (or \emph{probability distributions}) over $S$. The following definition is similar to the treatment of~\cite{phd-cont}.

\begin{definition}
A \emph{non-deterministic labelled Markov process} (NLMP) is a tuple $\mathbf{P} = (\mathit{S}, \mathit{L}, \{\tau_a \mid a\in\mathit{L}\})$ where $\mathit{S}$ is a measurable space of \emph{states}, $\mathit{L}$ is a measurable space of \emph{labels}, and $\tau_a : \mathit{S} \to \sigmafield(\measures(\states))$ assigns to each state $s$ a measurable set of probability measures $\tau_a(s)$ available in $s$ under $a$.%
\footnote{We further require that for each $s \in \mathit{S}$ we have $\{(a,\mu) \mid \mu \in \tau_a(s)\} \in \sigmafield(\labels) \otimes \sigmafield(\measures(\states))$ and for each $A \in \sigmafield(\labels)$ and $Y \in \sigmafield(\measures(\states))$ we have $\{s\in\mathit{S} \mid \exists a \in A.\ \tau_a(s) \cap Y \neq \emptyset \}\in \sigmafield(\states)$. Here $\sigmafield(\measures(\states))$ is the Giry $\sigma$-algebra~\cite{giry1982categorical} over $\mathcal{M}(\mathit{S})$.}
\end{definition}

When in a state $s\in\mathit{S}$, the NLMP reads a label $a \in \mathit{L}$ and \emph{non-deterministically} chooses a successor distribution $\mu\in\mathcal{M}(\mathit{S})$ that is in the set of convex combinations\footnote{A distribution $\mu \in\mathcal{M}(\mathit{S})$ is a \emph{convex combination} of a set $M \in \sigmafield(\measures(\states))$ of distributions if there is a measure $\nu$ on $\mathcal{M}(\mathit{S})$ such that $\nu(M) = 1$ and $\mu = \int_{\mu'\in\mathcal{M}(\mathit{S})} \mu' \nu(\de{\mu'})$.} over $\tau_a(s)$, denoted by $s \patransa{a} \mu$. If there is no such distribution, the process halts. Otherwise, it moves into a successor state according to $\mu$. Considering convex combinations is necessary, as it gives more power than pure resolution of non-determinism~\cite{Segala:1996:MVR:239648}.

\begin{example}
If all sets are finite, we obtain \emph{probabilistic automata (PA)}, defined~\cite{Segala:1996:MVR:239648} as a triple $(\mathit{S}, \mathit{L}, \longrightarrow)$ where $\mathord{\longrightarrow} \subseteq \mathit{S} \times \mathit{L} \times \mathcal{D}(\mathit{S})$ is a probabilistic transition relation with $(s,a,\mu)\in\mathord{\longrightarrow}$ iff $\mu\in\tau_a(s)$.
\end{example}

\begin{example}\label{ex:cont}
In the continuous setting, consider a random number generator that also remembers the previous number. We set $\mathit{L}=[0,1]$, $\mathit{S} = [0,1]\times[0,1]$ and $\tau_{x}(\langle \textit{new},\textit{last}\rangle) = \{\mu_{x}\}$ for $x=\mathit{new}$ and $\emptyset$ otherwise, where $\mu_{x}$ is the uniform distribution on $[0,1]\times \{x\}$. If we start with a uniform distribution over $S$, the measure of successors under any single $x\in\mathit{L}$ is $0$. Thus, in order to get any information about the system, we have to consider successors under sets of labels, e.g.\ intervals.
\end{example}

For a measurable set $A\subseteq\mathit{L}$ of labels, we write $s \patransa{A} \mu$ if $s \patransa{a} \mu$ for some $a\in A$, and denote by $\mathit{S}_A := \{s \mid \exists \mu: s \patransa{A} \mu\}$ the set of states having some outgoing label from $A$.
Further, we can lift this to probability distributions by setting $\mu \patransa{A} \nu$ if $\nu = \frac{1}{\mu(\mathit{S}_A)}\int_{s \in\mathit{S}_A} \nu_s\ \mu(d\,s)$ for some measurable function assigning to each state $s\in\mathit{S}_A$ a measure $\nu_s$ such that $s \patransa{A} \nu_s$. Intuitively, in $\mu$ we restrict to states that do not halt under $A$ and consider all possible combinations of their transitions; we scale up by $\frac{1}{\mu(\mathit{S}_A)}$ to obtain a distribution again.

\begin{example}
In the previous example, let $\upsilon$ be the uniform distribution. Due to the independence of the newly generated number from the previous value, we get $\upsilon\patransa{[0,1]}\upsilon$. Similarly, $\upsilon\xrightarrow{[0.1,0.2]}\upsilon_{[0.1,0.2]}$ where $\upsilon_{[0.1,0.2]}$ is uniform on $[0,1]$ in the first component and uniform on $[0.1,0.2]$ in the second component, with no correlation.
\end{example}
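For the finite PA case, this lifting takes a simple concrete form; the following sketch (our own encoding, for illustration) computes one lifted step.
\begin{verbatim}
import numpy as np

def lift_step(mu, trans, A, choice):
    """One lifted step mu --A--> nu in a finite PA.

    trans[s]:  dict label -> successor distributions of state s
    choice[s]: the distribution nu_s picked for s, i.e. one of
               the convex combinations of {nu : s --a--> nu, a in A}
    """
    n = len(mu)
    S_A = [s for s in range(n)
           if any(a in trans[s] for a in A)]  # non-halting states
    mass = sum(mu[s] for s in S_A)            # mu(S_A)
    if mass == 0:
        return None                           # the process halts
    nu = np.zeros(n)
    for s in S_A:
        nu += mu[s] * np.asarray(choice[s])   # weight nu_s by mu(s)
    return nu / mass                          # rescale to mass one
\end{verbatim}
Here \texttt{choice} resolves the non-determinism; the bisimulation below quantifies over all such resolutions.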
Using this notation, a non-deterministic and probabilistic system such as an NLMP can be regarded as a non-probabilistic, thus purely non-deterministic, labelled transition system over the uncountable space of probability distributions. The natural bisimulation from this distribution perspective is as follows.

\begin{definition}\label{def:infinite-bisim}
Let $(\mathit{S}, \mathit{L}, \{\tau_a \mid a \in \mathit{L}\})$ be an NLMP and $R \subseteq \mathcal{M}(\mathit{S}) \times \mathcal{M}(\mathit{S})$ be a symmetric relation. We say that $R$ is a (strong) \emph{probabilistic bisimulation} if for each $\mu \, R \, \nu$ and measurable $A\subseteq\mathit{L}$
\begin{enumerate}
\item $\mu(\mathit{S}_A) = \nu(\mathit{S}_A)$, and
\item for each $\mu \patransa{A} \mu'$ there is a $\nu \patransa{A} \nu'$ such that $\mu' \, R \, \nu'$.
\end{enumerate}
We set $\mu \sim \nu$ if there is a probabilistic bisimulation $R$ such that $\mu \, R \, \nu$.
\end{definition}

\begin{example}\label{ex:cont2}
Considering Example~\ref{ex:cont}, the states $\{x\}\times[0,1]$ form a class of $\sim$ for each $x\in[0,1]$, as the old value does not affect the behaviour. More precisely, $\mu\sim\nu$ iff the marginals of their first components are the same.
\end{example}

\noindent\textbf{Naturalness.} Our definition of bisimulation is not created ad hoc, as it often appears for relational definitions, but is actually an instantiation of the standard bisimulation for a particular \emph{coalgebra}. Although this aspect is not necessary for understanding the paper, it is another argument for the naturalness of our definition. For the reader's convenience, we present a short introduction to coalgebras and the formal definitions in the appendix. Here we only provide an intuitive explanation by example. Non-deterministic labelled transition systems are essentially given by the transition function $\mathit{S}\to \mathcal P(\mathit{S})^{\mathit{L}}$; given a state $s\in \mathit{S}$ and a label $a\in\mathit{L}$, we can obtain the set of successors $\{s'\in\mathit{S}\mid s\tran{a}s'\}$. This transition function corresponds to a coalgebra, which induces a bisimulation coinciding with the classical one of Park and Milner~\cite{DBLP:books/daglib/0067019}. Similarly, PA are given by the transition function $\mathit{S}\to \mathcal P(\mathcal{D}(\mathit{S}))^{\mathit{L}}$; instead of successors, there are distributions over successors. Again, the corresponding coalgebraic bisimulation coincides with the classical ones of Larsen and Skou~\cite{DBLP:conf/popl/LarsenS89} and Segala and Lynch~\cite{DBLP:conf/concur/SegalaL94}. In contrast, our definition can be obtained by considering the states $\mathit{S}'$ to be distributions in $\mathcal{M}(\mathit{S})$ over the original state space and defining the transition function to be $\mathit{S}'\to ([0,1]\times\mathcal P(\mathit{S}'))^{\Sigma(\mathit{L})}$. The difference to the standard non-probabilistic case is twofold: firstly, we consider all measurable sets of labels, i.e.\ all elements of $\Sigma(\mathit{L})$; secondly, for each label set we consider the mass, i.e.\ the element of $[0,1]$, of the current state distribution that does not deadlock, i.e.\ can perform some of the labels. These two aspects form the crux of our approach and distinguish it from other approaches.
\section{Discussion}
\label{sec:discuss}
\subsection{Bisimulation coalgebraically}
Definitions of bisimulations can be given in terms of relations, and we will do so, too. However, for two reasons we also give a coalgebraic definition that induces our relational definition. Firstly, due to the general framework, our definition covers a spectrum of bisimulations depending on the interpretation of the coalgebra and is applicable to more complex systems, automatically yielding the bisimulation definitions. Secondly, any ad-hoc features of a simple coalgebraic definition are more visible and can be clearly identified, whereas it is difficult to distinguish which of two similar relational definitions is more natural. As we assume no previous knowledge of categorical notions, we give a brief introduction to coalgebras in the spirit of~\cite{Sangiorgi:2011:ATB:2103601}.

A \emph{functor} $F$ (on sets) assigns to each set $X$ a set $F(X)$, and to each set function $f:X\to Y$ a set function $F(f):F(X)\to F(Y)$, such that two natural conditions are satisfied: (i) the identity function on $X$ is mapped to the identity function on $F(X)$, and (ii) a composition $f\circ g$ is mapped to the composition $F(f)\circ F(g)$.

\begin{example}
The powerset functor $\mathcal P(-)$ maps a set $X$ to the set $\mathcal P(X)$ of its subsets, and a function $f:X\to Y$ to $\mathcal P(f):\mathcal P(X)\to\mathcal P(Y)$ given by $U\mapsto \{f(x)\mid x\in U\}$. Similarly, for a fixed set $\mathit{L}$, the operator $(-)^{\mathit{L}}$ mapping $X$ to the set $X^{\mathit{L}}$ of functions $\mathit{L}\to X$ is a functor, where the image of $f:X\to Y$ is $F(f):X^{\mathit{L}}\to Y^{\mathit{L}}$, given by mapping $u:\mathit{L}\to X$ to $f\circ u:\mathit{L}\to Y$.
\end{example}

For a functor $F$, an \emph{$F$-coalgebra} is a pair of a carrier set (or state space) $S$ and an operation function $\mathrm{next}:S\to F(S)$. Intuitively, the function $\mathrm{next}$ describes the transition to the next step.

\begin{example}
A transition system $(S,\rightarrow)$ with $\mathord{\rightarrow}\subseteq S\times S$ can be understood as a $\mathcal P(-)$-coalgebra by setting $\mathrm{next}(s)=\{s'\mid s\tran{} s'\}$; and vice versa, every $\mathcal P(-)$-coalgebra gives rise to a transition system. A labelled transition system $(S,\mathit{L},\rightarrow)$ with a set of labels $\mathit{L}$ and $\mathord{\rightarrow}\subseteq S\times \mathit{L}\times S$ can be seen as a $(\mathcal P(-))^{\mathit{L}}$-coalgebra with $\mathrm{next}:S\to (\mathcal P(S))^{\mathit{L}}$ given by $\mathrm{next}(s)(a) = \{s'\mid s\tran{a}s'\}$.
\end{example}

A \emph{bisimulation} on an $F$-coalgebra $(S,\mathrm{next})$ is an $F$-coalgebra $(R,\overline\mathrm{next})$ with $R\subseteq S\times S$ such that the two projections $\pi_1:R\to S$ and $\pi_2:R\to S$ make the following diagram commute:\footnote{I.e.\ $\mathrm{next}\circ\pi_1=F(\pi_1)\circ\overline\mathrm{next}$ and $\mathrm{next}\circ\pi_2=F(\pi_2)\circ\overline\mathrm{next}$.}

\begin{tikzpicture}[x=2.5cm,y=1.2cm]
\node (S1) at (-1,0) {$S$};
\node (FS1) at (-1,-1) {$F(S)$};
\node (S2) at (1,0) {$S$};
\node (FS2) at (1,-1) {$F(S)$};
\node (R) at (0,0) {$R$};
\node (FR) at (0,-1) {$F(R)$};
\path[->] (S1) edge node[left] {$\mathrm{next}$} (FS1)
 (S2) edge node[right] {$\mathrm{next}$} (FS2)
 (R) edge node[left] {${\overline\mathrm{next}}$} (FR);
\path[->] (R) edge node[above] {$\pi_1$} (S1)
 (R) edge node[above] {$\pi_2$} (S2)
 (FR) edge node[below] {$F(\pi_1)$} (FS1)
 (FR) edge node[below] {$F(\pi_2)$} (FS2);
\end{tikzpicture}

\begin{example}
For LTS, the coalgebraic bisimulation coincides with the classical one of Park and Milner~\cite{DBLP:books/daglib/0067019}, where a symmetric $R$ is a bisimulation if for every $sRt$ and $s\tran{a}s'$ there is $t\tran{a}t'$ with $s'Rt'$ (where $t'$ may depend on $s,a,s'$ and $t$). Indeed, given a classical bisimulation $R$, one can define $\overline\mathrm{next}(\langle s, t\rangle)(a)$ to contain, for every $s\tran{a}s'$, a matching pair $\langle s',t'\rangle$, and symmetrically for $t$. Since all these pairs are from $R$, $(R,\overline\mathrm{next})$ is indeed a coalgebra. Further, the projection $F(\pi_1)$ of $\overline\mathrm{next}(\langle s, t\rangle)$ yields all and nothing but the successors of $s$, symmetrically for $t$, hence the diagram commutes. Conversely, given a coalgebraic bisimulation $(R,\overline\mathrm{next})$, the commuting of $\pi_1$ guarantees that $\overline\mathrm{next}(\langle s, t\rangle)$ yields all and nothing but the successors of $s$. Hence, for each $s\tran{a}s'$ there must be $\langle s',t'\rangle\in\overline\mathrm{next}(\langle s, t\rangle)(a)\subseteq R$, moreover with $t\tran{a}t'$, due to $\pi_2$ commuting.
\end{example}
\medskip

As we have seen, the coalgebraic definition coincides with the relational one for non-probabilistic systems. One can use the same theory for finite probabilistic systems, too. Let $\mathcal{D}(X)$ denote the set of simple distributions, i.e.\ functions $f:X\to[0,1]$ such that $f$ is non-zero for only finitely many elements $x_1,\ldots,x_n$ and $\sum_{i=1}^n f(x_i)=1$. Note that $\mathcal{D}(-)$ can be understood as a functor.

\begin{example}
We can interpret $(\mathcal{D}(-)\cup\{\bullet\})^{\mathit{L}}$-coalgebras as finite Markov decision processes $(S,\mathit{L},Pr)$ with $Pr:S\times \mathit{L}\to\mathcal{D}(S)\cup\{\bullet\}$ that under each action either proceed to a distribution over successors (as opposed to a non-deterministic choice in an LTS) or do not have the action available (the special element $\bullet$). The corresponding coalgebraic bisimulation can be shown to coincide with the classical one of Larsen and Skou~\cite{DBLP:conf/popl/LarsenS89}, where an equivalence relation $R$ is a bisimulation if $\sum_{u\in U}Pr(t,a)(u)=\sum_{u\in U}Pr(t',a)(u)$ for every $a\in \mathit{L}$, classes $T,U$ of $R$, and $t,t'\in T$.
\end{example}
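For the finite case, the induced relational notion can be computed by standard partition refinement; the following sketch (ours, for illustration) computes the coarsest Larsen--Skou bisimulation of such an MDP, with $Pr[s][a]$ encoded as a dict of successor probabilities, or \texttt{None} for $\bullet$.
\begin{verbatim}
from collections import defaultdict

def larsen_skou(states, labels, Pr):
    """Coarsest Larsen-Skou bisimulation by partition refinement."""
    partition = [set(states)]
    while True:
        index = {s: i for i, B in enumerate(partition) for s in B}
        def signature(s):
            sig = [index[s]]                  # refine within blocks
            for a in labels:
                d = Pr[s].get(a)
                sig.append(None if d is None else tuple(
                    round(sum(d.get(t, 0.0) for t in B), 10)
                    for B in partition))
            return tuple(sig)
        blocks = defaultdict(set)
        for s in states:
            blocks[signature(s)].add(s)
        refined = list(blocks.values())
        if len(refined) == len(partition):    # stable: done
            return refined
        partition = refined
\end{verbatim}
For the distribution-based notion, this partitioning of states is replaced by the linear-algebraic computation of a bisimulation matrix in Section~\ref{sec:algorithms}.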
In contrast, uncountable probabilistic systems are more intricate. The set of \emph{probability measures} over $X$ (equipped with a $\sigma$-algebra clear from the context) is denoted by $\mathcal{M}(X)$. Again, defining $\mathcal{M}(f)(\mu)=\mu\circ f^{-1}$ makes $\mathcal{M}(-)$ into a functor.

\begin{example}
We can interpret $\mathcal{M}(-)$-coalgebras as Markov chains with a general (possibly uncountable) state space. However, it is intricate to define the corresponding bisimulation so that it coincides with the relational definition, as already mentioned in Section~\ref{sec:intro}.
\end{example}

In the following section, the proposed bisimulation is induced by a different view on probabilistic systems. Namely, we consider distributions $\mathcal{D}(S)$ or measures $\mathcal{M}(S)$ over the state space $\mathit{S}$ to form the carrier of the coalgebra. A transition then changes this distribution. For instance, a Markov chain can be seen this way as a coalgebra of the identity functor.

\subsection{Bisimulation on distributions coalgebraically}
Observe that from the state-based point of view, PA correspond to $(\mathcal P(\mathcal{D}(-)))^\mathit{L}$-coalgebras. In order to capture the distributional semantics of PA and other systems, we define a functor for finite probabilistic systems with non-determinism by\footnote{On functions $f$, we define the functor by $\heartsuit(f)(n)(a)=(id\times\mathcal P(f))(n(a))$.}
\begin{align*}
\boxed{ ([0,1] \times \mathcal P(-))^\mathit{L} \tag{$\heartsuit$}}
\end{align*}
Now a PA $(\mathit{S}, \mathit{L}, \longrightarrow)$ is a $\heartsuit$-coalgebra with the carrier set $\mathcal{D}(\mathit{S})$. Indeed, the coalgebra assigns to a distribution $\mu$ and a label $a$ the pair $(p,M)$ where
\begin{itemize}
\item $p= \mu(\mathit{S}_a)$ is the probability of states that can read $a$;
\item $M = \emptyset$ if $\mu(\mathit{S}_a) = 0$, and otherwise $M$ is the set of convex combinations over $\{\frac{1}{\mu(\mathit{S}_a)}\sum_{s \in\mathit{S}_a} \mu(s)\cdot\nu_s \mid \forall s\in\mathit{S}_a: s \patransa{a} \nu_s\}$. We write $\mu \patransa{a} \mu'$ for every $\mu'\in M$. In other words, $M$ is obtained by restricting $\mu$ to the states that can read $a$ and weighting all possible combinations of their transitions.
\end{itemize}
Although we use $\heartsuit$ to capture the distribution semantics of PA as above, we could as well use it differently: if we defined that a label that cannot be read in the current state is \emph{ignored} instead of halting, the successor distribution would be defined by making a step from the states that can read the label and staying put elsewhere. (This approach is discussed in the next section.) Moreover, we could easily extend the functor to systems with real rewards (as in~\cite{DBLP:conf/ijcai/CastroPP09}) simply by adding $\mathbb{R}$, obtaining $\mathbb{R}\times([0,1] \times \mathcal P(-))^\mathit{L}$ for rewards on states, or $([0,1] \times \mathcal P(\mathbb{R}\times -))^\mathit{L}$ for rewards on transitions, etc. Similarly, for systems without inner non-determinism, like Rabin automata, we could simplify the functor to $([0,1] \times -)^\mathit{L}$. The only important and novel part of the functor is $[0,1]$, stating the overall probability mass that performs the step. (This is also the only difference to non-probabilistic coalgebraic functors.) In all these cases, the generic $\heartsuit$-bisimulation keeps the same shape; what changes is the induced relational bisimulation.
\begin{lemma}\label{lem:fin-coin}
The union of all $\heartsuit$-bisimulations coincides with $\sim$.
\end{lemma}

Therefore, in order to capture the distributional semantics of NLMP and other continuous systems, we define the functor
\begin{align*}
\boxed{([0,1] \times \mathcal P(-))^{\Sigma(\mathit{L})} } \tag{$\spadesuit$}
\end{align*}
Again, we could vary the functor and adapt it to our particular needs. However, the vital part now is not only $[0,1]$, but also the use of measurable sets of labels instead of individual labels. Analogously to the discrete case, we can view NLMP as $\spadesuit$-coalgebras with the carrier set $\mathcal{M}(\mathit{S})$. The coalgebra assigns to $\mu \in \mathcal{M}(\mathit{S})$ and a set of labels $A \in \sigmafield(\labels)$ the pair $(p,M)$ such that
\begin{itemize}
\item $p= \mu(\mathit{S}_A)$ is the measure of states that can read some $a\in A$, where $\mathit{S}_A = \{ s\in\mathit{S} \mid \exists a \in A.\ \tau_a(s) \neq \emptyset \}$;
\item $M = \emptyset$ if $\mu(\mathit{S}_A) = 0$, and otherwise $M$ is the set of convex combinations\footnote{The set of convex combinations is lifted to a measurable set $Z$ of measures over $\mathit{S}$ as the set $\{X \mapsto \int_{\mu\in Z} \mu(X) \nu(d\,\mu) \mid \text{$\nu$ is a measure over $Z$}\}$.} over $\{\mu_\rho \mid \text{measurable } \rho \text{ with } \rho(s)\in\bigcup_{a\in A}\tau_a(s) \text{ for each } s\in\mathit{S}_A \}$, where
$$ \mu_\rho(X) = \frac{1}{\mu(\mathit{S}_A)} \cdot \int_{s\in\mathit{S}} \rho(s)(X)\ \mu(ds) \quad \forall X\in\sigmafield(\states).$$
\end{itemize}

\begin{lemma}\label{lem:inf-coin}
The union of all $\spadesuit$-bisimulations coincides with $\sim$.
\end{lemma}

\section{Introduction}
\label{sec:intro}
Continuous-time concurrency phenomena can be addressed in two principal manners: on the one hand, \emph{timed automata} (TA) extend interleaving concurrency with real-valued clocks~\cite{alur-dill}; on the other hand, time can be represented by memoryless stochastic time, as in \emph{continuous-time Markov chains} (CTMC) and extensions, where time is represented in the form of exponentially distributed random delays~\cite{pepa,HHM,ymca,DBLP:conf/lics/EisentrautHZ10}. TA and CTMC variations have both been applied to many intriguing cases, and are supported by powerful real-time, respectively stochastic-time, model checkers~\cite{uppaal,prism} with growing user bases. The models are incomparable in expressiveness, but if one extends timed automata with the possibility to sample from exponential distributions~\cite{tutti,DBLP:journals/iandc/DArgenioK05,strulo}, there appears to be a natural bridge from CTMC to TA. This kind of stochastic semantics of timed automata has recently gained considerable popularity through the statistical model checking approach to TA analysis~\cite{statistical1,statistical2}. Still, there is a disturbing difference, and this difference is the original motivation~\cite{pedro-christel} of the work presented in this paper. The obvious translation of an exponentially distributed delay into a clock expiration sampled from the very same exponential probability distribution fails in the presence of concurrency, because the translation is not fully compatible with the natural interleaving concurrency semantics for TA and CTMC, respectively.
This is illustrated by the following example, which in the middle displays two small CTMC (the numbers $1$ and $2$ are the rates of their exponentially distributed delays), supposed to run independently and concurrently.
\begin{center}
\vspace*{-1em}
\begin{tikzpicture}[outer sep=0.1em,->,
 state/.style={draw,circle,minimum size=1.7em, inner sep=0.1em},
 sstate/.style={draw,circle,minimum size=0.7em, inner sep=0.1em},
 label/.style={font=\small}]
\begin{scope}[xshift=-2.6cm]
\node (s) at (0,2) [state] {$q$};
\node (t) at (-1,1) [state] {$u$};
\node (u) at (1,1) [state] {$v$};
\node (v) at (0,0) [state] {$r$};
\node (ss) at (1,2.1) {$\substack{x:=\mathrm{Exp}(1),\\y:=\mathrm{Exp}(2)}$};
\path[->] (s) edge node[label,right]{$a$} node[label,left]{$x=0$} (t)
 (s) edge node[label,left]{$b$} node[label,right]{$y=0$} (u)
 (t) edge node[label,right]{$b$} node[label,left]{$y=0$} (v)
 (u) edge node[label,left]{$a$} node[label,right]{$x=0$} (v)
 ;
\end{scope}
\begin{scope}[xshift=4.7cm]
\node (h) at (-4,1.5) [sstate] {$$};
\node (i) at (-4,0.5) [sstate] {$$};
\node (j) at (-3,1.5) [sstate] {$$};
\node (k) at (-3,0.5) [sstate] {$$};
\path[->] (h) edge node[label,right]{1} (i)
 (j) edge node[label,right]{2} (k)
 ;
\end{scope}
\begin{scope}[xshift=5.6cm]
\node (s) at (0,2) [state,inner sep=0em] {$q'$};
\node (t) at (-1,1) [state,inner sep=0em] {$u'$};
\node (u) at (1,1) [state,inner sep=0em] {$v'$};
\node (v) at (0,0) [state,inner sep=0em] {$r'$};
\node (ss) at (1,2.1) {$\substack{x:=\mathrm{Exp}(1),\\y:=\mathrm{Exp}(2)}$};
\node (tt) at (-2,1) {$\substack{\\y:=\mathrm{Exp}(2)}$};
\node (uu) at (2,1) {$\substack{\\x:=\mathrm{Exp}(1)}$};
\path[->] (s) edge node[label,right]{$a$} node[label,left]{$x=0$} (t)
 (s) edge node[label,left]{$b$} node[label,right]{$y=0$} (u)
 (t) edge node[label,right]{$b$} node[label,left]{$y=0$} (v)
 (u) edge node[label,left]{$a$} node[label,right]{$x=0$} (v)
 ;
\end{scope}
\end{tikzpicture}
\vspace*{-1em}
\end{center}
On the left and right we see two stochastic automata (a variation of timed automata, formally defined in Section~\ref{sec:applications!}). They have clocks $x$ and $y$, which are initialized by sampling from exponential distributions and then each run down to $0$. The first one reaching $0$ triggers a transition, and the other clock keeps on running unless resampled, which happens on the right but not on the left. The left model is obtained by first translating the respective CTMC and then applying the natural TA interleaving semantics, while the right model is obtained by first applying the equally natural CTMC interleaving semantics prior to translation. The two models have subtly different semantics in terms of their underlying dense probabilistic timed transition systems. This can superficially be linked to the memoryless property of exponential distributions, yet there is no formal basis for proving equivalence. This paper closes this gap, which has been open for at least 15 years, by introducing a natural \emph{continuous-space distribution-based} bisimulation. This result is embedded in several further intriguing application contexts and algorithmic achievements for this novel bisimulation.

The theory of bisimulations is a well-established and elegant framework to describe equivalence between processes based on their behaviour.
In the standard semantics of probabilistic systems~\cite{DBLP:conf/popl/LarsenS89,SegalaL94}, when a probabilistic step from a state to a distribution is taken, the random choice is resolved and we continue from one of the successor states. Recently, there has been considerable interest in instead regarding probabilistic systems as deterministic transformers of probability \emph{distributions}~\cite{DBLP:conf/qest/KorthikantiVAK10,DBLP:conf/lics/AgrawalAGT12,DBLP:journals/corr/0001MS13a}, where the choice is not resolved and we continue from the distribution over successors. Thus, instead of the current state the transition changes the current distribution over the states. Although the distribution semantics is very natural in many contexts \cite{DBLP:dblp_journals/fac/Hennessy12}, it has been only partially reflected in the study of bisimulations \cite{DBLP:dblp_journals/fac/Hennessy12,DBLP:journals/ijfcs/DoyenHR08,DBLP:journals/corr/FengZ13,DBLP:conf/lics/EisentrautHZ10}. Our definition arises as an unusual, but very simple instantiation of the standard coalgebraic framework for bisimulations \cite{Sangiorgi:2011:ATB:2103601}. (No knowledge of coalgebra is required from the reader though.) Despite its simplicity, the resulting notion is surprisingly fruitful, not only because it indeed solves the longstanding correspondence problem between CTMC and TA with stochastic semantics. Firstly, it is more adequate than other equivalences when applied to systems with distribution semantics, including large-population models where different parts of the population act differently \cite{may1974biological}. Indeed, as argued in \cite{DBLP:journals/fac/GeorgievskaA12}, some equivalent states are not identified in the standard probabilistic bisimulations and too many are identified in the recent distribution-based bisimulations \cite{DBLP:journals/ijfcs/DoyenHR08,DBLP:journals/corr/FengZ13}. Our approach allows for a bisimulation identifying precisely the desired states~\cite{DBLP:journals/fac/GeorgievskaA12}. Secondly, our bisimulation over distributions induces an equivalence \emph{on states}, and this relation equates behaviourally indistinguishable states which in many settings are unnecessarily distinguished by standard bisimulations. We shall discuss this phenomenon in the context of several applications.
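To make the distribution view concrete, the following small Python sketch (an editorial illustration on a hypothetical three-state system, not taken from this paper) shows how a purely probabilistic step acts as a deterministic transformer of distributions: the current distribution, written as a row vector, is simply pushed through the stochastic matrix.
\begin{verbatim}
# Distribution semantics on a hypothetical three-state system:
# instead of resolving the random choice, the whole distribution
# mu is transformed deterministically, mu_{k+1} = mu_k @ T.
import numpy as np

T = np.array([[0.0, 0.5, 0.5],   # state 0 branches to states 1 and 2
              [0.0, 1.0, 0.0],   # states 1 and 2 are absorbing
              [0.0, 0.0, 1.0]])

mu = np.array([1.0, 0.0, 0.0])   # initially in state 0
for _ in range(3):
    mu = mu @ T                  # the distribution evolves deterministically
    print(mu)                    # [0., 0.5, 0.5] from the first step on
\end{verbatim}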
Nevertheless, the key idea to work with distributions instead of single states also bears disadvantages. The main difficulty is that even for finite systems the space of distributions is uncountable, thus bisimulation is difficult to compute. However, we show that it admits a concise representation using methods of linear algebra and we provide an algorithm for computing it. Further, in order to cover e.g.\ continuous-time systems, we need to handle both uncountably many states (that store the sampled time) and labels (real time durations). Fortunately, there is an elegant way to do so using the standard coalgebra framework. Moreover, it can easily be further generalized, e.g.\ adding rewards to the generic definition is a trivial task. \noindent \textbf{Our contribution} is the following: \begin{itemize} \item We give a natural definition of bisimulation from the distribution perspective for systems with generally uncountable spaces of states and labels. \item We argue by means of several applications that the definition can be considered more useful than the classical notions of probabilistic bisimulation. \item We provide an algorithm to compute this distributional bisimulation on finite non-deterministic probabilistic systems, and present a decision algorithm for uncountable continuous-time systems induced by the stochastic automata mentioned above. \end{itemize} \iffull Full proofs can be found in the appendix.
\else A full version of this paper is available in the appendix. \fi \section{Introduction} \label{sec:intro} The theory of bisimulations is a well-established and elegant framework to describe equivalence between processes based on their behaviour. The original definition was given for non-deterministic processes \cite{DBLP:conf/tcs/Park81,DBLP:books/daglib/0067019} and was further extended to finite probabilistic systems in \cite{DBLP:conf/popl/LarsenS89}. Since then many variants of bisimulations have been proposed and investigated, in particular an extension to non-deterministic probabilistic systems \cite{DBLP:conf/concur/SegalaL94}. Recently, there has been considerable interest in regarding probabilistic systems as deterministic transformers of probability \emph{distributions} rather than individual stochastic processes \cite{DBLP:conf/qest/KorthikantiVAK10,DBLP:conf/lics/AgrawalAGT12,DBLP:journals/corr/0001MS13a}. In the standard semantics of probabilistic systems, when a probabilistic step from a state to a distribution is taken, the random choice is resolved and we continue from one of the successor states. In contrast, under the \emph{distribution semantics} the choice is not resolved and we continue from the distribution over successors. Thus, instead of the current state the transition changes the current distribution over the states. Although the distribution semantics is very natural in many contexts \cite{DBLP:dblp_journals/fac/Hennessy12}, it has been only partially reflected in the study of bisimulations \cite{DBLP:dblp_journals/fac/Hennessy12,DBLP:journals/ijfcs/DoyenHR08,DBLP:journals/corr/FengZ13}. In this paper, we give a \emph{natural} definition of such bisimulations. It arises as an unusual, but very simple instantiation of the standard coalgebraic framework for bisimulations \cite{Sangiorgi:2011:ATB:2103601}. (No knowledge of coalgebra is required from the reader though.) Despite its simplicity, the resulting notion is surprisingly fruitful for two reasons. Firstly, it is more precise than other equivalences when applied to systems with the distribution semantics such as large-population models where different parts of the population act differently \cite{may1974biological}. Indeed, as argued in \cite{DBLP:journals/fac/GeorgievskaA12}, some equivalent states are not identified in the standard probabilistic bisimulations and too many are identified in the recent distribution-based bisimulations \cite{DBLP:journals/ijfcs/DoyenHR08,DBLP:journals/corr/FengZ13}. Our approach allows for a bisimulation identifying precisely the desired states~\cite{DBLP:journals/fac/GeorgievskaA12}. Secondly, our bisimulation over distributions induces a novel behavioural equivalence on states. This equivalence is useful, for it identifies states that are behaviourally indistinguishable in many settings, but are unnecessarily distinguished by standard bisimulations. We document this in several applications: ranging from the control of partially observed systems \cite{DBLP:journals/aamas/ShaniPK13}, where the controller only has a probabilistic belief about where the system is, over distributed scheduler synthesis \cite{DBLP:conf/concur/GiroDF09}, to continuous-time systems where random waiting times are sampled, but are not observed before they elapse \cite{DBLP:journals/iandc/DArgenioK05}. We illustrate this in the following intriguing example from the continuous-time area due to \cite{holger-christel}.
\begin{example}\label{ex:intro} Consider the following two stochastic automata (formally defined in Section \ref{sec:bisim}). They have ``kitchen timers'' $x$ and $y$, which get a random exponentially distributed value when a state with a corresponding assignment is entered. Then the time elapses and the values of the timers decrease until the first one reaches $0$. Then a transition to the next state is taken and the other timers keep on running. \begin{center} \vspace*{-1em} \begin{tikzpicture}[outer sep=0.1em,->, state/.style={draw,circle,minimum size=1.7em, inner sep=0.1em}, label/.style={font=\small}] \begin{scope} \node (s) at (0,2) [state] {$q$}; \node (t) at (-1,1) [state] {$u$}; \node (u) at (1,1) [state] {$v$}; \node (v) at (0,0) [state] {$r$}; \node (ss) at (1,2.1) {$\substack{x:=\mathrm{Exp}(1),\\y:=\mathrm{Exp}(2)}$}; \path[->] (s) edge node[label,right]{$a$} node[label,left]{$x=0$} (t) (s) edge node[label,left]{$b$} node[label,right]{$y=0$} (u) (t) edge node[label,right]{$b$} node[label,left]{$y=0$} (v) (u) edge node[label,left]{$a$} node[label,right]{$x=0$} (v) ; \end{scope} \begin{scope}[xshift=4.6cm] \node (s) at (0,2) [state,inner sep=0em] {$q'$}; \node (t) at (-1,1) [state,inner sep=0em] {$u'$}; \node (u) at (1,1) [state,inner sep=0em] {$v'$}; \node (v) at (0,0) [state,inner sep=0em] {$r'$}; \node (ss) at (1,2.1) {$\substack{x:=\mathrm{Exp}(1),\\y:=\mathrm{Exp}(2)}$}; \node (tt) at (-2,1) {$\substack{\\y:=\mathrm{Exp}(2)}$}; \node (uu) at (2,1) {$\substack{\\x:=\mathrm{Exp}(1)}$}; \path[->] (s) edge node[label,right]{$a$} node[label,left]{$x=0$} (t) (s) edge node[label,left]{$b$} node[label,right]{$y=0$} (u) (t) edge node[label,right]{$b$} node[label,left]{$y=0$} (v) (u) edge node[label,left]{$a$} node[label,right]{$x=0$} (v) ; \end{scope} \end{tikzpicture} \vspace*{-1em} \end{center} \noindent If we observe the times when transitions occur (a timer rings), the two systems behave the same. Indeed, due to the memoryless property of the exponential distributions, there is no way to distinguish them. Yet existing notions of bisimulations and behavioural equivalences in the literature fail to capture this. The distributional view, which we take, is necessary here. Indeed, $u'$ cannot be matched by any $u$ with a particular remaining time on the timer, but it can be matched by an appropriate distribution over $u$'s with all possible remaining times. See Section \ref{sec:applications} for details. \end{example} Nevertheless, the key idea to work with distributions instead of single states also bears disadvantages. The main difficulty is that even for finite systems the space of distributions is uncountable, thus bisimulation is difficult to compute. However, we show that it admits a concise representation using methods of linear algebra and we give algorithms to compute it. Further, in order to cover e.g.\ continuous-time systems, we need to handle both uncountably many states (that store the sampled time) and labels (real time durations). Fortunately, there is an elegant way to do so using the standard coalgebra framework. Moreover, it can easily be further generalized, e.g.\ adding rewards to the generic definition is a trivial task. \noindent \textbf{Our contribution} is the following: \begin{itemize} \item We give a natural definition of bisimulation from the distributional perspective for systems with generally uncountable spaces of states and labels.
\item We argue by means of several applications that the definition is more useful in many settings than the classical notions of probabilistic bisimulation. \item We give the first algorithm to compute distributional bisimulation on finite non-deterministic probabilistic systems and, further, an algorithm deciding bisimilarity on several classes of uncountable (continuous-time) systems. \end{itemize} \section{Conclusion} We have introduced a general and natural notion of a distribution-based probabilistic bisimulation, shown its applications in different settings and given algorithms to compute it for finite and some classes of infinite systems. As to future work, the precise complexity of the finite case is certainly of interest. Further, the tableaux decision method opens the arena for investigating wider classes of continuous-time systems where the new bisimulation is decidable. \bibliographystyle{myabbrv} \section{Proofs from Section~\ref{sec:bisim}} Recall $\heartsuit(f)(n)(a)=(id\times\mathcal P(f))(n(a))$. \begin{reflemma}{lem:fin-coin} The union of $\heartsuit$-bisimulations coincides with $\sim$. \end{reflemma} \begin{proof} First, we prove that whenever there is a $\heartsuit$-bisimulation $(R,\overline\mathrm{next})$ with $(\mu,\nu)\in R$ then $\mu\sim\nu$, by proving that $R\cup R^{-1}$ is a bisimulation relation. Let $a\in L$ and $\mu R \nu$ or $\nu R \mu$, w.l.o.g.\ the former (the latter follows symmetrically). \begin{enumerate} \item The first condition of the relational bisimulation follows by \begin{align*} \mu(S_a)&=\pi_1(\mathrm{next}(\mu)(a))\\ &=\pi_1(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(a))\\ &=\pi_1(\heartsuit\pi_1\circ\overline\mathrm{next}\langle\mu,\nu\rangle(a))\\ &=\pi_1((id\times\mathcal P\pi_1)(\overline\mathrm{next}\langle\mu,\nu\rangle(a)))\\ &=id(\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle(a)))\\ &=\pi_1((id\times\mathcal P\pi_2)(\overline\mathrm{next}\langle\mu,\nu\rangle(a)))\\ &=\pi_1(\heartsuit\pi_2\circ\overline\mathrm{next}\langle\mu,\nu\rangle(a))\\ &=\pi_1(\mathrm{next}\circ\pi_2\langle\mu,\nu\rangle(a))\\ &=\pi_1(\mathrm{next}(\nu)(a))\\ &=\nu(S_a) \end{align*} \item For the second condition of the relational bisimulation, let $\mu\tran{a}\mu'$. Since \begin{align*} \mu'&\in\pi_2(\mathrm{next}(\mu))(a)\\ &=\pi_2(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(a))\\ &=\pi_2(\heartsuit\pi_1\circ\overline\mathrm{next}\langle\mu,\nu\rangle(a))\\ &=\pi_2((id\times\mathcal P\pi_1)\Big(\overline\mathrm{next}\langle\mu,\nu\rangle(a)\Big))\\ &=\mathcal P\pi_1(\pi_2\Big(\overline\mathrm{next}(\langle\mu,\nu\rangle)(a)\Big)) \end{align*} there is $\nu'$ with $$\langle\mu',\nu'\rangle\in \pi_2\Big(\overline\mathrm{next}(\langle\mu,\nu\rangle)(a)\Big)$$ Since $R$ is a coalgebra, we have $\langle\mu',\nu'\rangle\in R$, i.e.\ $\mu'R\nu'$. \end{enumerate} \medskip Second, given $R=\mathord{\sim}$, we define $\overline\mathrm{next}$ making it into a coalgebra such that the bisimulation diagram commutes. Let $\mathrm{succ}_a(\mu)=\{\mu'\mid\mu\tran{a}\mu'\}$ denote the set of all $a$-successors of $\mu$. For $\mu R\nu$, we set $$\overline\mathrm{next}(\langle\mu,\nu\rangle)(a)=\big(\mu(S_a),\, R\cap (\mathrm{succ}_a(\mu)\times\mathrm{succ}_a(\nu))\big)$$ Since we imposed $\langle\mu',\nu'\rangle\in R$, $(R,\overline\mathrm{next})$ is a $\heartsuit$-coalgebra. Further, we prove that the bisimulation diagram commutes.
Firstly, \begin{align*} \mathrm{next}\circ\pi_1\langle\mu,\nu\rangle&=(\mu(S_a),\mathrm{succ}_a(\mu))\\ \mathrm{next}\circ\pi_2\langle\mu,\nu\rangle&=(\nu(S_a),\mathrm{succ}_a(\nu)) \end{align*} Therefore, $$\pi_1(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle)= \mu(S_a)=\pi_1(\heartsuit\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle)(a))$$ and $$\pi_1(\mathrm{next}\circ\pi_2\langle\mu,\nu\rangle)=\nu(S_a)=\mu(S_a)= \pi_1(\heartsuit\pi_2(\overline\mathrm{next}\langle\mu,\nu\rangle)(a))$$ since $\mu(S_a)=\nu(S_a)$ due to $\mu\sim\nu$ and the first relational bisimulation condition. Secondly, \begin{align*} \pi_2(\mathrm{next}\circ\pi_1\langle\mu,\nu\rangle(a))&= \mathrm{succ}_a(\mu)\stackrel{(1)}=\pi_2(\heartsuit\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle)(a))\\ \pi_2(\mathrm{next}\circ\pi_2\langle\mu,\nu\rangle(a))&=\mathrm{succ}_a(\nu) \stackrel{(2)}=\pi_2(\heartsuit\pi_2(\overline\mathrm{next}\langle\mu,\nu\rangle)(a)) \end{align*} After we show $(1)$ and $(2)$, we know that both components of $\heartsuit\pi_1(\overline\mathrm{next}\langle\mu,\nu\rangle)(a)$ are the same as those of $\mathrm{next}(\pi_1\langle\mu,\nu\rangle)(a)$, and similarly for $\heartsuit\pi_2$, hence the diagram commutes. As to $(1)$, $\supseteq$ follows directly from the definition of $\overline\mathrm{next}$ above. For $\subseteq$, for every $\mu'\in\mathrm{succ}_a(\mu)$ there is $\nu'\in\mathrm{succ}_a(\nu)$ with $\mu'R\nu'$ due to the second relational bisimulation condition. Thus also $\langle\mu',\nu'\rangle\in \heartsuit\pi_1 (\overline\mathrm{next}\langle\mu,\nu\rangle)(a)$. $(2)$ follows from a symmetric argument and $R$ being symmetric. \end{proof} \begin{reflemma}{lem:inf-coin} The union of $\spadesuit$-bisimulations coincides with $\sim$. \end{reflemma} \begin{proof} The proof is similar, with applications to $a\in L$ replaced by applications to $A\subseteq L$. \end{proof} \section{Relational bisimulation} An equivalence relation $\sim \subseteq \mathit{S} \times \mathit{S}$ over states can be lifted to a relation $\mathcal{L}(\sim) \subseteq \mathcal{D}(\mathit{S}) \times \mathcal{D}(\mathit{S})$ over measures by letting $\mu \,\mathcal{L}(\sim)\, \mu'$ if for each set of equivalence classes $X \subseteq \mathit{S} / \sim$ such that $\bigcup X$ is measurable we have $\mu(\bigcup X) = \mu'(\bigcup X)$. \begin{definition}[Larsen\&Skou state-based bisimulation] We say that an equivalence $\sim \subseteq \mathit{S} \times \mathit{S}$ is a (strong) \emph{bisimulation} if for each $s \, \sim \, s'$ and each $a \in \mathit{L}$ we have \begin{enumerate} \item either $\tau_a(s) = \vec{0} = \tau_a(s')$ \item or $\tau_a(s) \, \mathcal{L}(\sim)\, \tau_a(s')$. \end{enumerate} \end{definition} The largest state bisimulation is denoted by $\sim_{state}$. \begin{definition}[Distribution strong bisimulation] We say that an equivalence $\sim \subseteq \mathcal{D}(\mathit{S}) \times \mathcal{D}(\mathit{S})$ is a \emph{distribution (strong) bisimulation} if for each $\mu \,\sim\, \mu'$ and each measurable $X \subseteq \mathit{L}$, \begin{enumerate} \item $\mu(\states_X) = \mu'(\states_X)$, and \item for each measurable $\rho : \states_X \to X$ there is a measurable $\rho': \states_X \to X$ such that $\mu_\rho \, \sim \, \mu'_{\rho'}$ where $$ \mu_\rho(A) := \mu(A \setminus \states_X) + \int_{\states_X} \tau_{\rho(x)}(x)(A) \; \mu(dx) \quad \forall A\in\sigmafield(\states). $$ \end{enumerate} \end{definition} The largest distribution bisimulation is denoted by $\sim_{dist}$.
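The lifting $\mathcal{L}(\sim)$ is straightforward to check on finite systems. The following Python sketch (an editorial illustration with hypothetical states and numbers) tests whether two distributions agree on every equivalence class; by additivity this already implies agreement on every union of classes, as required by the definition above.
\begin{verbatim}
# Check mu L(~) nu on a finite state space: the two distributions
# must assign equal mass to each equivalence class (and hence, by
# additivity, to every union of classes).
def lifted_equal(mu, nu, classes, eps=1e-12):
    """mu, nu: dicts state -> probability; classes: list of sets."""
    return all(abs(sum(mu.get(s, 0.0) for s in c) -
                   sum(nu.get(s, 0.0) for s in c)) <= eps
               for c in classes)

classes = [{"u1", "u2"}, {"v"}]            # u1 ~ u2, v on its own
mu = {"u1": 0.3, "u2": 0.2, "v": 0.5}
nu = {"u1": 0.1, "u2": 0.4, "v": 0.5}
print(lifted_equal(mu, nu, classes))       # True: mu L(~) nu
\end{verbatim}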
\section{Co-algebraic bisimulation} We work in the category $Mes$: \begin{itemize} \item objects: measurable spaces; for an object $X$, let $\Sigma(X)$ denote its $\sigma$-algebra, and we use $X$ also for the carrier set; \item morphisms: measurable maps. \end{itemize} The functor $\Pi$ is given by: \begin{itemize} \item on objects: $\Pi(X)=\{\mu:\Sigma(X)\to[0,1]\mid \mu \text{ is a probability measure on } X\}$; \item on morphisms: $\Pi(f)=\lambda \nu.\nu\circ f^{-1}$. \end{itemize} The category $\Pi(Mes)$ is the image of the endofunctor $\Pi$ on $Mes$. Let $A$ be fixed and let $\mathcal P(A)$ denote the set of \emph{measurable} subsets of $A$. The functor $F$ is given by: \begin{itemize} \item on objects: $F(X)=\big([0,1]\times X\big)^{\mathcal P(A)}$; \item on morphisms: $F(f)=\big(id\times f\big)^{\mathcal P(A)}$. \end{itemize} Let $S=(S,f_S)$ and $T=(T,f_T)$ be $F$-coalgebras in the category $\Pi(Mes)$. A bisimulation of $S$ and $T$ is an object $R$ of $\Pi(Mes)$\footnote{We may require the structure of a relation, $R\subseteq X\times Y$, or we can take a categorical product in order to obtain the relation.} that is an $F$-coalgebra equipped with $F$-coalgebra homomorphisms to $S$ and to $T$. For $f:X\to F(X)$ and $B\in\mathcal P(A)$, let $\langle f^m_B,f^s_B\rangle:X\to [0,1]\times X$ denote the function equal to $f(-)(B)$, i.e.\ the components of $f$. Then the coalgebraic bisimulation definition requires: \begin{itemize} \item $f^m_B(r)=f^m_B(\pi_1(r))$ and $f^m_B(r)=f^m_B(\pi_2(r))$, hence $f^m_B$ must be the same on both components of the bisimulation (if we consider the product, then for $sRt$ we have $f^m_B(s)=f^m_B(t)$); \item $\pi_1(f^s_B(r))=f^s_B(\pi_1(r))$ and $\pi_2(f^s_B(r))=f^s_B(\pi_2(r))$ and of course $f^s_B(r)\in R$, hence all these conditions must again hold for the element of the bisimulation that contains the successors (if we consider the product, then for $sRt$ we have $f^s_B(s)\ R\ f^s_B(t)$). \end{itemize} \noindent\emph{Lemma (to be shown):} this coalgebraic definition is equivalent to the relational one. \section{Categorical bisimulation} Bisimulations can alternatively be presented as spans of zig-zags, presumably because the category has pullbacks. \section{Algorithms} Open points: when computing the greatest fixpoint on distributions, what sort of error occurs if the space is discretized? Has any $\varepsilon$-bisimulation been studied in the past? For computing the greatest fixpoint on states, HH used a lifting to distributions; this can be lifted to measures for given classes of time distributions ($\{\mathrm{Exp}(r)\mid r\in\mathbb{R}\}\cup\cdots$). \section{Bisimulation in compositional modelling} \label{app:compositional} For this subsection we fix a set of labels $\mathit{L}$. The key operation for compositional modelling is the \emph{parallel composition} operator. For $D_1 = (\mathit{S}_1, \mathit{L}, \longrightarrow_1)$, $D_2 = (\mathit{S}_2, \mathit{L}, \longrightarrow_2)$, and a subset of their labels $A \subseteq \mathit{L}$, let $D_1 \parallel_A D_2$ denote their (CSP style) parallel composition $(\mathit{S}_1 \times \mathit{S}_2, \mathit{L}, \longrightarrow)$ where $(s_1,s_2) \patransa{a} \mu$ if $\mu = \mu_1 \times \mu_2$ (the product distribution $\mu_1 \times \mu_2$ assigns $\mu_1(s_1') \cdot \mu_2(s_2')$ to all $(s_1',s_2')$)
and \begin{itemize} \item $a\in A$, $s_1 \patransa{a}\arrowsub{1} \, \mu_1$, and $s_2 \patransa{a}\arrowsub{2} \, \mu_2$; or \hfill {\footnotesize (synchronous move)} \item $a\not\in A$, $s_1 \patransa{a}\arrowsub{1} \, \mu_1$ and $\mu_2(s_2) = 1$, or $s_2 \patransa{a}\arrowsub{2} \, \mu_2$ and $\mu_1(s_1) = 1$.\hfill {\footnotesize (asynchronous move)} \end{itemize} For distributions $\mu_1$, $\mu_2$ in probabilistic automata $D_1$, $D_2$, we denote by $\mu_1 \parallel_A \mu_2$ the distribution $\mu_1 \times \mu_2$ in $D_1 \parallel_A D_2$. For bisimulation to be applicable in compositional modelling, it needs to be a congruence w.r.t.\ parallel composition. \begin{theorem} Let $\mu_1,\mu_2$ be distributions in $D$ such that $\mu_1 \sim \mu_2$. We have $\mu_1 \parallel_A \mu \; \sim \; \mu_2 \parallel_A \mu$ for any $\mu$ in $D'$ and $A\subseteq\mathit{L}$. \end{theorem} \section{Definitions for expolynomials} Let $W$ be the set of all sequences (of length at most $k$) of the form $w = (a_1,c_1) \ldots (a_n,c_n)$ where each $a_i \in \mathcal{A}$ and $c_i \in \mathcal{C} \cup \{\bot\}$ such that each clock appears at most once in the sequence, i.e.\ $c_i = c_j$ implies $i=j$ or $c_i = c_j = \bot$. From a state $s=(q,\theta)$, each sequence encodes a possible execution to some state $(q',\theta')$ -- a sequence of transitions with actions $a_i$ that may occur from $s$ before all the clocks active in $\theta$ get below zero or get resampled. The elements $c_i$ encode which of the clocks active in $\theta$ triggered the transition ($c_i$ is $\bot$ if the transition is triggered by a clock not active in $\theta$ or by a clock that is resampled earlier on the sequence). These sequences can be computed by a forward reachability analysis. Traversing the sequence $w$ backwards from $q'$ in the qualitative TA model gives a \emph{zone} $z_{w,q}$, a symbolic representation of all clock values in $q$ from which it is possible to perform this sequence. For each sequence $w$, we denote by $Z_w$ the minimal set of zones \begin{itemize} \item that is a partition, i.e.\ for any distinct $z_1,z_2 \in Z_w$ we have $z_1 \cap z_2 = \emptyset$, and \item such that each zone $z_{w,q} \cap D(\theta)$ for $(q,\theta)\in\mathit{S}_\mathcal{S}$ is a union of elements of $Z_w$. \end{itemize} For each zone $z \in Z_w$ and distribution $\theta$ such that $\theta(z) > 0$, we denote by $\theta^z$ the conditioned distribution on $D(\theta) \cap z$, i.e.\ for each measurable set $A$ in its support we have $\theta^z(A) = \theta(A)/\theta(z)$. For each sequence $w = (a_1,c_1) \cdots (a_n,c_n)$ and each action $a$, let $\#_{w,a} = |\{i \mid a = a_i,\ c_i \neq \bot \}|$ denote the number of times an active clock triggers the action $a$ on the sequence $w$. The space of the sequence $X_w$ is $\prod_{a\in\mathcal{A}} (\mathbb{R}_{\ge 0})^{\#_{w,a}}$. Finally, for each state distribution $\theta^z$ over $z \subseteq (\mathbb{R}_{\ge 0})^\mathcal{C}$ we denote by $\theta^z_w$ the distribution over $X_w$ obtained from $\theta^z$ by projecting out all clocks not appearing in this sequence and mapping the dimension of each clock $c$ to the $i$-th dimension of $a$ where $c$ appears in $w$ with the $i$-th occurrence of $a$.
Let $X = \{(q,\theta^z_w) \mid q\in\mathcal{Q},\ (q,\theta)\in\mathit{S}_\mathcal{S},\ w \in W,\ z \in Z_w\}$ be the set of all distinct measures in the system induced by all sequences and the zones of the sequences. To each state $(q,\theta)$ we assign a discrete distribution $\mu_{(q,\theta)}$ over $X$ that assigns to each $(q,\theta^z_w)\in X$ the probability $\theta(z)/\theta(\bigcup Z_q)$ where $Z_q = \bigcup_{w\in W} Z_{w,q}$. \section{Related work and discussion}\label{sec:rw} For an overview of coalgebraic work on probabilistic bisimulations we refer to a survey \cite{DBLP:journals/tcs/Sokolova11}. A considerable effort has been spent to extend this work to continuous-space systems: the solution of \cite{DBLP:conf/icalp/VinkR97} (unfortunately not applicable to $\mathbb{R}$), the construction of \cite{DBLP:journals/mscs/Edalat99} (described by \cite{Sangiorgi:2011:ATB:2103601} as ``ingenious and intricate''), sophisticated measurable selection techniques in \cite{DBLP:conf/icalp/Doberkat03}, and further approaches of \cite{DBLP:conf/lics/DesharnaisGJP00} or \cite{phd-cont}. In contrast to this standard setting, where relations between states and their successor distributions must be handled, our work uses relations on distributions directly, which simplifies the setting. The coalgebraic approach has also been applied to trace semantics of uncountable systems \cite{DBLP:conf/concur/KerstanK12}. Coalgebraic treatment of probabilistic bisimulation is still very lively \cite{DBLP:conf/fossacs/Mio14}. Recently, distribution-based bisimulations have been studied. In \cite{DBLP:journals/ijfcs/DoyenHR08}, a bisimulation is defined in the context of language equivalence of Rabin's deterministic probabilistic automata, together with an algorithm to compute it. However, only finite systems with no non-determinism are considered. Most closely related to our notion are the very recent, independently developed \cite{DBLP:journals/corr/FengZ13} and \cite{DBLP:journals/corr/abs-1202-4116}. However, neither of them is applicable in the continuous setting, and no algorithm has previously been given for either of the two. Nevertheless, since they are close to our definition, our algorithm can, with only small changes, actually compute them. Although the bisimulation of \cite{DBLP:journals/corr/FengZ13} in a rather complex way extends \cite{DBLP:journals/ijfcs/DoyenHR08} to the non-deterministic case reusing their notions, it can be equivalently rephrased as our Definition~\ref{def:infinite-bisim} restricted to singleton sets $A\subseteq L$. Therefore, it is sufficient to only consider matrices $P_A^W$ for singletons $A$ in our algorithm. Apart from being a weak relation, the bisimulation of \cite{DBLP:journals/corr/abs-1202-4116} differs in the definition of $\mu\tran{A}\nu$: instead of restricting to the states of the support that can perform \emph{some} action of $A$, it considers those states that can perform \emph{exactly} the actions of $A$. Here the $i$-th row of each transition matrix $P_A^W$ needs to be set to zero if the set of labels enabled in $s_i$ is different from $A$. There are also bisimulation relations over distributions that, however, coincide with the classical one \cite{DBLP:conf/popl/LarsenS89} on Dirac distributions and are only directly lifted to non-Dirac distributions. Thus they fail to address the motivating correspondence problem from Section~\ref{sec:intro} and are less precise for large-population models. Moreover, no algorithms were given.
They were considered for finite \cite{DBLP:conf/concur/CrafaR11,DBLP:dblp_journals/fac/Hennessy12} and uncountable \cite{cattani-thesis} state spaces. There are other bisimulations that identify more states than the classical one \cite{DBLP:conf/popl/LarsenS89}, such as \cite{DBLP:conf/concur/SongZG11} and \cite{italie}, which are designed to match a specific logic. Further, weak bisimulations coarser than the usual state-based analogues were given in \cite{DBLP:conf/lics/EisentrautHZ10,DBLP:conf/qest/EisentrautHKT013,DBLP:journals/iandc/DengH13}, which also inspire our work, especially their approach to internal transitions. However, they are quite different from our notion since, in the case without internal transitions, they basically coincide with the lifting \cite{DBLP:dblp_journals/fac/Hennessy12} of the classical bisimulation \cite{DBLP:conf/popl/LarsenS89}. Another approach to obtain coarser equivalences on probabilistic automata is via testing scenarios~\cite{DBLP:conf/icalp/StoelingaV03}.
\section{Introduction} One of the most important problems in theoretical physics is the unification of Einstein's general theory of relativity and the Standard Model of particle physics [1]. According to Ref. [1], two important predictions of this unification are the following: $(i)$ the existence of extra dimensions; and $(ii)$ the existence of a minimal length scale on the order of the Planck length. Studies in string theory and loop quantum gravity suggest that there is a minimal length scale in nature. It is by now well understood that the existence of a minimal length scale leads to a modification of the Heisenberg uncertainty principle. This modified uncertainty principle can be written as \begin{equation} \triangle X \geq\frac{\hbar}{2\triangle P}+\frac{a_{1}}{2}\;\frac{\ell_{P}^{2}}{\hbar}\;\triangle P+\frac{a_{2}}{2}\;\frac{\ell_{P}^{4}}{\hbar^{3}}\;(\triangle P)^{3}+\cdots, \end{equation} where $\ell_{P}$ is the Planck length and $a_{i}\;,\forall i\in \{1,2, \cdots \}$, are positive numerical constants [2-4]. By keeping only the first two terms on the right-hand side of Eq. (1), we obtain the usual generalized uncertainty principle (GUP) as follows: \begin{equation} \triangle X \geq\frac{\hbar}{2\triangle P}+\;\frac{a_{1}}{2}\;\frac{\ell_{P}^{2}}{\hbar}\;\triangle P. \end{equation} It is clear that in Eq. (2), $\triangle X$ is always larger than $(\triangle X)_{min}=\sqrt{a_{1}}\;\ell_{P}$. At the present time, theoretical physicists expect that a reformulation of quantum field theory in the presence of a minimal length scale leads to a divergenceless quantum field theory [5-7]. During recent years, the reformulation of quantum mechanics, gravity, and quantum field theory in the presence of a minimal length scale has been studied extensively [5-21]. H. S. Snyder was the first to formulate the electromagnetic field in quantized spacetime [22]. There are many papers about electrodynamics in the presence of a minimal length scale; for a review, we refer the reader to Refs. [12,13,14,15,16,19,20]. In our previous work [15], we studied the formulation of electrodynamics with an external source in the presence of a minimal measurable length. In this work, we study the formulation of a magnetostatic field with an external current density in the presence of a minimal length scale based on the Kempf algebra. This paper is organized as follows. In Section 2, the $D$-dimensional $(\beta,\beta')$-two-parameter deformed Heisenberg algebra introduced by Kempf and his co-workers is studied and it is shown that the Kempf algebra leads to a minimal length scale [23-25]. In Section 3, the Lagrangian formulation of a magnetostatic field in three spatial dimensions described by the Kempf algebra is introduced in the case of $\beta'=2\beta$, in which the position operators commute to first order in $\beta$. It is shown that at the classical level there is a similarity between magnetostatics in the presence of a minimal length scale and the magnetostatic sector of the Abelian Lee-Wick model in three spatial dimensions. Ampere's law and the energy density of a magnetostatic field in the presence of a minimal length scale are obtained. In Section 4, the Biot-Savart law in the presence of a minimal length scale is found. We show that at large spatial distances the modified Biot-Savart law becomes the Biot-Savart law of usual magnetostatics. In Section 5, we study the effect of minimal length corrections to the gyromagnetic moment of the muon.
From this study we conclude that the upper bound on the isotropic minimal length scale in three spatial dimensions is $4.42\times10^{-19}\, m$. This value for the isotropic minimal length scale is close to the electroweak length scale $(\ell_{electroweak}\sim 10^{-18}\, m)$. In Section 6, the relationship between magnetostatics in the presence of a minimal length scale and a particular class of non-local magnetostatics is investigated. Our conclusions are presented in Section 7. We use SI units throughout this paper. \section{Modified Commutation Relations with a Minimal Length Scale} Kempf and co-workers have introduced a modified Heisenberg algebra which describes a $D$-dimensional quantized space [23-25]. The Kempf algebra in a $D$-dimensional space is characterized by the following modified commutation relations \begin{eqnarray} \left[X^{i},P^{j}\right] &=& i\hbar \left[ (1+\beta\textbf{P}^{2})\delta^{ij} + \beta' P^{i}P^{j}\right], \\ \left[X^{i},X^{j}\right] &=& i\hbar \frac{(2\beta-\beta')+(2\beta+\beta')\beta \textbf{P}^{2}}{1+\beta \textbf{P}^{2}}(P^{i}X^{j}-P^{j}X^{i}), \\ \left[P^{i},P^{j}\right] &=& 0, \end{eqnarray} where $i,j=1,2,...,D$ and $\beta ,\beta'$ are two non-negative deformation parameters $(\beta,\beta' \geq 0)$. In Eqs. (3) and (4), $\beta$ and $\beta'$ are constant parameters with dimension $(momentum)^{-2}$. Also, in the above equations $X^{i}$ and $P^{i}$ are position and momentum operators in the deformed space.\\ An immediate consequence of Eq. (3) is the appearance of an isotropic minimal length scale which is given by [26] \begin{equation} (\triangle X^{i})_{min}=\hbar\sqrt{D\beta+\beta'}\quad , \quad\forall i\in \{1,2, \cdots ,D\}. \end{equation} In Ref. [27], Stetsko and Tkachuk introduced a representation which satisfies the modified Heisenberg algebra (3)-(5) up to first order in the deformation parameters $\beta$ and $\beta'$. The Stetsko-Tkachuk representations for the position and momentum operators in the deformed space can be written as follows: \begin{eqnarray} X^{i} &=& x^{i}+ \frac{2\beta-\beta'}{4}(\textbf{p}^{2}x^{i}+x^{i}\textbf{p}^{2}), \\ P^{i} &=& p^{i}(1+\frac{\beta'}{2}\textbf{p}^{2}), \end{eqnarray} where $x^{i}$ and $p^{i}=-i\hbar\partial^{i}=-i\hbar\frac{\partial}{\partial x_{i}}$ are the position and momentum operators in ordinary quantum mechanics, and $\textbf{p}^{2}=\sum_{i=1}^{D}p^{i}p^{i}$. In this article, we study the special case of $\beta'=2\beta$, in which the position operators commute to first order in the deformation parameter $\beta$, i.e., $[X^{i},X^{j}]=0$, and thus a diagonal representation for the position operator in the deformed space can be obtained. For this linear approximation, the modified Heisenberg algebra (3)-(5) becomes \begin{eqnarray} \left[X^{i},P^{j}\right] &=& i\hbar \left[ (1+\beta\textbf{P}^{2})\delta^{ij} +2\beta P^{i}P^{j}\right], \\ \left[X^{i},X^{j}\right] &=& 0, \\ \left[P^{i},P^{j}\right] &=& 0. \end{eqnarray} In 1999, Brau [28] showed that the following representations satisfy (9)-(11) to first order in $\beta$: \begin{eqnarray} X^{i} &=& x^{i},\\ P^{i} &=& p^{i}(1+\beta\textbf{p}^{2}). \end{eqnarray} It is necessary to note that the Stetsko-Tkachuk representations (7),(8) and the Brau representations (12),(13) coincide when $\beta'=2\beta$. Benczik has shown that the energy spectrum of some quantum systems in the deformed space with a minimal length is representation-independent [29]. It seems that the laws of physics in the presence of a minimal length must be representation-independent.
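As a quick symbolic sanity check of this section (an editorial addition, not part of the original derivations of Refs. [27,28]), the following Python/SymPy sketch verifies that the Brau representation (12),(13) indeed reproduces the modified commutation relations (9)-(11) to first order in $\beta$; the test function $f$ and all helper names below are ours.
\begin{verbatim}
# Verify [X^i, P^j] = i*hbar*((1 + beta*p^2)*delta^{ij}
#                             + 2*beta*p^i p^j) + O(beta^2)
# for X^i = x^i and P^i = p^i(1 + beta*p^2), acting on a
# test function f(x, y, z), with p^i = -i*hbar*d/dx_i.
import sympy as sp

hbar, beta = sp.symbols('hbar beta', positive=True)
X = sp.symbols('x y z', real=True)
f = sp.Function('f')(*X)

def p(i, g):                   # ordinary momentum operator p^i
    return -sp.I * hbar * sp.diff(g, X[i])

def p2(g):                     # p^2 = -hbar^2 * Laplacian
    return sum(p(k, p(k, g)) for k in range(3))

def P(i, g):                   # deformed momentum, Eq. (13)
    return p(i, g) + beta * p(i, p2(g))

for i in range(3):
    for j in range(3):
        lhs = X[i] * P(j, f) - P(j, X[i] * f)     # [X^i, P^j] f
        rhs = sp.I * hbar * ((1 if i == j else 0) * (f + beta * p2(f))
                             + 2 * beta * p(i, p(j, f)))
        assert sp.simplify(sp.expand(lhs - rhs)) == 0
print("Brau representation satisfies Eqs. (9)-(11) to O(beta)")
\end{verbatim}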
\section{Lagrangian Formulation of a Magnetostatic Field with an External Current Density in the Presence of a Minimal Length Scale Based on the Kempf Algebra} The Lagrangian density for a magnetostatic field with an external current density $\textbf{J}(\textbf{x})=(J^{1}(\textbf{x}), J^{2}(\textbf{x}), J^{3}(\textbf{x}))$ in three spatial dimensions $(D=3)$ can be written as follows [30]: \begin{equation} {\cal L}=-\frac{1}{4\mu_{0}}F_{ij}(\textbf{x})F^{ij}(\textbf{x})+J^{i}(\textbf{x})A^{i}(\textbf{x}), \end{equation} where $i,j=1,2,3$, $F_{ij}(\textbf{x})=\partial_{i}A_{j}(\textbf{x})-\partial_{j}A_{i}(\textbf{x})$ and $\textbf{A}(\textbf{x})=(A^{1}(\textbf{x}),A^{2}(\textbf{x}),A^{3}(\textbf{x}))$ are the electromagnetic field tensor and the vector potential, respectively.\\ The Euler-Lagrange equation for the components of the vector potential is \begin{equation} \frac{\partial{\cal L}}{\partial A_{k} }-\partial_{l}\left(\frac{\partial{\cal L}}{\partial(\partial_{l}A_{k})}\right)=0. \end{equation} If we substitute (14) into (15), we obtain the following field equation for the magnetostatic field \begin{equation} \partial_{l}F^{lk}(\textbf{x})=\mu_{0}J^{k}(\textbf{x}). \end{equation} The electromagnetic field tensor $F_{ij}(\textbf{x})$ satisfies the Bianchi identity \begin{equation} \partial_{i}F_{jk}(\textbf{x})+\partial_{j}F_{ki}(\textbf{x})+\partial_{k}F_{ij}(\textbf{x})=0. \end{equation} The three-dimensional magnetic induction vector $\textbf{B}(\textbf{x})$ is defined as follows [31]: \begin{equation} F_{ij}=-\epsilon_{ijk}B^{k}\,\,\,\, ,\,\,\,F^{ij}=\epsilon^{ijk}B_{k}\;, \end{equation} where \begin{equation} \{B^{i}\}=\{B_{x},B_{y},B_{z}\}\,\,\,\,,\,\,\,\,\{B_{i}\}=\{-B_{x},-B_{y},-B_{z}\}. \end{equation} Using Eqs. (18) and (19), Eqs. (16) and (17) can be written in vector form as follows: \begin{eqnarray} \boldsymbol{\nabla}\times\textbf{B}(\textbf{x})&=&\mu_{0}\textbf{J}(\textbf{x}),\\ \boldsymbol{\nabla}\cdot \textbf{B}(\textbf{x})&=&0. \end{eqnarray} The above equations are the basic equations of magnetostatics [30].\\ An immediate consequence of Eq. (21) is that $\textbf{B}(\textbf{x})$ can be written as follows: \begin{equation} \textbf{B}(\textbf{x})=\boldsymbol{\nabla}\times\textbf{A}(\textbf{x}). \end{equation} Now, we want to obtain the Lagrangian density for a magnetostatic field in the presence of a minimal length scale based on the Kempf algebra. For this purpose, we must replace the ordinary position and derivative operators with the deformed position and derivative operators according to Eqs. (12) and (13), i.e., \begin{eqnarray} x^{i}\longrightarrow X^{i}&=&x^{i}, \\ \partial^{i}\longrightarrow D^{i}&:=&(1-\beta\hbar^{2}\nabla^{2})\partial^{i}, \end{eqnarray} where $\nabla^{2}:=\partial_{i}\partial_{i}$ is the Laplace operator. Using Eqs. (23) and (24), the electromagnetic field tensor in the presence of a minimal length scale becomes\\ \begin{equation*} F_{ij}(\textbf{x})=\partial_{i}A_{j}(\textbf{x})-\partial_{j}A_{i}(\textbf{x})\longrightarrow {\cal F}_{ij}(\textbf{X})= D_{i}A_{j}(\textbf{X})-D_{j}A_{i}(\textbf{X}), \end{equation*} or \begin{equation} {\cal F}_{ij}(\textbf{X})=F_{ij}(\textbf{x})-\beta\hbar^{2}\nabla^{2}F_{ij}(\textbf{x}).
\end{equation} It should be mentioned that the above modification of the electromagnetic field tensor was introduced earlier by Hossenfelder and co-workers in order to study minimal length effects in quantum electrodynamics in Ref. [16]. If we use Eqs. (23), (24), and (25), we obtain the Lagrangian density for a magnetostatic field in the deformed space as follows \footnote {Using Eq. (23) together with the transformation rule for a contravariant vector, we obtain the following result to first order in the deformation parameter $\beta$ \begin{eqnarray*} J^{\;\prime \;i}(\textbf{X})A^{\;\prime \;i}(\textbf{X})=\frac{\partial X^{i}}{\partial x^{j}}J^{j}(\textbf{x})\frac{\partial X^{i}}{\partial x^{k}}A^{k}(\textbf{x})=\delta_{j}^{i}\delta_{k}^{i}J^{ j}(\textbf{x})A^{k}(\textbf{x})=J^{ i}(\textbf{x})A^{i}(\textbf{x}). \end{eqnarray*}} : \begin{eqnarray} {\cal L} &=& -\;\frac{1}{4\mu_{0}}{\cal F}_{ij}(\textbf{X}){\cal F}^{ij}(\textbf{X})+J^{i}(\textbf{X})A^{i}(\textbf{X}) \nonumber \\ &=& -\;\frac{1}{4\mu_{0}} F_{ij}(\textbf{x})F^{ij}(\textbf{x})+ \frac{1}{4\mu_{0}}(\hbar\sqrt{2\beta})^{2}\nonumber \\ & & F_{ij}(\textbf{x})\nabla^{2} F^{ij}(\textbf{x})+J^{i}(\textbf{x})A^{i}(\textbf{x})+{\cal O}\left((\hbar\sqrt{2\beta})^{4}\right). \end{eqnarray} The term $\frac{1}{4\mu_{0}}(\hbar\sqrt{2\beta})^{2}F_{ij}(\textbf{x})\nabla^{2} F^{ij}(\textbf{x})$ in Eq. (26) can be considered as a minimal length effect. After neglecting terms of order $(\hbar\sqrt{2\beta})^{4}$ and higher in Eq. (26), we obtain \begin{eqnarray} {\cal L} &=& -\;\frac{1}{4\mu_{0}}F_{ij}(\textbf{x})F^{ij}(\textbf{x})+ \frac{1}{4\mu_{0}}(\hbar\sqrt{2\beta})^{2} F_{ij}(\textbf{x})\nabla^{2} F^{ij}(\textbf{x})+J^{i}(\textbf{x})A^{i}(\textbf{x}). \end{eqnarray} The Lagrangian density (27) is similar to the magnetostatic sector of the Abelian Lee-Wick model, which was introduced by Lee and Wick as a finite theory of quantum electrodynamics [32-36]. Eq. (27) can be written as \begin{eqnarray} {\cal L} &=& -\;\frac{1}{4\mu_{0}}F_{ij}(\textbf{x})F^{ij}(\textbf{x})- \frac{1}{4\mu_{0}}(\hbar\sqrt{2\beta})^{2} \partial_{n}F_{ij}(\textbf{x})\,\partial_{n}F^{ij}(\textbf{x})+J^{i}(\textbf{x})A^{i}(\textbf{x}) + \partial_{n}\Lambda_{n}(\textbf{x}), \end{eqnarray} where \begin{eqnarray} \Lambda_{n}(\textbf{x}):=\frac{1}{4\mu_{0}}(\hbar\sqrt{2\beta})^{2}F_{ij}(\textbf{x}) \partial_{n}F^{ij}(\textbf{x}). \end{eqnarray} After dropping the total derivative term $ \partial_{n}\Lambda_{n}(\textbf{x})$, the Lagrangian density (28) is equivalent to the following Lagrangian density: \begin{eqnarray} {\cal L} &=& -\;\frac{1}{4\mu_{0}}F_{ij}(\textbf{x})F^{ij}(\textbf{x})- \frac{1}{4\mu_{0}} a ^{2} \partial_{n}F_{ij}(\textbf{x})\partial_{n}F^{ij}(\textbf{x})+J^{i}(\textbf{x})A^{i}(\textbf{x}), \end{eqnarray} where $a:=\hbar\sqrt{2\beta}$ is a constant parameter which is called Podolsky's characteristic length [37-41]. The Euler-Lagrange equation for the Lagrangian density (30) is [42-44] \begin{equation} \frac{\partial{\cal L}}{\partial A_{k} }-\partial_{l}\left(\frac{\partial{\cal L}}{\partial(\partial_{l}A_{k})}\right)+\partial_{m}\partial_{l}\left(\frac{\partial{\cal L}}{\partial(\partial_{m}\partial_{l}A_{k})}\right)=0.
\end{equation} If we substitute (30) into (31), we obtain the following field equation for the magnetostatic field in the deformed space \footnote{Here, we use the following definition: \begin{eqnarray*} \frac{\partial\phi_{i_{1}\cdots i_{k}}}{\partial\phi_{j_{1}\cdots j_{k}}}=\delta_{i_{1}}^{j_{1}}\cdots\delta_{i_{k}}^{j_{k}}, \end{eqnarray*} where $\phi_{i_{1}\cdots i_{k}}:=\partial_{i_{1}}\cdots \partial_{i_{k}}\phi$. This definition has been used by Moeller and Zwiebach in Ref. [44].} \begin{equation} \partial_{l}F^{lk}(\textbf{x})-a^{2}\;\nabla^{2}\partial_{l}F^{lk}(\textbf{x})=\mu_{0}J^{k}(\textbf{x}). \end{equation} Using Eqs. (18) and (19), Eqs. (17) and (32) can be written in vector form as follows: \begin{eqnarray} (1-a^{2}\nabla^{2})\boldsymbol{\nabla}\times\textbf{B}(\textbf{x})&=&\mu_{0}\textbf{J}(\textbf{x}),\\ \boldsymbol{\nabla}\cdot\textbf{B}(\textbf{x})&=& 0. \end{eqnarray} Equations (33) and (34) are the fundamental equations of Podolsky's magnetostatics [45-48]. It should be noted that Eqs. (30), (33), and (34) can be obtained as the magnetostatic limit of Eqs. (20), (26), and (27) in our previous paper [15]. Using Stokes's theorem, the integral form of Eq. (33) can be written in the form: \begin{equation} \oint_{C}[\textbf{B}(\textbf{x})-(\hbar\sqrt{2\beta})^{2}\nabla^{2}\textbf{B}(\textbf{x})]\cdot d\textbf{l}=\mu_{0} I, \end{equation} where $I$ is the total current passing through the closed curve $C$. Equation (35) is Ampere's law in the presence of a minimal length scale. It is clear that for $\hbar\sqrt{2\beta}\rightarrow 0$, the modified Ampere's law in Eq. (35) becomes the usual Ampere's law. \\ Now, let us obtain the energy density of a magnetostatic field in the presence of a minimal length scale. The energy density of a magnetostatic field in usual magnetostatics is given by [30] \begin{eqnarray} u_{B}&=&\frac{1}{2\mu_{0}}\textbf{B}(\textbf{x})\cdot\textbf{B}(\textbf{x}) \nonumber\\ &=&\frac{1}{2\mu_{0}}(\boldsymbol{\nabla}\times\textbf{A}(\textbf{x}))\cdot(\boldsymbol{\nabla}\times\textbf{A}(\textbf{x}))\;. \end{eqnarray} Using Eqs. (23) and (24), the energy density of a magnetostatic field under the influence of a minimal length scale becomes\\ \begin{eqnarray*} u_{B}=\frac{1}{2\mu_{0}}(\boldsymbol{\nabla}\times\textbf{A}(\textbf{x}))\cdot(\boldsymbol{\nabla}\times\textbf{A}(\textbf{x})) \longrightarrow u^{^{\textbf{ML}}}_{B}=\frac{1}{2\mu_{0}}(\textbf{D}\times\textbf{A}(\textbf{X}))\cdot(\textbf{D}\times\textbf{A}(\textbf{X})), \end{eqnarray*} or\\ \begin{eqnarray} u^{^{\textbf{ML}}}_{B}&=&\frac{1}{2\mu_{0}}[(1-\beta\hbar^{2}\nabla^{2})\boldsymbol{\nabla}\times\textbf{A}(\textbf{x})]\cdot[(1-\beta\hbar^{2}\nabla^{2}) \boldsymbol{\nabla}\times\textbf{A}(\textbf{x})] \nonumber\\ &=&\frac{1}{2\mu_{0}}\textbf{B}(\textbf{x})\cdot\textbf{B}(\textbf{x})-\frac{1}{2\mu_{0}}(\hbar\sqrt{2\beta})^{2} \textbf{B}(\textbf{x})\cdot\nabla^{2}\textbf{B}(\textbf{x})+{\cal O}\left((\hbar\sqrt{2\beta})^{4}\right), \end{eqnarray} where we use the abbreviation $\textbf{ML}$ for the minimal length. If we use the vector identities\\ \begin{eqnarray} \boldsymbol{\nabla}\times(\boldsymbol{\nabla}\times\textbf{a})&=&\boldsymbol{\nabla}(\boldsymbol{\nabla}\cdot\textbf{a})-\nabla^{2}\textbf{a}\;,\\ \boldsymbol{\nabla}\cdot(\textbf{a}\times\textbf{b})&=&\textbf{b}\cdot(\boldsymbol{\nabla}\times\textbf{a})-\textbf{a}\cdot(\boldsymbol{\nabla}\times\textbf{b})\;, \end{eqnarray} together with Eq.
(34), the modified energy density $u^{^{\textbf{ML}}}_{B}$ can be written in the form \begin{eqnarray} u^{^{\textbf{ML}}}_{B}&=&\frac{1}{2\mu_{0}}\textbf{B}(\textbf{x})\cdot\textbf{B}(\textbf{x})+\frac{1}{2\mu_{0}}(\hbar\sqrt{2\beta})^{2} (\boldsymbol{\nabla}\times\textbf{B}(\textbf{x}))\cdot(\boldsymbol{\nabla}\times\textbf{B}(\textbf{x})) \nonumber\\ &&+\boldsymbol{\nabla}\cdot\boldsymbol{\Omega}(\textbf{x}) +{\cal O}\left((\hbar\sqrt{2\beta})^{4}\right), \end{eqnarray} where \begin{eqnarray} \boldsymbol{\Omega}(\textbf{x})&:=&\frac{1}{2\mu_{0}}(\hbar\sqrt{2\beta})^{2} (\boldsymbol{\nabla}\times\textbf{B}(\textbf{x}))\times\textbf{B}(\textbf{x})\;. \end{eqnarray} After dropping the total divergence term $\boldsymbol{\nabla}\cdot\boldsymbol{\Omega}(\textbf{x})$, the modified energy density (40) is equivalent to the following modified energy density: \begin{eqnarray} u^{^{\textbf{ML}}}_{B}&=&\frac{1}{2\mu_{0}}\textbf{B}(\textbf{x})\cdot\textbf{B}(\textbf{x})+\frac{1}{2\mu_{0}}(\hbar\sqrt{2\beta})^{2} (\boldsymbol{\nabla}\times\textbf{B}(\textbf{x}))\cdot(\boldsymbol{\nabla}\times\textbf{B}(\textbf{x})) \nonumber\\ && +{\cal O}\left((\hbar\sqrt{2\beta})^{4}\right). \end{eqnarray} The term $\frac{1}{2\mu_{0}}(\hbar\sqrt{2\beta})^{2} (\boldsymbol{\nabla}\times\textbf{B}(\textbf{x}))\cdot(\boldsymbol{\nabla}\times\textbf{B}(\textbf{x}))$ in Eq. (42) shows the effect of the minimal length corrections. \section{Green's Function for a Magnetostatic Field in the Presence of a Minimal Length Scale} Substituting Eq. (22) into Eq. (33) and using the vector identity (38), we obtain \begin{eqnarray} (1-a^{2}\nabla^{2})[\boldsymbol{\nabla}(\boldsymbol{\nabla}\cdot\textbf{A}(\textbf{x}))-\nabla^{2}\textbf{A}(\textbf{x})]=\mu_{0}\textbf{J}(\textbf{x}). \end{eqnarray} In the Coulomb gauge $(\boldsymbol{\nabla}\cdot\textbf{A}(\textbf{x})=0)$, Eq. (43) can be written as \begin{eqnarray} (1-a^{2}\nabla^{2})\nabla^{2}\textbf{A}(\textbf{x})=-\mu_{0}\textbf{J}(\textbf{x}). \end{eqnarray} The solution of Eq. (44) in terms of the Green's function, $G(\textbf{x},\textbf{x}')$, is given by \begin{eqnarray} \textbf{A}(\textbf{x})=\textbf{A}_{0}(\textbf{x})+\frac{\mu_{0}}{4\pi}\int G(\textbf{x},\textbf{x}')\textbf{J}(\textbf{x}')d^{3}x'\; , \end{eqnarray} where $\textbf{A}_{0}(\textbf{x})$ and $G(\textbf{x},\textbf{x}')$ satisfy the equations \begin{eqnarray} (1-a^{2}\nabla^{2})\nabla^{2}\textbf{A}_{0}(\textbf{x})=0, \end{eqnarray} and \begin{eqnarray} (1-a^{2}\nabla^{2}_{\textbf{x}})\nabla^{2}_{\textbf{x}}G(\textbf{x},\textbf{x}')=-4\pi\delta(\textbf{x}-\textbf{x}'). \end{eqnarray} Now, let us solve Eq. (47) by writing $G(\textbf{x},\textbf{x}')$ and $\delta(\textbf{x}-\textbf{x}')$ in terms of Fourier integrals as follows: \begin{eqnarray} G(\textbf{x},\textbf{x}')&=&\frac{1}{(2\pi)^{3}}\int\;e^{-i\textbf{k}\cdot(\textbf{x}-\textbf{x}')}\widetilde{G}(\textbf{k})d^{3}k\; , \\ \delta(\textbf{x}-\textbf{x}')&=&\frac{1}{(2\pi)^{3}}\int\;e^{-i\textbf{k}\cdot(\textbf{x}-\textbf{x}')}d^{3}k\; . \end{eqnarray} If we substitute Eqs. (48) and (49) into Eq. (47), we obtain the functional form of $\widetilde{G}(\textbf{k})$ as follows: \begin{eqnarray} \widetilde{G}(\textbf{k})&=&\frac{4\pi}{\textbf{k}^{2}+a^{2}(\textbf{k}^{2})^{2}} \nonumber\\ &=&4\pi(\frac{1}{\textbf{k}^{2}}-\frac{a^{2}}{1+a^{2}\textbf{k}^{2}}). \end{eqnarray}
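As a side remark, the partial fraction decomposition used in Eq. (50) is elementary and can be checked mechanically; the following short Python/SymPy snippet (an editorial addition, with the symbol $k2$ standing for $\textbf{k}^{2}$) confirms it.
\begin{verbatim}
# Check Eq. (50):  4*pi/(k^2 + a^2*(k^2)^2)
#                = 4*pi*(1/k^2 - a^2/(1 + a^2*k^2))
import sympy as sp

k2, a = sp.symbols('k2 a', positive=True)   # k2 plays the role of k**2
lhs = 4*sp.pi / (k2 + a**2 * k2**2)
rhs = 4*sp.pi * (1/k2 - a**2/(1 + a**2*k2))
assert sp.simplify(lhs - rhs) == 0
\end{verbatim}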
If Eq. (50) is inserted into Eq. (48), the Green's function $G(\textbf{x},\textbf{x}')$ becomes \begin{eqnarray} G(\textbf{x},\textbf{x}')&=&\frac{1}{2\pi^{2}}\int\;e^{-i\textbf{k}\cdot(\textbf{x}-\textbf{x}')}(\frac{1}{\textbf{k}^{2}}-\frac{a^{2}}{1+a^{2}\textbf{k}^{2}})d^{3}k\; \nonumber\\ &=&\frac{1-e^{-\frac{|\textbf{x}-\textbf{x}'|}{a}}}{|\textbf{x}-\textbf{x}'|}. \end{eqnarray} This type of Green's function has been considered in electrodynamics to avoid divergences associated with point charges [38,45,49,50]. Using Eqs. (45) and (51), the particular solution of Eq. (44) which vanishes at infinity is \begin{eqnarray} \textbf{A}(\textbf{x})=\frac{\mu_{0}}{4\pi}\int\frac{1-e^{-\frac{|\textbf{x}-\textbf{x}'|}{a}}}{|\textbf{x}-\textbf{x}'|}\textbf{J}(\textbf{x}')d^{3}x'. \end{eqnarray} The vector potential (52) satisfies the Coulomb gauge condition $\boldsymbol{\nabla}\cdot\textbf{A}(\textbf{x})=0$. The expression (52) can be applied to current circuits by making the substitution: $\textbf{J}(\textbf{x}')d^{3}x'\rightarrow Id\textbf{l}'$. Thus \begin{eqnarray} \textbf{A}(\textbf{x})=\frac{\mu_{0} I}{4\pi}\int_{C}\frac{1-e^{-\frac{|\textbf{x}-\textbf{x}'|}{a}}}{|\textbf{x}-\textbf{x}'|}d\textbf{l}', \end{eqnarray} where $C$ is the contour defined by the wire. If we use Eqs. (22) and (52), we obtain the magnetic induction vector $\textbf{B}(\textbf{x})$ as follows: \begin{eqnarray*} \textbf{B}(\textbf{x})=\frac{\mu_{0}}{4\pi}\int\frac{\textbf{J}(\textbf{x}')\times(\textbf{x}-\textbf{x}')}{|\textbf{x}-\textbf{x}'|^{3}} [1-(1+\frac{|\textbf{x}-\textbf{x}'|}{a})e^{-\frac{|\textbf{x}-\textbf{x}'|}{a}}]d^{3}x', \end{eqnarray*} or \begin{eqnarray} \textbf{B}(\textbf{x})=\frac{\mu_{0}I}{4\pi}\int_{C}\frac{d \textbf{l}'\times(\textbf{x}-\textbf{x}')}{|\textbf{x}-\textbf{x}'|^{3}} [1-(1+\frac{|\textbf{x}-\textbf{x}'|}{a})e^{-\frac{|\textbf{x}-\textbf{x}'|}{a}}]. \end{eqnarray} Equation (54) is the Biot-Savart law in the presence of a minimal length scale.\\ In the limit $a=\hbar\sqrt{2\beta}\rightarrow 0$, the modified Biot-Savart law in (54) smoothly becomes the usual Biot-Savart law, i.e., \begin{eqnarray} \lim_{a\rightarrow0}\textbf{B}(\textbf{x})=\frac{\mu_{0}I}{4\pi}\int_{C}\frac{d \textbf{l}'\times(\textbf{x}-\textbf{x}')}{|\textbf{x}-\textbf{x}'|^{3}}. \end{eqnarray}
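Before estimating bounds, it is reassuring to verify the Green's function (51) directly. The following SymPy sketch (an editorial check, using the radial form of the Laplacian with $r=|\textbf{x}-\textbf{x}'|>0$) confirms that $G$ is annihilated by the Podolsky operator $(1-a^{2}\nabla^{2})\nabla^{2}$ away from the source point, and that it reduces to the usual kernel $1/r$ in the limit $a\rightarrow 0$.
\begin{verbatim}
# G(r) = (1 - exp(-r/a))/r solves (1 - a^2 lap) lap G = 0 for r > 0;
# the delta function of Eq. (47) lives at r = 0 only.
import sympy as sp

r, a = sp.symbols('r a', positive=True)
G = (1 - sp.exp(-r/a)) / r

def lap(g):                      # radial Laplacian: g'' + (2/r) g'
    return sp.diff(g, r, 2) + 2*sp.diff(g, r)/r

assert sp.simplify(lap(G) - a**2 * lap(lap(G))) == 0
assert sp.limit(G, a, 0, '+') == 1/r     # usual magnetostatic kernel
\end{verbatim}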
\section{Upper Bound Estimation of the Minimal Length Scale in Modified Magnetostatics} Now, let us estimate the upper bounds on the isotropic minimal length scale in modified magnetostatics. By putting $\beta'=2\beta$ into (6) the isotropic minimal length scale becomes \begin{equation} (\triangle X^{i})_{min}=\sqrt{\frac{D+2}{2}}\;(\hbar\sqrt{2\beta})\; , \quad\forall i\in \{1,2, \cdots ,D\}. \end{equation} The isotropic minimal length scale (56) in three spatial dimensions is given by \begin{equation} (\triangle X^{i})_{min}={\frac{\sqrt{10}}{2}}\;a\quad , \quad\forall i\in \{1,2,3\}, \end{equation} where $a=\hbar\sqrt{2\beta}$.\\ In a series of papers, Sprenger and co-workers [51,52] have concluded that the minimal length scale $(\triangle X^{i})_{min}$ in Eq. (57) might lie anywhere between the Planck length scale $(\ell_{P}\sim 10^{-35}\, m)$ and the electroweak length scale $(\ell_{electroweak}\sim 10^{-18}\, m)$, i.e., \begin{equation} 10^{-35}\, m < (\triangle X^{i})_{min}< 10^{-18}\, m. \end{equation} According to the above statements, the upper bound on the isotropic minimal length scale in three spatial dimensions becomes \begin{equation} (\triangle X^{i})_{min}< 10^{-18}\, m. \end{equation} Inserting (59) into (57), we find \begin{equation} a < 0.63 \times 10^{-18}\, m. \end{equation} Similarly, Accioly et al. [34, 36, 37] have estimated an upper bound on Podolsky's characteristic length $a$ by computing the anomalous magnetic moment of the electron in the framework of Podolsky's electrodynamics. This upper bound on $a$ is \begin{equation} a < 4.7 \times 10^{-18}\, m. \end{equation} Note that the upper bound on Podolsky's characteristic length $a$ in Eq. (60) is close to the upper bound on Podolsky's characteristic length in Eq. (61).\\ Another upper bound on the minimal length scale has been obtained in Ref. [53] by considering minimal length corrections to the gyromagnetic moment of electrons and muons. If we compare Eq. (13) in this work with Eq. (40) in Ref. [16], we obtain \begin{equation} \hbar\sqrt{\beta}=\frac{L_{f}}{\sqrt{3}}\;, \end{equation} where $L_{f}$ is the minimal length scale in Refs. [16,53]. If we substitute (62) into (56), we will obtain the isotropic minimal length in three spatial dimensions as follows: \begin{equation} (\triangle X^{i})_{min}=\sqrt{\frac{5}{3}}L_{f},\quad\forall i\in \{1,2,3\}. \end{equation} The minimal length scale $L_{f}$ in Eqs. (62) and (63) can be written as \begin{equation} L_{f}=\frac{\hbar}{M_{f}c}\;, \end{equation} where $M_{f}$ is a new fundamental mass scale [16,53]. Inserting Eq. (64) into Eq. (63), we find \begin{equation} (\triangle X^{i})_{min}=\sqrt{\frac{5}{3}}\; \frac{\hbar}{M_{f}c}\;,\quad\forall i\in \{1,2,3\}. \end{equation} In Ref. [53] it was shown that the effect of minimal length corrections to the gyromagnetic moment of the muon leads to the following lower bound on the fundamental mass scale of the theory: \begin{equation} M_{f}\geq 577\; \frac{GeV}{c^{2}}. \end{equation} Substituting Eq. (66) into Eq. (65), the isotropic minimal length scale in three spatial dimensions becomes \begin{equation} (\triangle X^{i})_{min}\leq 4.42\times10^{-19}\, m. \end{equation} If we insert Eq. (67) into Eq. (57), we will find \begin{equation} a \leq 2.79\times 10^{-19}\, m. \end{equation} It is interesting to note that the numerical value of the upper bound on $a$ in Eq. (68) and the numerical value of the upper bound on $a$ in Eq. (60) are close to each other.
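The numbers quoted in Eqs. (67) and (68) follow from Eqs. (57) and (65) by straightforward arithmetic; the following short Python check (an editorial addition, using $\hbar c \approx 197.327\; MeV\cdot fm$) reproduces them.
\begin{verbatim}
# Reproduce the bounds (67) and (68) from M_f >= 577 GeV/c^2.
hbar_c = 197.327e6 * 1e-15        # hbar*c in eV*m
Mf_c2  = 577e9                    # eV, lower bound of Eq. (66)

L_f    = hbar_c / Mf_c2                   # L_f = hbar/(M_f c), Eq. (64)
dX_min = (5.0/3.0)**0.5 * L_f             # Eq. (65)
a_max  = (2.0/10.0**0.5) * dX_min         # inverting Eq. (57)

print(f"(Delta X)_min <= {dX_min:.3e} m") # ~4.42e-19 m, Eq. (67)
print(f"a             <= {a_max:.3e} m")  # ~2.79e-19 m, Eq. (68)
\end{verbatim}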
(69), we obtain the following Lagrangian density: \begin{eqnarray} {\cal L}&=&-\frac{1}{4\mu_{0}}F_{ij}(\textbf{x})F^{ij}(\textbf{x})+\frac{1}{4\mu_{0}}\theta F_{ij}(\textbf{x})\nabla^{2}F^{ij}(\textbf{x})\nonumber\\ &+&\frac{1}{4\mu_{0}}\sum_{l=2}^{+\infty} (-1)^{l+1}\;\frac{\theta^{l}}{l!}\;F_{ij}(\textbf{x})(\nabla^{2})^{l}F^{ij}(\textbf{x})+J^{i}(\textbf{x})A^{i}(\textbf{x}). \end{eqnarray} If we neglect terms of order $\theta^{2}$ and higher in Eq. (71), we find \begin{equation} {\cal L}=-\frac{1}{4\mu_{0}}F_{ij}(\textbf{x})F^{ij}(\textbf{x})+\frac{1}{4\mu_{0}}\theta F_{ij}(\textbf{x})\nabla^{2}F^{ij}(\textbf{x})+J^{i}(\textbf{x})A^{i}(\textbf{x}). \end{equation} A comparison between Eqs. (27) and (72) clearly shows that there is an equivalence between the Gaete-Spallucci non-local magnetostatics to first order in $\theta$ and the magnetostatic sector of the Abelian Lee-Wick model (or magnetostatics in the presence of a minimal length scale). The relationship between the non-commutative constant parameter $\theta$ in Eq. (72) and $a=\hbar\sqrt{2\beta}$ in Eq. (27) is \begin{equation} \theta=a^{2}. \end{equation} According to Eq. (73), $a=\sqrt{\theta}$ plays the role of the minimal length in the Gaete-Spallucci non-local magnetostatics [57,59].\\ If we insert Eq. (73) into Eq. (57), we find \begin{equation} (\triangle X^{i})_{min}=\frac{\sqrt{10\;\theta}}{2}\quad , \quad\forall i\in \{1,2,3\}. \end{equation} Using Eq. (68) in Eq. (73), we obtain the following upper bound for the non-commutative parameter $\theta$: \begin{equation} \theta_{_{\textbf{MLCGMM}}}\leq 7.78\times10^{-38} \; m^{2}, \end{equation} where we use the abbreviation $\textbf{MLCGMM}$ for the minimal length corrections to the gyromagnetic moment of the muon. Chaichian and his collaborators have investigated the Lamb shift in non-commutative quantum electrodynamics $(\textbf{NCQED})$ [60,61]. They found the following upper bound for the non-commutative parameter $\theta$: \begin{eqnarray*} \theta_{_{\textbf{NCQED}}}\leq(10^{4} GeV)^{-2} , \end{eqnarray*} or \begin{equation} \theta_{_{\textbf{NCQED}}}\leq 3.88\times10^{-40}\;m^{2} . \end{equation} For a review of the phenomenology of non-commutative geometry see Ref. [62]. The upper bound (75) is about two orders of magnitude larger than the upper bound (76), i.e., \begin{equation} \theta_{_{\textbf{MLCGMM}}}\sim \;10^{2} \;\theta_{_{\textbf{NCQED}}}. \end{equation} If we insert (61) into (73), we obtain the following upper bound for $\theta$: \begin{equation} \theta_{_{\textbf{MLCGME}}}\leq 2.2\times10^{-35} \; m^{2}, \end{equation} where we use the abbreviation $\textbf{MLCGME}$ for the minimal length corrections to the gyromagnetic moment of the electron. The upper bound (78) is about four orders of magnitude larger than the upper bound (76), i.e., \begin{equation} \theta_{_{\textbf{MLCGME}}}\sim \;10^{4} \;\theta_{_{\textbf{NCQED}}}. \end{equation} A comparison between Eq. (77) and Eq. (79) shows that $\theta_{_{\textbf{MLCGMM}}}$ is closer to $\theta_{_{\textbf{NCQED}}}$. It should be emphasized that magnetostatics in the presence of a minimal length scale is only correct to first order in the deformation parameter $\beta$, while the Gaete-Spallucci non-local magnetostatics is valid to all orders in the non-commutative parameter $\theta$. \section{Conclusions} After the appearance of quantum field theory, many theoretical physicists attempted to reformulate quantum field theory in the presence of a minimal length scale [63,64]. 
The hope was that the introduction of such a minimal length scale would lead to a divergence-free quantum field theory [65]. Recent studies in perturbative string theory and quantum gravity suggest that there is a minimal length scale in nature [1]. Today we know that the existence of a minimal length scale leads to a generalization of the Heisenberg uncertainty principle. An immediate consequence of the GUP is that the usual position and derivative operators must be replaced by the modified position and derivative operators according to Eqs. (23) and (24) for $\beta'=2\beta$. We have formulated magnetostatics in the presence of a minimal length scale based on the Kempf algebra. It was shown that there is a similarity between magnetostatics in the presence of a minimal length scale and the magnetostatic sector of the Abelian Lee-Wick model. The integral form of Ampere's law and the energy density of a magnetostatic field in the presence of a minimal length scale have been obtained. Also, the Biot-Savart law in the presence of a minimal length scale has been derived. We have shown that in the limit $\hbar\sqrt{2\beta}\rightarrow 0$, the modified Ampere and Biot-Savart laws become the usual Ampere and Biot-Savart laws. It is worth noting that the upper bounds on the isotropic minimal length scale in Eqs. (59) and (67) are close to the electroweak length scale $(\ell_{electroweak}\sim 10^{-18}\, m)$. We have demonstrated the equivalence between the Gaete-Spallucci non-local magnetostatics to first order in $\theta$ and magnetostatics with a minimal length to first order in the deformation parameter $\beta$. Recently, Romero and collaborators have formulated a higher-derivative electrodynamics [66]. In this work we have formulated a higher-derivative magnetostatics in the framework of the Kempf algebra, whereas the authors of [66] have studied an electrodynamics consistent with anisotropic transformations of spacetime with an arbitrary dynamic exponent $z$. \vspace{2cm} \section*{Acknowledgments} We are grateful to S. Meljanac and J. M. Romero for their interest in this work and for drawing our attention to the references [8,19,66]. Also, we would like to thank the referee for useful comments and suggestions.
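\section*{Appendix: Numerical Cross-Check of the Upper Bounds} The arithmetic behind the upper bounds quoted in Eqs. (60), (67), (68), (75), (76), and (78) can be verified with a few lines of Python. The script below is only an illustrative sketch: apart from the values quoted in the text, it assumes nothing beyond the standard conversion constant $\hbar c \approx 197.327\;\mathrm{MeV\cdot fm}$, and small differences in the last digit are due to rounding. \begin{verbatim}
# Numerical cross-check of the upper bounds quoted in the text.
# Assumed conversion constant: hbar*c = 197.327 MeV*fm = 0.197327e-15 GeV*m.
HBARC_GEV_M = 0.197327e-15

# Eq. (60): a < (2/sqrt(10)) * 1e-18 m, from (Delta X)_min < 1e-18 m
print(2 / 10**0.5 * 1e-18)                 # ~0.63e-18 m

# Eqs. (64)-(68): bound from the muon gyromagnetic moment, M_f >= 577 GeV/c^2
L_f = HBARC_GEV_M / 577.0                  # L_f = hbar / (M_f c)
dx_min = (5.0 / 3.0) ** 0.5 * L_f          # Eq. (65): ~4.42e-19 m
a_muon = 2 / 10**0.5 * dx_min              # inverting Eq. (57): ~2.79e-19 m
print(dx_min, a_muon)

# Eq. (73): theta = a^2
print(a_muon**2)                           # theta_MLCGMM ~ 7.8e-38 m^2
print((4.7e-18)**2)                        # theta_MLCGME ~ 2.2e-35 m^2

# Eq. (76): theta_NCQED <= (10^4 GeV)^(-2), converted to m^2
print((HBARC_GEV_M / 1e4)**2)              # ~3.9e-40 m^2
\end{verbatim}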
\section{Introduction} \begin{figure}[ht] \centering \includegraphics[width=.8\linewidth]{overview.pdf} \caption{ \small {Learning robust joint representations via multimodal cyclic translations. Top: cyclic translations from a source modality (language) to a target modality (visual). Bottom: the representation learned between language and vision is further translated into the acoustic modality, forming the final joint representation. In both cases, the joint representation is then used for sentiment prediction.}} \label{fig:overview} \end{figure} Sentiment analysis is an open research problem in machine learning and natural language processing which involves identifying a speaker's opinion~\cite{Pang:2002:TUS:1118693.1118704}. Text-only sentiment analysis through words, phrases, and their compositionality has been found to be insufficient for inferring the sentiment of spoken opinions~\cite{morency2011towards}, especially in the presence of the rich nonverbal behaviors that can accompany language~\cite{shaffer2018exploring}. As a result, there has been a recent push towards using machine learning methods to learn joint representations from the additional information present in the visual and acoustic modalities. This research field has become known as multimodal sentiment analysis and extends the conventional text-based definition of sentiment analysis to a multimodal environment.\blfootnote{* Equal contributions} For example,~\cite{kaushik2013sentiment} explore the additional acoustic modality, while~\cite{wollmer2013youtube} use the language, visual, and acoustic modalities present in monologue videos to predict sentiment. This push has been further bolstered by the advent of multimodal social media platforms, such as YouTube, Facebook, and VideoLectures, which are used to express personal opinions on a worldwide scale. The abundance of multimodal data has led to the creation of multimodal datasets, such as CMU-MOSI~\cite{zadeh2016multimodal} and ICT-MMMO~\cite{wollmer2013youtube}, as well as deep multimodal models that are highly effective at learning discriminative joint multimodal representations~\cite{localglobal,factorized,chen2017msa}. Existing prior work learns joint representations using multiple modalities as input~\cite{multistage,morency2011towards,zadeh2016multimodal}. However, these joint representations also require all modalities as input at test time, making them sensitive to noisy or missing modalities~\cite{DBLP:conf/cvpr/Tran0ZJ17,Cai:2018:DAL:3219819.3219963}. To address this problem, we draw inspiration from the recent success of Seq2Seq models for unsupervised representation learning~\cite{seq2seq_nn,DBLP:journals/corr/TuLSLL16}. We propose the Multimodal Cyclic Translation Network \ model (MCTN) to learn robust joint multimodal representations by translating between modalities. Figure~\ref{fig:overview} illustrates these translations between two or three modalities. Our method is based on the key insight that translation from a source modality $S$ to a target modality $T$ results in an intermediate representation that captures joint information between modalities $S$ and $T$. MCTN \ extends this insight using a cyclic translation loss involving both \textit{forward translations} from source to target modalities, and \textit{backward translations} from the predicted target back to the source modality. 
Together, we call these \textit{multimodal cyclic translations}; they ensure that the learned joint representations capture maximal information from both modalities. We also propose a hierarchical MCTN \ to learn joint representations between a source modality and multiple target modalities. MCTN \ is trainable end-to-end with a coupled translation-prediction loss, which consists of (1) the cyclic translation loss, and (2) a prediction loss to ensure that the learned joint representations are task-specific (\textit{i.e.}, multimodal sentiment analysis). Another advantage of MCTN \ is that once trained with multimodal data, we \textit{only} need data from the source modality at test time to infer the joint representation and label. As a result, MCTN \ is completely robust to test-time perturbations or missing information on other modalities. Even though translation and generation of video, audio, and text are difficult~\cite{DBLP:journals/corr/abs-1710-00421}, our experiments show that the learned joint representations help with discriminative tasks: MCTN \ achieves new state-of-the-art results on multimodal sentiment analysis using the CMU-MOSI~\cite{zadeh2016multimodal}, ICT-MMMO~\cite{wollmer2013youtube}, and YouTube~\cite{morency2011towards} public datasets. Additional experiments show that MCTN \ learns increasingly discriminative joint representations with more input modalities during training. \section{Related Work} Early work on sentiment analysis focused primarily on written text~\cite{Pang:2002:TUS:1118693.1118704,pang2008opinion,socher2013recursive}. Recently, multimodal sentiment analysis has gained more research interest~\cite{mm_survey}. Probably the most challenging task in multimodal sentiment analysis is learning a joint representation of multiple modalities. Earlier work used fusion approaches such as concatenation of input features~\cite{ngiam2011multimodal,lazaridou2015combining}. Several neural network models have also been proposed to learn joint multimodal representations. \cite{multistage} presented a multistage approach to learn hierarchical multimodal representations. The Tensor Fusion Network~\cite{tensoremnlp17} and its approximate low-rank model~\cite{lowrank} presented methods based on Cartesian products to model unimodal, bimodal and trimodal interactions. The Gated Multimodal Embedding model~\cite{chen2017msa} learns an on-off switch to filter noisy or contradictory modalities. Other models have proposed using attention~\cite{Cheng:2017:HMA:3077136.3080671} and memory mechanisms~\cite{zadeh2018memory} to learn multimodal representations. In addition to purely supervised approaches, generative methods based on Generative Adversarial Networks (GANs)~\cite{gan} have attracted significant interest in learning joint distributions between two or more modalities~\cite{bigan,triplegan}. Another method for multimodal data is to develop conditional generative models~\cite{conditionalvae,variationalmultimodal} and learn to translate one modality to another. Generative-discriminative objectives have been used to learn either joint~\cite{seq2seq,kiros2014unifying} or factorized~\cite{factorized} representations. Our work takes into account the sequential dependency among modalities and explores the effect of a cyclic translation loss on modality translation. Finally, there has been some progress on accounting for noisy or missing modalities at test time. 
One general approach is to infer the missing modalities by modeling the probabilistic relationships among different modalities. \citet{JMLR:v15:srivastava14b} proposed using Deep Boltzmann Machines to jointly model the probability distribution over multimodal data. Sampling from the conditional distributions over each modality allows for test-time inference in the presence of missing modalities. \citet{NIPS2014_5279} trained Restricted Boltzmann Machines to minimize the variation of information between modality-specific latent variables. Recently, neural models such as cascaded residual autoencoders~\cite{DBLP:conf/cvpr/Tran0ZJ17}, deep adversarial learning~\cite{Cai:2018:DAL:3219819.3219963}, or multiple kernel learning~\cite{10.1007/978-3-642-15549-9_49} have also been proposed for these tasks. It was also found that training with modalities dropped at random can improve the robustness of joint representations~\cite{ngiam2011multimodal}. These methods approximately infer the missing modalities before prediction~\cite{Q14-1023,AAAI1714811}, leading to possible error compounding. On the other hand, MCTN \ remains fully robust to missing or perturbed target modalities during testing. \section{Proposed Approach}\label{sec:PROPAPR} In this section, we describe our approach for learning joint multimodal representations through modality translations. \subsection{Problem Formulation and Notation} A multimodal dataset consists of $N$ labeled video segments defined as $\mathbf{X} = (\mathbf{X}^{l}, \mathbf{X}^{v}, \mathbf{X}^{a})$ for the language, visual, and acoustic modalities, respectively. The dataset contains $N$ samples, $\mathbf{X} = (\mathbf{X}_1, \mathbf{X}_2, ..., \mathbf{X}_N)$, where $\mathbf{X}_i = ({\mathbf{X}_i^{l}}, {\mathbf{X}_i^v}, {\mathbf{X}_i^a}), \, 1 \leq i \leq N$. The corresponding labels for these $N$ segments are denoted as $\mathbf{y}=(y_1, y_2, ..., y_N), \, y_i \in \mathbb{R}$. Following prior work, the multimodal data is synchronized by aligning the input based on the boundaries of each word and zero-padding each example to obtain time-series data of the same length~\cite{multistage}. The $i$th sample is given by ${\mathbf{X}_i^l} = ({\mathbf{w}_i}^{(1)}, {\mathbf{w}_i}^{(2)}, ..., {\mathbf{w}_i}^{(L)})$, where ${\mathbf{w}_i}^{(\ell)}$ stands for the $\ell$th word and $L$ is the length of each example. To accompany the language features, we also have a sequence of visual features ${\mathbf{X}_i^v} = ({\mathbf{v}_i}^{(1)}, {\mathbf{v}_i}^{(2)}, ..., {\mathbf{v}_i}^{(L)})$ and acoustic features ${\mathbf{X}_i^a} = ({\mathbf{a}_i}^{(1)}, {\mathbf{a}_i}^{(2)}, ..., {\mathbf{a}_i}^{(L)})$. \subsection{Learning Joint Representations} Learning a joint representation between two modalities $\mathbf{X}^{S}$ and $\mathbf{X}^{T}$ amounts to learning a parametrized function $f_{\theta}$ that returns an embedding $\mathcal{E}_{ST} = f_{\theta}(\mathbf{X}^{S},\mathbf{X}^{T})$. From there, another function $g_{w}$ is learned that predicts the label given this joint representation: $\hat{\mathbf{y}} = g_w(\mathcal{E}_{ST})$. Most work follows this framework during both training and testing~\cite{multistage,lowrank,factorized,zadeh2018memory}. 
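To make this standard framework concrete, the following minimal Python sketch shows why paired modalities are needed again at test time. It is our own illustration, not any specific prior model: the concatenation-based fusion, the GRU encoder, and all feature sizes are assumptions. \begin{verbatim}
# Sketch of the standard joint-representation pipeline: f_theta fuses the
# paired modalities into E_ST, and g_w predicts the label from it.
import torch
import torch.nn as nn

f_theta = nn.GRU(input_size=300 + 74, hidden_size=64, batch_first=True)
g_w = nn.Linear(64, 1)

# A batch of 8 word-aligned sequences of length L = 20 (sizes illustrative).
x_s, x_t = torch.randn(8, 20, 300), torch.randn(8, 20, 74)
_, h_n = f_theta(torch.cat([x_s, x_t], dim=-1))  # E_ST built from BOTH modalities
y_hat = g_w(h_n[-1])                             # so X^T is also required at test time
\end{verbatim}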
During training, the parameters $\theta$ and $w$ are learned by empirical risk minimization over paired multimodal data and labels in the training set $(\mathbf{X}^{S}_{tr},\mathbf{X}^{T}_{tr},\mathbf{y}_{tr})$: \begin{align} {\mathcal{E}}_{ST} &= f_{\theta}(\mathbf{X}^{S}_{tr},\mathbf{X}^{T}_{tr}), \\ \hat{\mathbf{y}}_{tr} &= g_{w}({\mathcal{E}}_{ST}), \\ \theta^*, w^* &= \argmin_{\theta,w} \mathbb{E} \ [\ell_{\mathbf{y}}(\hat{\mathbf{y}}_{tr},\mathbf{y}_{tr})], \end{align} for a suitable choice of loss function $\ell_{\mathbf{y}}$ over the labels ($tr$ denotes the training set). During testing, paired multimodal data in the test set $(\mathbf{X}^{S}_{te},\mathbf{X}^{T}_{te})$ are used to infer the label ($te$ denotes the test set): \begin{align} {\mathcal{E}}_{ST} &= f_{\theta^*}(\mathbf{X}^{S}_{te},\mathbf{X}^{T}_{te}), \\ \hat{\mathbf{y}}_{te} &= g_{w^*}({\mathcal{E}}_{ST}). \end{align} \subsection{Multimodal Cyclic Translation Network} Multimodal Cyclic Translation Network \ (MCTN) is a neural model that learns robust joint representations by modality translations. Figure~\ref{fig:s2s} shows a detailed description of MCTN \ for two modalities. Our method is based on the key insight that translation from a source modality $\mathbf{X}^{S}$ to a target modality $\mathbf{X}^{T}$ results in an intermediate representation that captures joint information between modalities $\mathbf{X}^{S}$ and $\mathbf{X}^{T}$, while using only the source modality $\mathbf{X}^{S}$ as input at test time. To ensure that our model learns joint representations that retain maximal information from all modalities, we use a cycle consistency loss~\cite{DBLP:journals/corr/ZhuPIE17} during modality translation. This method can also be seen as a variant of back-translation, which has recently been applied to style transfer~\cite{DBLP:journals/corr/abs-1804-09000,DBLP:journals/corr/ZhuPIE17} and unsupervised machine translation~\cite{DBLP:journals/corr/abs-1804-07755}. We use back-translation in a multimodal environment where we encourage our translation model to learn informative joint representations with only the source modality as input. To define the cycle consistency loss for modality translation, we decompose the function $f_{\theta}$ into two parts: an encoder $f_{\theta_e}$ and a decoder $f_{\theta_d}$. The encoder takes in $\mathbf{X}^{S}$ as input and returns a joint embedding $\mathcal{E}_{S \rightarrow T}$: \begin{equation} \mathcal{E}_{S \rightarrow T} = f_{\theta_e} (\mathbf{X}^{S}), \end{equation} which the decoder then transforms into a prediction of the target modality: \begin{equation} \hat{\mathbf{X}}^{T} = f_{\theta_d} (\mathcal{E}_{S \rightarrow T}), \end{equation} following which the decoded modality $T$ is translated back into modality $S$: \begin{align} {\mathcal{E}}_{T \rightarrow S} = f_{\theta_e}(\hat{\mathbf{X}}^{T}), \ \hat{\mathbf{X}}^{S} = f_{\theta_d} ({\mathcal{E}}_{T \rightarrow S}). \end{align} \begin{figure}[tbp] \centering \includegraphics[width=0.5\textwidth]{bimodal.pdf} \caption{ \small { MCTN \ architecture for two modalities: the source modality $\mathbf{X}^{S}$ and the target modality $\mathbf{X}^{T}$. The joint representation ${\mathcal{E}}_{S \leftrightarrows T}$ is obtained via a cyclic translation between $\mathbf{X}^{S}$ and $\mathbf{X}^{T}$. Next, the joint representation ${\mathcal{E}}_{S \leftrightarrows T}$ is used for sentiment prediction. The model is trained end-to-end with a coupled translation-prediction objective. 
At test time, only the source modality $\mathbf{X}^{S}$ is required.} } \label{fig:s2s} \end{figure} The joint representation is learned by using a Seq2Seq model with attention~\cite{bahdanau2014neural} that translates the source modality $\mathbf{X}^{S}$ to a target modality $\mathbf{X}^{T}$. While Seq2Seq models have been predominantly used for machine translation, we extend their usage to the realm of multimodal machine learning. The hidden state at each time step is computed by a recurrent network from the previous hidden state and the current input: \begin{equation} \mathbf{h}_\ell = \mathtt{RNN}(\mathbf{h}_{\ell-1}, \mathbf{X}^{S}_\ell) \quad \forall \ell \in [1,L]. \end{equation} The encoder's output is the concatenation of all hidden states of the encoding RNN, \begin{equation} \mathcal{E}_{S \rightarrow T} = [\mathbf{h}_{1}, \mathbf{h}_{2}, ..., \mathbf{h}_{L}], \end{equation} where $L$ is the length of the source modality $\mathbf{X}^{S}$. The decoder maps the representation $\mathcal{E}_{S \rightarrow T}$ into the target modality $\mathbf{X}^{T}$. This is performed by decoding one token $\mathbf{X}^{T}_\ell$ at a time based on $\mathcal{E}_{S \rightarrow T}$ and all previously decoded tokens, which is formulated as \begin{equation} p(\mathbf{X}^{T}) = \prod_{\ell=1}^{L} p(\mathbf{X}^{T}_\ell|\mathcal{E}_{S \rightarrow T}, \mathbf{X}^{T}_1, ..., \mathbf{X}^{T}_{\ell-1}). \end{equation} MCTN \ accepts variable-length inputs of $\mathbf{X}^{S}$ and $\mathbf{X}^{T}$, and is trained to maximize the translation conditional probability $p(\mathbf{X}^{T}|\mathbf{X}^{S})$. The best translation sequence is then given by \begin{equation} \hat{\mathbf{X}}^{T} = \operatorname*{arg\,max}_{\mathbf{X}^{T}} p(\mathbf{X}^{T}|\mathbf{X}^{S}). \end{equation} We use the traditional beam search approach~\cite{seq2seq_nn} for decoding. To obtain the joint representation for multimodal prediction, we only use the forward translated representation during inference to remove the dependency on the target modality at test time. If cyclic translation is used, we denote the translated representation with the symbol $\leftrightarrows$: \begin{equation} {\mathcal{E}}_{S \leftrightarrows T} = {\mathcal{E}}_{S \rightarrow T}. \end{equation} ${\mathcal{E}}_{S \leftrightarrows T}$ is then used for sentiment prediction: \begin{align} \hat{\mathbf{y}} &= g_{w}({\mathcal{E}}_{S \leftrightarrows T}). \end{align} \subsection{Coupled Translation-Prediction Objective} Training is performed with paired multimodal data and labels in the training set $(\mathbf{X}^{S}_{tr},\mathbf{X}^{T}_{tr},\mathbf{y}_{tr})$. The objective consists of three loss terms. The first two are the forward translation loss $\mathcal{L}_t$ defined as \begin{equation} \mathcal{L}_t = \mathbb{E} [\ell_{\mathbf{X}^{T}}(\hat{\mathbf{X}}^{T},{\mathbf{X}}^{T})], \label{lt} \end{equation} and the cycle consistency loss $\mathcal{L}_c$ defined as \begin{equation} \mathcal{L}_c = \mathbb{E} [\ell_{\mathbf{X}^{S}}(\hat{\mathbf{X}}^{S},{\mathbf{X}}^{S})], \label{lc} \end{equation} where $\ell_{\mathbf{X}^{T}}$ and $\ell_{\mathbf{X}^{S}}$ represent the respective loss functions. We use the Mean Squared Error (MSE) between the ground-truth and translated modalities. Finally, the prediction loss $\mathcal{L}_p$ is defined as \begin{align} \mathcal{L}_p &= \mathbb{E} [ \ell_{\mathbf{y}}(\hat{\mathbf{y}},\mathbf{y})] \label{lp} \end{align} with a loss function $\ell_{\mathbf{y}}$ defined over the labels. 
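Before stating the full objective, we give a minimal PyTorch-style sketch of these loss terms, combined into the coupled objective defined next. This is an illustrative sketch only: the shared GRU encoder/decoder, the linear projections that map both modalities to a common feature width, and all names and layer sizes (e.g., \texttt{MCTNSketch}) are our own assumptions rather than the released implementation, and the attention mechanism and beam-search decoding of the full model are omitted. \begin{verbatim}
import torch
import torch.nn as nn
import torch.nn.functional as F

class MCTNSketch(nn.Module):
    """Bimodal sketch: one shared Seq2Seq performs the forward translation
    S -> T and the backward translation T_hat -> S (cyclic translation)."""
    def __init__(self, d_src, d_tgt, d_model=64):
        super().__init__()
        # Projections to a common width so one shared encoder/decoder can
        # handle both translation directions (a sketch assumption).
        self.in_src, self.in_tgt = nn.Linear(d_src, d_model), nn.Linear(d_tgt, d_model)
        self.enc = nn.GRU(d_model, d_model, batch_first=True)   # f_{theta_e}
        self.dec = nn.GRU(d_model, d_model, batch_first=True)   # f_{theta_d}
        self.out_src, self.out_tgt = nn.Linear(d_model, d_src), nn.Linear(d_model, d_tgt)
        self.g_w = nn.GRU(d_model, d_model, batch_first=True)   # predictor over E
        self.head = nn.Linear(d_model, 1)                       # sentiment score

    def translate(self, x, out_proj):
        e, _ = self.enc(x)          # E = concatenation of encoder hidden states
        h, _ = self.dec(e)
        return out_proj(h), e

    def forward(self, x_src, x_tgt):
        x_tgt_hat, e_st = self.translate(self.in_src(x_src), self.out_tgt)       # forward
        x_src_hat, _ = self.translate(self.in_tgt(x_tgt_hat), self.out_src)      # cyclic
        _, h_n = self.g_w(e_st)
        y_hat = self.head(h_n[-1])
        loss_t = F.mse_loss(x_tgt_hat, x_tgt)   # forward translation loss L_t
        loss_c = F.mse_loss(x_src_hat, x_src)   # cycle consistency loss  L_c
        return y_hat, loss_t, loss_c

# Usage: language -> acoustic, a batch of 8 sequences of length 20.
model = MCTNSketch(d_src=300, d_tgt=74)   # feature sizes are illustrative
x_l, x_a, y = torch.randn(8, 20, 300), torch.randn(8, 20, 74), torch.randn(8, 1)
y_hat, loss_t, loss_c = model(x_l, x_a)
loss = 1.0 * loss_t + 1.0 * loss_c + F.l1_loss(y_hat, y)  # coupled objective
loss.backward()
\end{verbatim} At test time, only \texttt{x\_l} would be fed through \texttt{in\_src}, \texttt{enc}, and the predictor, mirroring the inference equations below.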
Our MCTN \ model is trained end-to-end with a coupled translation-prediction objective function defined as \begin{equation} \mathcal{L} = \lambda_t \mathcal{L}_t + \lambda_c \mathcal{L}_c + \mathcal{L}_p, \label{eq:objective} \end{equation} where $\lambda_t$ and $\lambda_c$ are weighting hyperparameters. MCTN \ parameters are learned by minimizing this objective function: \begin{align} \theta_e^*, \theta_d^*, w^* &= \argmin_{\theta_e,\theta_d,w} \ [\lambda_t \mathcal{L}_t + \lambda_c \mathcal{L}_c + \mathcal{L}_p]. \end{align} Parallel multimodal data is not required at test time. Inference is performed using only the source modality $\mathbf{X}^{S}$: \begin{align} {\mathcal{E}}_{S \leftrightarrows T} &= f_{\theta_e^*} (\mathbf{X}^{S}), \\ \hat{\mathbf{y}} &= g_{w^*}({\mathcal{E}}_{S \leftrightarrows T}). \end{align} This is possible because the encoder $f_{\theta_e^*}$ has been trained to translate the source modality $\mathbf{X}^{S}$ into a joint representation ${\mathcal{E}}_{S \leftrightarrows T}$ that captures information from both source and target modalities. \subsection{Hierarchical MCTN \ for Three Modalities} \begin{figure}[tbp] \centering \includegraphics[width=0.5\textwidth]{trimodal.pdf} \caption{ \small { Hierarchical MCTN \ for three modalities: the source modality $\mathbf{X}^{S}$ and the target modalities $\mathbf{X}^{T_1}$ and $\mathbf{X}^{T_2}$. The joint representation ${\mathcal{E}}_{S \leftrightarrows T_1}$ is obtained via a cyclic translation between $\mathbf{X}^{S}$ and $\mathbf{X}^{T_1}$, then further translated into $\mathbf{X}^{T_2}$. Next, the joint representation of all three modalities, ${\mathcal{E}}_{(S \leftrightarrows T_1) \rightarrow T_2}$, is used for sentiment prediction. The model is trained end-to-end with a coupled translation-prediction objective. At test time, only the source modality $\mathbf{X}^{S}$ is required for prediction.} } \label{fig:h_s2s} \end{figure} We extend the MCTN \ in a hierarchical manner to learn joint representations from more than two modalities. Figure~\ref{fig:h_s2s} shows the case for three modalities. The hierarchical MCTN \ starts with a source modality $\mathbf{X}^{S}$ and two target modalities $\mathbf{X}^{T_1}$ and $\mathbf{X}^{T_2}$. To learn joint representations, two levels of modality translations are performed. The first level learns a joint representation from $\mathbf{X}^{S}$ and $\mathbf{X}^{T_1}$ using multimodal cyclic translations as defined previously. At the second level, a joint representation is learned hierarchically by translating the first representation $\mathcal{E}_{S \rightarrow T_1}$ into $\mathbf{X}^{T_2}$. For more than three modalities, the modality translation process can be repeated hierarchically. Two Seq2Seq models are used in the hierarchical MCTN \ for three modalities, denoted as encoder-decoder pairs $(f_{\theta_e}^1,f_{\theta_d}^1)$ and $(f_{\theta_e}^2,f_{\theta_d}^2)$. A multimodal cyclic translation is first performed between the source modality $\mathbf{X}^{S}$ and the first target modality $\mathbf{X}^{T_1}$. 
The forward translation is defined as \begin{align} {\mathcal{E}}_{S \rightarrow T_1} = f_{\theta_e}^1 (\mathbf{X}^{S}_{tr}), \ \hat{\mathbf{X}}^{T_1}_{tr} = f_{\theta_d}^1 ({\mathcal{E}}_{S \rightarrow T_1}), \end{align} followed by translating the decoded modality $\hat{\mathbf{X}}^{T_1}$ back into modality $\mathbf{X}^{S}$: \begin{align} {\mathcal{E}}_{T_1 \rightarrow S} = f_{\theta_e}^1(\hat{\mathbf{X}}^{T_1}_{tr}), \ \hat{\mathbf{X}}^{S}_{tr} = f_{\theta_d}^1 ({\mathcal{E}}_{T_1 \rightarrow S}). \end{align} A second hierarchical Seq2Seq model is applied on the outputs of the first encoder $f_{\theta_e}^1$: \begin{align} {\mathcal{E}}_{S \leftrightarrows T_1} &= {\mathcal{E}}_{S \rightarrow T_1}, \\ {\mathcal{E}}_{(S \leftrightarrows T_1) \rightarrow T_2} = f_{\theta_e}^2 ({\mathcal{E}}_{S \leftrightarrows T_1})&, \ \hat{\mathbf{X}}^{T_2}_{tr} = f_{\theta_d}^2 ({\mathcal{E}}_{(S \leftrightarrows T_1) \rightarrow T_2}). \end{align} The joint representation between modalities $\mathbf{X}^{S}$, $\mathbf{X}^{T_1}$ and $\mathbf{X}^{T_2}$ is now ${\mathcal{E}}_{(S \leftrightarrows T_1) \rightarrow T_2}$. It is used for sentiment prediction via a recurrent neural network regression. Training the hierarchical MCTN \ involves computing a cycle consistency loss for modality $T_1$, given by the respective forward translation loss $\mathcal{L}_{t_1}$ and the cycle consistency loss $\mathcal{L}_{c_1}$. We do not use a cyclic translation loss when translating from ${\mathcal{E}}_{S \leftrightarrows T_1}$ to $\mathbf{X}^{T_2}$ since the ground truth ${\mathcal{E}}_{S \leftrightarrows T_1}$ is unknown, and so only the translation loss $\mathcal{L}_{t_2}$ is computed. The final objective for the hierarchical MCTN \ is given by \begin{align} \mathcal{L} = \lambda_{t_1} \mathcal{L}_{t_1} + \lambda_{c_1} \mathcal{L}_{c_1} + \lambda_{t_2} \mathcal{L}_{t_2} + \mathcal{L}_p. \end{align} We emphasize that for MCTN \ with three modalities, \textit{only} a single source modality $\mathbf{X}^{S}$ is required at test time. Therefore, MCTN \ has a significant advantage over existing models since it is robust to noisy or missing target modalities. \section{Experimental Setup}\label{sec:Expset} In this section, we describe our experimental methodology to evaluate the joint representations learned by MCTN.\footnote{Our source code is released at \url{https://github.com/hainow/MCTN}.} \newcolumntype{K}[1]{>{\centering\arraybackslash}p{#1}} \begin{table}[t!] 
\fontsize{7.5}{10}\selectfont \setlength\tabcolsep{1.0pt} \begin{tabular}{l : c: *{4}{K{1.24cm}}} \Xhline{3\arrayrulewidth} Dataset & & \multicolumn{4}{c}{\textbf{CMU-MOSI}} \\ Model & Test Inputs & Acc($\uparrow$) & F1($\uparrow$) & MAE($\downarrow$) & Corr($\uparrow$)\\ \Xhline{0.5\arrayrulewidth} RF & $\{\ell,v,a\}$ & 56.4 & 56.3 & - & - \\ SVM & $\{\ell,v,a\}$ & 71.6 & 72.3 & 1.100 & 0.559 \\ THMM & $\{\ell,v,a\}$ & 50.7 & 45.4 & - & - \\ EF-HCRF & $\{\ell,v,a\}$ & 65.3 & 65.4 & - & - \\ MV-HCRF & $\{\ell,v,a\}$ & 65.6 & 65.7 & - & - \\ DF & $\{\ell,v,a\}$ & 74.2 & 74.2 & 1.143 & 0.518 \\ EF-LSTM & $\{\ell,v,a\}$ & 74.3 & 74.3 & 1.023 & 0.622 \\ MV-LSTM & $\{\ell,v,a\}$ & 73.9 & 74.0 & 1.019 & 0.601 \\ BC-LSTM & $\{\ell,v,a\}$ & 75.2 & 75.3 & 1.079 & 0.614 \\ TFN & $\{\ell,v,a\}$ & 74.6 & 74.5 & 1.040 & 0.587 \\ GME-LSTM(A) & $\{\ell,v,a\}$ & 76.5 & 73.4 & 0.955 & - \\ MARN & $\{\ell,v,a\}$ & 77.1 & 77.0 & 0.968 & 0.625 \\ MFN & $\{\ell,v,a\}$ & 77.4 & 77.3 & 0.965 & 0.632 \\ LMF & $\{\ell,v,a\}$ & 76.4 & 75.7 & 0.912 & 0.668 \\ RMFN & $\{\ell,v,a\}$ & 78.4 & 78.0 & 0.922 & \textbf{0.681} \\ \Xhline{0.5\arrayrulewidth} {MCTN} & $\{\ell\}$ & \textbf{79.3} & \textbf{79.1} & \textbf{0.909} & 0.676 \\ \Xhline{3\arrayrulewidth} \end{tabular} \caption{ \small { Sentiment prediction results on CMU-MOSI. Best results are highlighted in bold. MCTN \ outperforms the current state-of-the-art across most evaluation metrics and uses only the language modality during testing.} } \label{mosi} \end{table} \begin{table}[tb] \fontsize{7.5}{10}\selectfont \setlength\tabcolsep{1.0pt} \begin{tabular}{l : c : *{2}{K{1.3cm}} : *{2}{K{1.3cm}}} \Xhline{3\arrayrulewidth} Dataset & & \multicolumn{2}{c:}{\textbf{ICT-MMMO}} & \multicolumn{2}{c}{\textbf{YouTube}} \\ Model & Test Inputs & Acc($\uparrow$) & F1($\uparrow$) & Acc($\uparrow$) & F1($\uparrow$) \\ \Xhline{0.5\arrayrulewidth} RF & $\{\ell,v,a\}$ & 70.0 & 69.8 & 33.3 & 32.3 \\ SVM & $\{\ell,v,a\}$ & 68.8 & 68.7 & 42.4 & 37.9 \\ THMM & $\{\ell,v,a\}$ & 53.8 & 53.0 & 42.4 & 27.9 \\ EF-HCRF & $\{\ell,v,a\}$ & 73.8 & 73.1 & 45.8 & 45.0 \\ MV-HCRF & $\{\ell,v,a\}$ & 68.8 & 67.1 & 44.1 & 44. \\ DF & $\{\ell,v,a\}$ & 65.0 & 58.7 & 45.8 & 32.0 \\ EF-LSTM & $\{\ell,v,a\}$ & 72.5 & 70.9 & 44.1 & 43.6 \\ MV-LSTM & $\{\ell,v,a\}$ & 72.5 & 72.3 & 45.8 & 43.3 \\ BC-LSTM & $\{\ell,v,a\}$ & 70.0 & 70.1 & 45.0 & 45.1 \\ TFN & $\{\ell,v,a\}$ & 72.5 & 72.6 & 45.0 & 41.0 \\ MARN & $\{\ell,v,a\}$ & 71.3 & 70.2 & 48.3 & 44.9 \\ MFN & $\{\ell,v,a\}$ & 73.8 & 73.1 & \textbf{51.7} & 51.6 \\ \Xhline{0.5\arrayrulewidth} {MCTN} & $\{\ell\}$ & \textbf{81.3} & \textbf{80.8} & \textbf{51.7} & \textbf{52.4} \\ \Xhline{3\arrayrulewidth} \end{tabular} \caption{ \small {Sentiment prediction results on ICT-MMMO and YouTube. Best results are highlighted in bold. MCTN \ outperforms the current state-of-the-art across most evaluation metrics and uses only the language modality during testing.} } \label{full} \end{table} \subsection{Dataset and Input Modalities} \label{sec:MOSI} We use the CMU Multimodal Opinion-level Sentiment Intensity dataset (CMU-MOSI), which contains 2199 video segments, each with a sentiment label in the range $[-3,+3]$. To be consistent with prior work, we use 52 videos for training, 10 for validation, and 31 for testing. The same speaker does not appear in both the training and testing sets, ensuring that our model learns speaker-independent representations. 
We also run experiments on ICT-MMMO~\cite{wollmer2013youtube} and YouTube~\cite{morency2011towards}, which consist of online review videos annotated for sentiment. \subsection{Multimodal Features and Alignment} Following previous work~\cite{multistage}, GloVe word embeddings~\cite{pennington2014glove}, Facet~\cite{emotient}, and COVAREP~\cite{degottex2014covarep} features are extracted for the language, visual, and acoustic modalities, respectively.\footnote{Details on feature extraction are in the supplementary material.} Forced alignment is performed using P2FA~\cite{P2FA} to obtain spoken word utterance times. The visual and acoustic features are aligned by computing their average over the utterance interval of each word. \subsection{Evaluation Metrics} For parameter optimization on CMU-MOSI, the prediction loss function is set as the Mean Absolute Error (MAE): $\ell_{\mathbf{y}} (\hat{\mathbf{y}}_{tr},\mathbf{y}_{tr}) = |\hat{\mathbf{y}}_{tr}-\mathbf{y}_{tr}|$. We report MAE and Pearson's correlation $r$. We also perform sentiment classification on CMU-MOSI and report binary accuracy (Acc) and F1 score (F1). On ICT-MMMO and YouTube, we set the prediction loss function as categorical cross-entropy and report sentiment classification accuracy and F1 score. For all metrics, higher values indicate stronger performance, except for MAE, where lower values indicate stronger performance. \subsection{Baseline Models} \label{sec:base} \begin{figure*}[ht] \centering \includegraphics[width=\textwidth]{variants.pdf} \caption{ \small {Variations of our models: (a) MCTN \ Bimodal with cyclic translation, (b) Simple Bimodal without cyclic translation, (c) No-Cycle Bimodal with different inputs of the same modality pair, and without cyclic translation, (d) Double Bimodal for two modalities without cyclic translation, with two different inputs (of the same pair), (e) MCTN \ Trimodal with input from (a), (f) Simple Trimodal for three modalities, with input as a joint representation taken from a previous MCTN \ for two modalities from (b) or (c), (g) Double Trimodal with input from (d), (h) Concat Trimodal, which is similar to (b) but with input as the concatenation of two modalities, (i) Paired Trimodal, using one encoder and two separate decoders for modality translations. \textit{Legend}: black modality is ground truth, red (``hat'') modality represents translated output, blue (``hat'') modality is target output from previous translation outputs, and yellow box denotes concatenation.} } \label{fig:variants} \end{figure*} \addtocounter{figure}{-1} We compare to the following multimodal models: \textit{RMFN}~\cite{multistage} uses a multistage approach to learn hierarchical representations (current state-of-the-art on CMU-MOSI). \textit{LMF}~\cite{lowrank} approximates the expensive tensor products in \textit{TFN}~\cite{tensoremnlp17} with efficient low-rank factors. \textit{MFN}~\cite{zadeh2018memory} synchronizes sequences using a multimodal gated memory. \textit{EF-LSTM} concatenates multimodal inputs and uses a single LSTM~\cite{Hochreiter:1997:LSM:1246443.1246450}. For a description of the other baselines, please refer to the supplementary material. \section{Results and Discussion} \label{sec:Results} This section presents and discusses our experimental results. \subsection{Comparison with Existing Work} \textit{Q1: How does MCTN \ compare with existing state-of-the-art approaches for multimodal sentiment analysis?} We compare MCTN \ with previous models.\footnote{For full results, please refer to the supplementary material.} 
From Table~\ref{mosi}, MCTN \ using language as the source modality achieves new state-of-the-art results on CMU-MOSI for multimodal sentiment analysis. State-of-the-art results are also achieved on ICT-MMMO and YouTube (Table~\ref{full}). It is important to note that MCTN \ only uses language during testing, while the other baselines use all three modalities. \subsection{Adding More Modalities} \textit{Q2: What is the impact of increasing the number of modalities during training for MCTN \ with cyclic translations?} \begin{figure}[!tb] \centering \setlength\tabcolsep{1.0pt} \begin{tabular}{ccc} \begin{subfigure}{0.33\linewidth}\centering\includegraphics[width=\columnwidth]{bimodal.png}\caption{ \centering \footnotesize{ \centering MCTN \ Bimodal \textit{without} cyclic translations} }\label{fig:taba}\end{subfigure} & \begin{subfigure}{0.33\linewidth}\centering\includegraphics[width=\columnwidth]{bimodal_cycle.png}\caption{ \centering \footnotesize{MCTN \ Bimodal \textit{with} cyclic translations} }\label{fig:tabb}\end{subfigure} & \begin{subfigure}{0.33\linewidth}\centering\includegraphics[width=\columnwidth]{trimodal_cycle.png}\caption{ \centering \footnotesize{MCTN \ Trimodal \textit{with} cyclic translations} }\label{fig:tabc}\end{subfigure}\\ \end{tabular} \captionof{figure}[]{ \small {t-SNE visualization of the joint representations learned by MCTN. \textit{Legend}: red: videos with negative sentiment, blue: videos with positive sentiment. Adding modalities and using cyclic translations improve discriminative performance and lead to increasingly separable representations.} } \label{fig:tsne} \end{figure} \begin{table}[t!] \fontsize{7.5}{10}\selectfont \setlength\tabcolsep{1.0pt} \begin{tabular}{l : *{16}{K{0.95cm}}} \Xhline{3\arrayrulewidth} Dataset & \multicolumn{5}{c}{\textbf{CMU-MOSI}} \\ Model & \multicolumn{1}{c}{Translation} & Acc($\uparrow$) & F1($\uparrow$) & MAE($\downarrow$) & Corr($\uparrow$)\\ \Xhline{0.5\arrayrulewidth} \multirow{3}{*}{MCTN \ Bimodal (\ref{fig:variants}a)} & \multicolumn{1}{c}{$V \leftrightarrows A$} & \multicolumn{1}{c}{{53.1}} & \multicolumn{1}{c}{{53.2}} & \multicolumn{1}{c}{{1.420}} & \multicolumn{1}{c}{{0.034}} \\ & \multicolumn{1}{c}{$T \leftrightarrows A$} & \multicolumn{1}{c}{{76.4}} & \multicolumn{1}{c}{{76.4}} & \multicolumn{1}{c}{{0.977}} & \multicolumn{1}{c}{{0.636}} \\ & \multicolumn{1}{c}{$T \leftrightarrows V$} & \multicolumn{1}{c}{{76.8}} & \multicolumn{1}{c}{{76.8}} & \multicolumn{1}{c}{{1.034}} & \multicolumn{1}{c}{{0.592}} \\ \Xhline{0.5\arrayrulewidth} \multirow{3}{*}{MCTN \ Trimodal (\ref{fig:variants}e)} & \multicolumn{1}{c}{$(V \leftrightarrows A) \rightarrow T$} & \multicolumn{1}{c}{{56.4}} & \multicolumn{1}{c}{{56.3}} & \multicolumn{1}{c}{{1.455}} & \multicolumn{1}{c}{{0.151}} \\ & \multicolumn{1}{c}{$(T \leftrightarrows A) \rightarrow V$} & \multicolumn{1}{c}{{78.7}} & \multicolumn{1}{c}{{78.8}} & \multicolumn{1}{c}{{0.960}} & \multicolumn{1}{c}{{0.650}} \\ & \multicolumn{1}{c}{$(T \leftrightarrows V) \rightarrow A$} & \multicolumn{1}{c}{\textbf{79.3}} & \multicolumn{1}{c}{\textbf{79.1}} & \multicolumn{1}{c}{\textbf{0.909}} & \multicolumn{1}{c}{\textbf{0.676}} \\ \Xhline{3\arrayrulewidth} \end{tabular} \caption{ \small { MCTN \ performance improves as more modalities are introduced for cyclic translations during training.} } \label{tbl:trimodal} \end{table} We run experiments with MCTN \ using combinations of two or three modalities with cyclic translations. 
From Table~\ref{tbl:trimodal}, we observe that adding more modalities improves performance, indicating that the joint representations leverage the information from the additional input modalities. This also implies that cyclic translations are a viable method for learning joint representations from multiple modalities, since little information is lost when more modality translations are added. Another observation is that using language as the source modality always leads to the best performance, which is intuitive since the language modality contains the most discriminative information for sentiment~\cite{tensoremnlp17}. In addition, we visually inspect the joint representations learned by MCTN \ as we add more modalities during training (see Figure~\ref{fig:tsne}). The joint representations for each segment in CMU-MOSI are extracted from the best performing model for each number of modalities and then projected into two dimensions via the t-SNE algorithm~\cite{vanDerMaaten2008}. Each point is colored red (negative sentiment) or blue (positive sentiment) according to the video segment's annotation. From Figure~\ref{fig:tsne}, we observe that the joint representations become increasingly separable as more modalities are added during training. This is consistent with the increasing discriminative performance with more modalities (as seen in Table~\ref{tbl:trimodal}). \subsection{Ablation Studies} \begin{table}[t!] \fontsize{7.5}{10}\selectfont \setlength\tabcolsep{1.0pt} \begin{tabular}{l : *{16}{K{0.85cm}}} \Xhline{3\arrayrulewidth} Dataset & \multicolumn{5}{c}{\textbf{CMU-MOSI}} \\ Model & \multicolumn{1}{c}{Translation} & Acc($\uparrow$) & F1($\uparrow$) & MAE($\downarrow$) & Corr($\uparrow$)\\ \Xhline{0.5\arrayrulewidth} \multirow{3}{*}{MCTN \ Bimodal (\ref{fig:variants}a)} & \multicolumn{1}{c}{$V \leftrightarrows A$} & \multicolumn{1}{c}{{53.1}} & \multicolumn{1}{c}{{53.2}} & \multicolumn{1}{c}{{1.420}} & \multicolumn{1}{c}{{0.034}} \\ & \multicolumn{1}{c}{$T \leftrightarrows A$} & \multicolumn{1}{c}{{76.4}} & \multicolumn{1}{c}{{76.4}} & \multicolumn{1}{c}{{0.977}} & \multicolumn{1}{c}{\textbf{0.636}} \\ & \multicolumn{1}{c}{$T \leftrightarrows V$} & \multicolumn{1}{c}{\textbf{76.8}} & \multicolumn{1}{c}{\textbf{76.8}} & \multicolumn{1}{c}{{1.034}} & \multicolumn{1}{c}{{0.592}} \\ \Xhline{0.5\arrayrulewidth} \multirow{3}{*}{Simple Bimodal (\ref{fig:variants}b)} & \multicolumn{1}{c}{$V \rightarrow A$} & \multicolumn{1}{c}{{55.4}} & \multicolumn{1}{c}{{55.5}} & \multicolumn{1}{c}{{1.422}} & \multicolumn{1}{c}{{0.119}} \\ & \multicolumn{1}{c}{$T \rightarrow A$} & \multicolumn{1}{c}{{74.2}} & \multicolumn{1}{c}{{74.2}} & \multicolumn{1}{c}{{0.988}} & \multicolumn{1}{c}{{0.616}} \\ & \multicolumn{1}{c}{$T \rightarrow V$} & \multicolumn{1}{c}{{75.7}} & \multicolumn{1}{c}{{75.6}} & \multicolumn{1}{c}{{1.002}} & \multicolumn{1}{c}{{0.617}} \\ \Xhline{0.5\arrayrulewidth} \multirow{3}{*}{No-Cycle Bimodal (\ref{fig:variants}c)} & \multicolumn{1}{c}{$V \rightarrow A, \, A \rightarrow V$} & \multicolumn{1}{c}{{55.4}} & \multicolumn{1}{c}{{55.5}} & \multicolumn{1}{c}{{1.422}} & \multicolumn{1}{c}{{0.119}} \\ & \multicolumn{1}{c}{$T \rightarrow A, \, A \rightarrow T$} & \multicolumn{1}{c}{{75.5}} & \multicolumn{1}{c}{{75.6}} & \multicolumn{1}{c}{\textbf{0.971}} & \multicolumn{1}{c}{{0.629}} \\ & \multicolumn{1}{c}{$T \rightarrow V, \, V \rightarrow T$} & \multicolumn{1}{c}{{75.2}} & \multicolumn{1}{c}{{75.3}} & \multicolumn{1}{c}{{0.972}} & \multicolumn{1}{c}{{0.627}} \\ 
\Xhline{0.5\arrayrulewidth} \multirow{4}{*}{Double Bimodal (\ref{fig:variants}d)} & \multicolumn{1}{c}{$[V \rightarrow A, A \rightarrow V]$} & \multicolumn{1}{c}{{57.0}} & \multicolumn{1}{c}{{57.1}} & \multicolumn{1}{c}{{1.502}} & \multicolumn{1}{c}{{0.168}} \\ & \multicolumn{1}{c}{$[T \rightarrow A, A \rightarrow T]$} & \multicolumn{1}{c}{{72.3}} & \multicolumn{1}{c}{{72.3}} & \multicolumn{1}{c}{{1.035}} & \multicolumn{1}{c}{{0.578}} \\ & \multicolumn{1}{c}{$[T \rightarrow V, V \rightarrow T]$} & \multicolumn{1}{c}{{73.3}} & \multicolumn{1}{c}{{73.4}} & \multicolumn{1}{c}{{1.020}} & \multicolumn{1}{c}{{0.570}} \\ \Xhline{3\arrayrulewidth} \end{tabular} \caption{ \small { Bimodal variations results on CMU-MOSI dataset. MCTN \ Bimodal with cyclic translations performs best.} } \label{tbl:baselines} \end{table} \begin{table}[t!] \fontsize{7.5}{10}\selectfont \setlength\tabcolsep{1.0pt} \begin{tabular}{l : *{16}{K{0.75cm}}} \Xhline{3\arrayrulewidth} Dataset & \multicolumn{5}{c}{\textbf{CMU-MOSI}} \\ Model & \multicolumn{1}{c}{Translation} & Acc($\uparrow$) & F1($\uparrow$) & MAE($\downarrow$) & Corr($\uparrow$)\\ \Xhline{0.5\arrayrulewidth} \multirow{3}{*}{MCTN \ Trimodal (\ref{fig:variants}e)} & \multicolumn{1}{c}{$(V \leftrightarrows A) \rightarrow T$} & \multicolumn{1}{c}{{56.4}} & \multicolumn{1}{c}{{56.3}} & \multicolumn{1}{c}{{1.455}} & \multicolumn{1}{c}{{0.151}} \\ & \multicolumn{1}{c}{$(T \leftrightarrows A) \rightarrow V$} & \multicolumn{1}{c}{{78.7}} & \multicolumn{1}{c}{{78.8}} & \multicolumn{1}{c}{{0.960}} & \multicolumn{1}{c}{{0.650}} \\ & \multicolumn{1}{c}{$(T \leftrightarrows V) \rightarrow A$} & \multicolumn{1}{c}{\textbf{79.3}} & \multicolumn{1}{c}{\textbf{79.1}} & \multicolumn{1}{c}{\textbf{0.909}} & \multicolumn{1}{c}{\textbf{0.676}} \\ \Xhline{0.5\arrayrulewidth} \multirow{6}{*}{Simple Trimodal (\ref{fig:variants}f)} & \multicolumn{1}{c}{$(V \rightarrow T) \rightarrow A$} & \multicolumn{1}{c}{{54.1}} & \multicolumn{1}{c}{{52.9}} & \multicolumn{1}{c}{{1.408}} & \multicolumn{1}{c}{{0.040}} \\ & \multicolumn{1}{c}{$(V \rightarrow A) \rightarrow T$} & \multicolumn{1}{c}{{52.0}} & \multicolumn{1}{c}{{51.9}} & \multicolumn{1}{c}{{1.439}} & \multicolumn{1}{c}{{0.015}} \\ & \multicolumn{1}{c}{$(A \rightarrow V) \rightarrow T$} & \multicolumn{1}{c}{{56.6}} & \multicolumn{1}{c}{{56.7}} & \multicolumn{1}{c}{{1.593}} & \multicolumn{1}{c}{{0.067}} \\ & \multicolumn{1}{c}{$(A \rightarrow T) \rightarrow V$} & \multicolumn{1}{c}{{54.1}} & \multicolumn{1}{c}{{54.2}} & \multicolumn{1}{c}{{1.577}} & \multicolumn{1}{c}{{0.028}} \\ & \multicolumn{1}{c}{$(T \rightarrow A) \rightarrow V$} & \multicolumn{1}{c}{{74.3}} & \multicolumn{1}{c}{{74.4}} & \multicolumn{1}{c}{{1.001}} & \multicolumn{1}{c}{{0.609}} \\ & \multicolumn{1}{c}{$(T \rightarrow V) \rightarrow A$} & \multicolumn{1}{c}{{74.3}} & \multicolumn{1}{c}{{74.4}} & \multicolumn{1}{c}{{0.997}} & \multicolumn{1}{c}{{0.596}} \\ \Xhline{0.5\arrayrulewidth} \multirow{1}{*}{Double Trimodal (\ref{fig:variants}g)} & \multicolumn{1}{c}{$[T \rightarrow V, \, V \rightarrow T] \rightarrow A$} & \multicolumn{1}{c}{{73.3}} & \multicolumn{1}{c}{{73.1}} & \multicolumn{1}{c}{{1.058}} & \multicolumn{1}{c}{{0.578}} \\ \Xhline{0.5\arrayrulewidth} \multirow{7}{*}{Concat Trimodal (\ref{fig:variants}h)} & \multicolumn{1}{c}{$[V, A] \rightarrow T$} & \multicolumn{1}{c}{{55.0}} & \multicolumn{1}{c}{{54.6}} & \multicolumn{1}{c}{{1.535}} & \multicolumn{1}{c}{{0.176}} \\ & \multicolumn{1}{c}{$[A, T] \rightarrow V$} & \multicolumn{1}{c}{{73.3}} & 
\multicolumn{1}{c}{{73.4}} & \multicolumn{1}{c}{{1.060}} & \multicolumn{1}{c}{{0.561}} \\ & \multicolumn{1}{c}{$[T, V] \rightarrow A$} & \multicolumn{1}{c}{{72.3}} & \multicolumn{1}{c}{{72.3}} & \multicolumn{1}{c}{{1.068}} & \multicolumn{1}{c}{{0.576}} \\ & \multicolumn{1}{c}{$A \rightarrow [T, V]$} & \multicolumn{1}{c}{{55.5}} & \multicolumn{1}{c}{{55.6}} & \multicolumn{1}{c}{{1.617}} & \multicolumn{1}{c}{{0.056}} \\ & \multicolumn{1}{c}{$T \rightarrow [A, V]$} & \multicolumn{1}{c}{{75.7}} & \multicolumn{1}{c}{{75.7}} & \multicolumn{1}{c}{{0.958}} & \multicolumn{1}{c}{{0.634}} \\ & \multicolumn{1}{c}{$[T, A] \rightarrow [T, V]$} & \multicolumn{1}{c}{{73.2}} & \multicolumn{1}{c}{{73.2}} & \multicolumn{1}{c}{{1.008}} & \multicolumn{1}{c}{{0.591}} \\ & \multicolumn{1}{c}{$[T, V] \rightarrow [T, A]$} & \multicolumn{1}{c}{{74.1}} & \multicolumn{1}{c}{{74.1}} & \multicolumn{1}{c}{{0.999}} & \multicolumn{1}{c}{{0.607}} \\ \Xhline{0.5\arrayrulewidth} Paired Trimodal (\ref{fig:variants}i) & \multicolumn{1}{c}{$[T \rightarrow A, T \rightarrow V]$} & 73.8 & 73.8 & 1.022 & 0.611 \\ \Xhline{3\arrayrulewidth} \end{tabular} \caption{ \small { Trimodal variations results on CMU-MOSI dataset. MCTN \ (hierarchical) with cyclic translations performs best.} } \label{tbl:ablation} \end{table} We use several models to test our design decisions. Specifically, we evaluate the impact of cyclic translations, modality ordering, and hierarchical structure. For bimodal MCTN, we design the following ablation models shown in the left half of Figure~\ref{fig:variants}: (a) MCTN \ bimodal between $\mathbf{X}^S$ and $\mathbf{X}^T$, (b) simple bimodal by translating from $\mathbf{X}^S$ to $\mathbf{X}^T$ without cyclic loss, (c) no-cycle bimodal which does not use cyclic translations but rather performs two independent translations between $\mathbf{X}^S$ and $\mathbf{X}^T$, (d) double bimodal: two seq2seq models with different inputs (of the same modality pair) and then using the concatenation of the joint representations ${\mathcal{E}}_{S \rightarrow T}$ and ${\mathcal{E}}_{T \rightarrow S}$ as the final embeddings. For trimodal MCTN, we design the following ablation models shown in the right half of Figure~\ref{fig:variants}: (e) MCTN \ trimodal which uses the proposed hierarchical translations between $\mathbf{X}^S$, $\mathbf{X}^{T_1}$ and $\mathbf{X}^{T_2}$, (f) simple trimodal based on translation from $\mathbf{X}^S$ to $\mathbf{X}^{T_1}$ without cyclic translations, (g) double trimodal extended from (d) which does not use cyclic translations but rather performs two independent translations between $\mathbf{X}^S$ and $\mathbf{X}^{T_1}$, (h) concat trimodal which does not perform a first level of cyclic translation but directly translates the concatenated modality pair $[\mathbf{X}^S, \mathbf{X}^{T_1}]$ into $\mathbf{X}^{T_2}$, and finally, (i) paired trimodal which uses two separate decoders on top of the intermediate representation. \textit{Q3: What is the impact of cyclic translations in MCTN?} The bimodal results are in Table~\ref{tbl:baselines}. The models that employ cyclic translations (Figure~\ref{fig:variants}(a)) outperform all other models. The trimodal results are in Table~\ref{tbl:ablation} and we make a similar observation: Figure~\ref{fig:variants}(e) with cyclic translations outperforms the baselines (f), (g) and (h). The gap for the trimodal case is especially large. This implies that using cyclic translations is crucial for learning discriminative joint representations. 
Our intuition is that using cyclic translations: (1) encourages the model to enforce symmetry between the representations of the source and target modalities, thus acting as a source of regularization, and (2) ensures that the representation retains maximal information from all modalities. \textit{Q4: What is the effect of using two Seq2Seq models instead of one shared Seq2Seq model for cyclic translations?} We compare Figure~\ref{fig:variants}(c), which uses one Seq2Seq model for both translation directions, with Figure~\ref{fig:variants}(d), which uses two separate Seq2Seq models: one for forward translation and one for backward translation. We observe from Table~\ref{tbl:baselines} that (c) $>$ (d), so using one model with shared parameters is better. This is also true for the hierarchical MCTN: (f) $>$ (g) in Table~\ref{tbl:ablation}. We hypothesize that this is because training two deep Seq2Seq models requires more data and is prone to overfitting. Moreover, it does not learn a single joint representation but rather two separate ones. \textit{Q5: What is the impact of varying source and target modalities for cyclic translations?} From Tables \ref{tbl:trimodal}, \ref{tbl:baselines} and \ref{tbl:ablation}, we observe that language contributes most towards the joint representations. For bimodal cases, combining language with the visual modality is generally better than combining language with the acoustic modality. For the hierarchical MCTN, using language as the source modality leads to the best performance, and a first level of cyclic translations between language and visual is better than between language and audio. On the other hand, only translating between the visual and acoustic modalities dramatically decreases performance, and further adding language as a target modality in the hierarchical MCTN \ does not help much either. Overall, for the MCTN, language appears to be the most discriminative modality, making it crucial to use it as the source modality during translations. \textit{Q6: What is the impact of using two levels of translations instead of one level when learning from three modalities?} Our hierarchical MCTN \ is shown in Figure~\ref{fig:variants}(e). In Figure~\ref{fig:variants}(h), we concatenate two modalities as input and use only one level of translation. From Table \ref{tbl:ablation}, we observe that (e) $>$ (h): both levels of modality translations are important in the hierarchical MCTN. We believe that representation learning is easier when the task is broken down recursively: using two translations, each between a single pair of modalities, rather than a single translation between all modalities. \section{Conclusion} This paper investigated learning joint representations via cyclic translations from source to target modalities. During testing, we only need the source modality for prediction, which ensures robustness to noisy or missing target modalities. We demonstrate that cyclic translations and Seq2Seq models are useful for learning joint representations in multimodal environments. In addition to achieving new state-of-the-art results on three datasets, our model learns increasingly discriminative joint representations with more input modalities while maintaining robustness to all target modalities. \section{Acknowledgements} PPL and LPM are partially supported by the NSF (Award \#1833355) and Oculus VR. HP and BP are supported by NSF grant IIS1563887 and the DARPA D3M program. 
Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation, DARPA, or Oculus VR, and no official endorsement should be inferred. The authors thank Hieu Pham, Amir Zadeh, and the anonymous reviewers for useful discussions and feedback.
\section{THE MILAGRO OBSERVATORY} Milagro is a TeV gamma-ray detector that uses the water Cherenkov technique to detect extensive air showers produced by very high energy (VHE, $>$ 100 GeV) gamma rays as they interact with the Earth's atmosphere. Milagro is located in the Jemez Mountains of northern New Mexico, at an altitude of 2630 m, has a field of view of $\sim$2 sr and a duty cycle greater than 90\%. The effective area of Milagro is a function of zenith angle and ranges from $\sim 10\;\mathrm{m}^{2}$ at 100 GeV to $\sim 10^{5}\;\mathrm{m}^{2}$ at 10 TeV. A sparse array of 175 water tanks, each holding 4000 liters and containing an individual PMT, was recently added. These additional detectors, known as ``outriggers'' (Figure~\ref{outrigger}), extend the physical area of Milagro to 40,000 $\mathrm{m}^{2}$, substantially increasing the sensitivity of the instrument and lowering the energy threshold. The angular resolution is approximately 0.75 degrees without the outriggers and 0.45 degrees with them. \begin{figure}[t] \centering \includegraphics[width=70mm]{figure1.eps} \caption{One of 175 outriggers (recently added to Milagro).} \label{outrigger} \end{figure} \section{THE ALL-SKY SEARCH} A full survey of the northern hemisphere (declination of 1.1--80$^{\circ}$) for point sources was carried out~\citep{atkins04}. Figure~\ref{all-sky-map} shows the map of the northern hemisphere in TeV gamma rays for the Milagro data set between 15 December 2000 and 25 November 2003. Table~\ref{table1} lists all locations with an excess greater than 4$\sigma$. The last column represents the 95\% confidence upper limit on the flux, in units of the Crab. The Crab and Mkn 421 are both clearly visible in the map, and are listed in the sixth and seventh rows, respectively, of Table~\ref{table1}. A square bin of $2.1^{\circ}$ in declination ($\delta$) and [2.1/cos($\delta$)]$^{\circ}$ in right ascension was used in this analysis. To detect other possible sources of VHE emission, it is necessary to survey the sky at many different timescales and at many different bin sizes. In addition to searching for steady point sources of TeV gamma rays, Milagro has searched for short bursts of TeV gamma rays~\citep{2004ApJ...604L..25A}, for TeV emission from the direction of satellite-detected GRBs~\citep{saz04}, and for extended sources of TeV emission~\citep{smith04}, as we summarize in this paper. \begin{figure*}[t] \centering \includegraphics[width=170mm]{figure2.eps} \caption{Northern hemisphere as seen in TeV gamma rays. 
At each point, the excess is summed over a 2.1 by [$2.1/\cos(\delta)$]$^{\circ}$ bin, and the significance of the excess in standard deviations is shown by the color scale (Figure from~\cite{atkins04}).} \label{all-sky-map} \end{figure*} \begin{table}[t] \begin{center} \caption{Locations of All Regions with an Excess Greater than 4$\sigma$} \begin{tabular}{|c|c|c|c|c|c|c|} \hline \textbf{R.A.~($^{\circ}$)} & \textbf{Decl.~($^{\circ}$)} & \textbf{ON} & \textbf{OFF} & \textbf{Excess} & \textbf{$\sigma$} & \textbf{UL (Crab)} \\ \hline 0.3 & 34.3 & $3.12308\times10^{6}$ & $3.11456\times10^{6}$ & 8623 & 4.7 & 0.84 \\ 37.8 & 6.7 & $7.02166\times10^{5}$ & $6.98667\times10^{5}$ & 3498 & 4.0 & 1.8 \\ 43.6 & 4.8 & $5.85952\times10^{5}$ & $5.82716\times10^{5}$ & 3236 & 4.1 & 2.0 \\ 49.1 & 22.5 & $2.21431\times10^{6}$ & $2.20813\times10^{6}$ & 6175 & 4.0 & 0.87 \\ 79.9 & 26.8 & $2.57841\times10^{6}$ & $2.57025\times10^{6}$ & 8161 & 4.9 & 0.97 \\ 83.6 & 22.0 & $2.17188\times10^{6}$ & $2.16222\times10^{6}$ & 9665 & 6.3 & NA \\ 166.5 & 38.6 & $3.23552\times10^{6}$ & $3.22467\times10^{6}$ & 10850 & 5.8 & NA \\ 306.6 & 38.9 & $3.25329\times10^{6}$ & $3.24531\times10^{6}$ & 7983 & 4.2 & 0.78 \\ 313.0 & 32.2 & $3.08380\times10^{6}$ & $3.07548\times10^{6}$ & 8320 & 4.5 & 0.85 \\ 339.1 & 72.5 & $6.63534\times10^{5}$ & $6.59727\times10^{5}$ & 3807 & 4.2 & 3.02 \\ 356.4 & 29.5 & $2.98656\times10^{6}$ & $2.97910\times10^{6}$ & 7455 & 4.1 & 0.84 \\ \hline \end{tabular} \label{table1} \end{center} \end{table} \section{THE GALACTIC PLANE} Diffuse emission from the Galactic plane is the dominant source in the gamma-ray sky~\citep{hunter97}. Most of the diffuse VHE emission from the Galactic plane is thought to be produced by the interaction of cosmic-ray hadrons with the interstellar matter. The flux measured by EGRET below 1 GeV fits models well, but that measured between 1 and 40 GeV is significantly larger than what is predicted by most models. One possible explanation for this enhanced emission is the inverse-Compton scattering of cosmic-ray electrons~\citep{1997JPhG...23.1765P}. If this turns out to be the dominant source of diffuse gamma-ray emission from the Galactic plane, then the flux at TeV energies could be an order of magnitude higher than previously thought. Figure~\ref{egret_figure} shows the EGRET observations of the Galactic plane. The top panel shows the all-sky map, in Galactic coordinates, produced by EGRET, clearly showing the Galactic plane. The bottom panel shows the EGRET diffuse GeV flux (in black) along with the Milagro exposure (in red), indicating that the Milagro observation region is optimized for the detection of the diffuse emission predicted by the EGRET measurements. Using 36 months of data, from 19 July 2000 to 18 July 2003, we examined the inner (40--100 degrees) and outer (140--220 degrees) regions of the Galaxy~\citep{fleysher04}. While the outer Galaxy shows no significant excess, the inner Galaxy shows a 5$\sigma$ excess~\citep{fleysher04}. Figure~\ref{profile} shows the profile in latitude for the longitude band (40 to 100 degrees) of the inner Galactic region (left panel) and the profile in longitude for the latitude band ($-5$ to 5 degrees) of the inner Galactic region (right panel), where the enhancement can be seen just north of the equator. Figure~\ref{galaxy} shows the significance map of the Galaxy. The region of the inner Galaxy shows an enhancement along and just north of the Galactic equator. This is the same region where EGRET detected the strongest signal in the 100 MeV energy range. The 5$\sigma$ excess is obtained by summing over the entire inner Galaxy within a $\pm 5^{\circ}$ latitude band, as suggested by the EGRET results. We note that the Milagro observation of the Galactic plane remains significant even when the region around the Cygnus Arm is excluded. 
This constitutes the first detection of the Galactic plane at TeV energies. \begin{figure}[t] \begin{center} \begin{tabular}{c} \includegraphics[width=80mm]{figure6a.eps} \\ \includegraphics[width=80mm]{figure6b.eps} \\ \end{tabular} \caption{{\bf Top} -- EGRET all-sky map showing Galactic plane emission above 100 MeV. {\bf Bottom} -- EGRET diffuse GeV flux (in black) along with the Milagro exposure (in red).} \label{egret_figure} \end{center} \end{figure} \begin{figure*}[t] \begin{center} \begin{tabular}{cc} \includegraphics[width=85mm]{figure7a.eps} & \includegraphics[width=85mm]{figure7b.eps} \\ \end{tabular} \caption{{\bf Left} -- Profile of the fractional excess as a function of Galactic latitude (for a Galactic longitude between 40 and 100 degrees). {\bf Right} -- Profile of the fractional excess as a function of Galactic longitude (for a Galactic latitude between $-5$ and 5 degrees). The EGRET longitudinal source shape is superposed. (Figures from~\cite{atkins05})} \label{profile} \end{center} \end{figure*} \begin{figure*}[t] \centering \includegraphics[width=150mm]{figure8.eps} \caption{Map in Galactic coordinates of Milagro significances in $5^{\circ}$ by $5^{\circ}$ bins. The light area has no Milagro exposure (Figure from~\cite{atkins05}).} \label{galaxy} \end{figure*} \section{EXTENDED SOURCES} A search for extended emission was carried out for the Milagro data collected between 17 August 2000 and 5 May 2004~\citep{smith04}. A set of standard cuts has been developed by the Milagro collaboration and validated by observations of the Crab~\citep{atkins03} and Mkn 421~\citep{atkins04}. Events with 20 or more PMT hits used by the angle fitter (NFIT $\geq$ 20) are kept; this rejects about 20\% of events, which have poor fits. In addition, we cut on a parameter known as ``compactness''~\citep{atkins04} (X2 $\geq$ 2.5) to retain 50\% of the gamma rays while removing more than 90\% of the background protons. The excess at each position is determined by counting the number of events in a particular bin and subtracting the estimated background. The background is computed from data collected at the same local detector coordinates, but at a different time, ensuring that the celestial angles of the background event sample do not overlap with the source position under consideration. The method of \cite{lima} is used to compute the significance of each excess. While the optimal square bin for detection of point sources with Milagro is 2.1 degrees on each side~\citep{atkins04}, to look for diffuse sources the standard Milagro sky maps were searched using a range of bin sizes from 2.1 to 5.9 degrees in steps of 0.2 degrees. Twenty separate searches were thus performed on the same maps, though the results are highly correlated. Monte Carlo simulations were used to compute the post-trials probability for each source candidate. \subsection{3EG J0520+2556} The most significant candidate found in our search for extended sources had a pre-trials significance of 5.9$\sigma$, located at RA $= 79.8^{\circ}$ and Dec $= 26.0^{\circ}$, and was identified using a $2.9^{\circ}$ bin size. The probability of observing an excess this significant at any point in the sky at any bin size is 0.8\%. Figure~\ref{egret_both} (left panel) shows the map of significances around the source, which is located $\sim$5.5 degrees from the Crab. This candidate was first reported in 2002~\citep{sinnis02}. The cumulative significance using only data since it was first reported is 3.7$\sigma$.
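All of the significances quoted in this search derive from the \cite{lima} prescription just described. For concreteness, a minimal {\sf R} sketch of that computation (Eq.~17 of \cite{lima}) is given below; the counts and exposure ratio in the example call are purely hypothetical, and this illustrates the formula only, not the Milagro production analysis:
\begin{verbatim}
# Li & Ma (1983), Eq. 17: significance of an ON-source excess.
# non, noff are ON/OFF event counts; alpha is the ON/OFF exposure ratio.
lima_significance <- function(non, noff, alpha) {
  s_on  <- non  * log(((1 + alpha) / alpha) * (non  / (non + noff)))
  s_off <- noff * log((1 + alpha) * (noff / (non + noff)))
  sign(non - alpha * noff) * sqrt(2 * (s_on + s_off))
}

lima_significance(non = 120, noff = 200, alpha = 0.5)  # hypothetical counts
\end{verbatim}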
The right panel of Figure~\ref{egret_both} shows the accumulation of significance with time, indicating that the excess increases at a roughly constant rate and shows no periods of significant flaring. This candidate is coincident with the EGRET unidentified source 3EG J0520+2556 (see Figure~\ref{egret_source}). \begin{figure*}[t] \begin{center} \begin{tabular}{cc} \includegraphics[width=85mm]{figure3a.eps} & \includegraphics[width=85mm,height=82mm]{figure3b.eps} \\ \end{tabular} \caption{{\bf Left} -- Milagro significance map, showing the Crab in the center and the TeV source coincident with EGRET source 3EG J0520+2556 to the left. {\bf Right} -- Accumulated significance on TeV 0520+2556 as a function of time (Figure from~\cite{smith04}).} \label{egret_both} \end{center} \end{figure*} \begin{figure}[t] \centering \includegraphics[width=70mm]{figure4.eps} \caption{EGRET source location and contours. We include the position reported by Milagro in 2002~\citep{sinnis02}, as well as the current location of maximum significance (Figure from~\cite{smith04}).} \label{egret_source} \end{figure} \subsection{The Cygnus Arm} The second extended source candidate is coincident with the region known as the Cygnus Arm, a spiral arm within our Galaxy that extends radially away from observers on Earth. This region is known to be dense with gas and dust, and was observed by EGRET as the brightest source of GeV gamma rays in the northern sky, with a diffuse GeV emission comparable to that of the Galactic bulge. Like the emission from the Galactic plane region, VHE emission from the Cygnus Arm is thought to originate mainly from interactions of cosmic rays with the interstellar gas and dust. A 5.5$\sigma$ excess was detected using a $5.9^{\circ}$ bin, at RA $= 308^{\circ}$ and Dec $= 42^{\circ}$. The probability of observing an excess this significant at any point in the sky at any bin size is 2.0\%. Figure~\ref{cygnus_region} shows the significance map of the region of the sky containing the Cygnus Arm, with the Galactic plane region from $l = 20^{\circ}$ to $l = 100^{\circ}$ and $b = \pm 5^{\circ}$ superimposed on the plot. The excess observed by Milagro is inconsistent with a point source, and the number of events coming from the entire 5.9$^{\circ}$ bin is approximately twice that of the Crab. As in the case of 3EG J0520+2556, the accumulation of the excess is steady, and no evidence for flaring is observed. While this is an extremely bright region, making it the hottest spot in the Galactic plane, it is not surprising that it has not yet been detected by any of the atmospheric Cherenkov telescopes, given the diffuse nature of the source and the limited field of view of such telescopes. \begin{figure*}[t] \centering \includegraphics[width=130mm]{figure5.eps} \caption{Milagro significance map, showing a clear excess in the Cygnus region. Also shown is the Galactic plane region from a longitude of 20 to 100 degrees and a latitude of $-5$ to 5 degrees (Figure from~\cite{smith04}).} \label{cygnus_region} \end{figure*} \section{CONCLUSION} Milagro has detected three previously unknown diffuse sources of TeV gamma rays. Milagro has reported the first observation of diffuse TeV emission from the Galactic plane. In addition, two other diffuse sources have been detected: the first coincident with 3EG J0520+2556 and the second coincident with the Cygnus Arm, at 5.9$\sigma$ and 5.5$\sigma$ respectively.
When all trials of the all-sky search are considered, the probabilities of these excesses being due to background fluctuations are 0.8\% and 2.0\% respectively. The source coincident with 3EG J0520+2556 was previously reported by Milagro as a ``hot spot''. \bigskip \begin{acknowledgments} Many people helped bring Milagro to fruition. In particular, we acknowledge the efforts of Scott DeLay, Neil Thompson and Michael Schneider. This work has been supported by the National Science Foundation (under grants PHY-0075326, -0096256, -0097315, -0206656, -0245143, -0245234, -0302000, and ATM-0002744), the US Department of Energy (Office of High-Energy Physics and Office of Nuclear Physics), Los Alamos National Laboratory, the University of California, and the Institute of Geophysics and Planetary Physics. \end{acknowledgments} \bigskip
\section{Introduction} \label{sec:intro} Computer simulation experiments play an increasingly pivotal role in scientific inquiry. STEM training, cheap hardware, and robust numerical libraries have democratized simulation as a means of exploration of complex physical \citep[e.g.,][]{mehta2014modeling}, biological \citep[e.g.,][]{johnson:2008}, engineering/materials \cite[e.g.,][]{zhang2015microstructure}, and economic phenomena \citep[e.g.,][]{kita2016realistic}, to provide a few representative examples. Simulation in industry borrows elements from all of the above depending on the nature of the application/business. We are interested in the use of simulation in the development of a so-called {\em honeycomb seal}, a device integral to oil and gas recovery, together with colleagues at Baker Hughes, a General Electric company (BHGE). The honeycomb experiment mirrors a common setup in engineering applications, leveraging a general purpose simulator called {\tt ISOTSEAL}, a commercial spin-off of tools developed first at Texas A\&M \citep{isotseal}. Adapting a general-purpose solver like {\tt ISOTSEAL} to a particular application like BHGE's honeycomb is often a two-stage process. One first adapts configuration files to reflect the known design and operational parameters/conditions of the particular instance under study. Inevitably some of these computer model settings are unknown (precisely enough) in the actual system, so there is interest in {\em calibrating} these parameters based on a limited physical/field experimentation campaign. Simulations, collected under a range of settings of both unknown {\em calibration parameters} and known {\em design inputs} characterizing operating conditions, can then be ``matched'' with analogs observed in the field, and their residuals can drive inference for the unknown settings, comprising the second stage. Although there are several ways to operationalize that idea, the canonical setup in the computer experiments literature is due to \citet[][KOH hereafter]{Kennedy:O'Hagan:2001}. At a high level, with details in Section \ref{sec:review}, KOH couples two Gaussian processes (GPs) together -- one as a surrogate for the computer model (e.g., {\tt ISOTSEAL}), and another to capture bias and noise between simulation and field observation -- to form a multivariate normal (MVN) marginal likelihood which can drive inference for all unknowns, including the calibration parameters. KOH offers a nice synthesis of information sources, but is susceptible to confounding \citep[e.g.,][]{bryn2014learning, tuo2016,plumlee2017bayesian,gu2018jointly}. More important for us, however, is that it is computationally daunting. Cubic matrix decompositions for MVNs involved in GP-based inference severely limit the size of simulation campaigns that can be entertained in this setting \citep[see][]{Santner2018, gramacy2020surrogates}. Historically, computer simulation was cumbersome, limiting the size of campaigns; nowadays that is changing. The {\tt ISOTSEAL} simulator is fast, so a big campaign is computationally reasonable, but it is neither so fast nor so stable that simulations can be used directly (forgoing the surrogate) in an otherwise KOH-style setting \citep{Higdon:2004}. Meanwhile a large simulation campaign is essential to capture stark changes in dynamics, simulation artifacts, and other ``features'' common to modern numerical solvers.
A flexible meta-modeling apparatus is essential in order to cope with large training data sizes, and to adapt to and/or smooth over (i.e., separate signal from) ``noise''/artifacts in such settings. \citet{Huang:2018} proposed one such approach based on {\em on-site surrogates} (OSSs), motivated specifically by {\tt ISOTSEAL} and the BHGE honeycomb, but also vetted generically in benchmark exercises. \citeauthor{Huang:2018} showed how the KOH apparatus, and variations based on modularization \citep{Liu:2009} and maximization \citep{gra:etal:2015}, could be adapted to work with OSSs. Although promising, this setup was only designed to calibrate one output feature at a time, and honeycomb/{\tt ISOTSEAL} output is multi-dimensional. Our BHGE collaborators are interested in at least four frequencies of outputs on four properties of the system (16 total). Section \ref{sec:honeycomb} demonstrates the inadequacy of calibrating outputs independently, one at a time: it leads to inconsistent inference about calibration parameters, challenging downstream data synthesis. This happens because honeycomb's architecture and (simulated and real) physical dynamics are complex, so the imposition of complete independence across property and frequency throws too much information away. KOH has subsequently been extended to multi-output simulation and field experimentation \cite[e.g.][]{Higdon2008}, but with ordinary GP surrogates. In this paper we detail the application of OSSs in that context, which is easier said than done. Although the basic ingredients are similar to \citeauthor{Higdon2008}, via principal component decompositions, their application is non-trivial in this setting and requires care in methodological development as well as implementation, as we provide. Following \citet{Huang:2018}, we take a two-pronged approach: first via thrifty marginalize--maximize calculations, rather than faithful KOH, then full Bayes with MCMC. The result is a multi-output, large-scale, computer model calibration framework that -- at least in the case of the honeycomb -- is able to resolve stark multi-output calibration inconsistencies by making effective use of the (joint) information in all of the available simulation and field data. The remainder of the paper is organized as follows. Section \ref{sec:review} begins with a review of basics, like KOH and GPs, and multi-output (ordinary GP) calibration. Honeycomb specifics and a review of univariate OSS calibration are in Section \ref{sec:honeycomb}, establishing the straw man. Section \ref{sec:muloss} combines these building blocks for calibration with multivariate outputs via OSSs, leveraging linear dependence across output frequencies. Thrifty modular and fully Bayesian KOH are enumerated in turn. Section \ref{sec:allcali} combines across output property and provides honeycomb's multi-output calibration results. Section \ref{sec:discuss} concludes with a discussion. \section{Review of elements} \label{sec:review} We introduce KOH calibration via GPs, and extensions for scale and multivariate output.
\subsection{Basics: calibration and surrogate modeling} \label{sec:basics} \citet{Kennedy:O'Hagan:2001} described a univariate Bayesian calibration framework, combining field experimental observations $y^F(\mathbf{x})$, at a vector of input design variables $\mathbf{x}$, with computer simulations $y^M(\mathbf{x}, \mathbf{u}^\star)$, under ideal or ``true'' calibration/tuning parameter(s) $\mathbf{u}^\star$, through a discrepancy or bias correction $b(\mathbf{x})$ between simulation and field observation: \begin{equation} y^F(\mathbf{x}) = y^M(\mathbf{x}, \mathbf{u}^\star) + b(\mathbf{x}) +\mathbf{\epsilon}, \quad \epsilon\stackrel{\mathrm{iid}}{\sim} \mathcal N(0, \sigma^2_\epsilon). \label{eq:koh} \end{equation} Modeling and inference commence via Gaussian process (GP) priors on $y^M(\mathbf{x}, \mathbf{u}^\star)$ and $b(\mathbf{x})$. GPs provide a flexible nonparametric (Bayesian) structure for smooth functional relationships between inputs $\mathbf{x}$ and output $f(\mathbf{x})$, where any finite number $N$ of evaluations follow a multivariate normal (MVN) distribution: $f(\mathbf{x}) \sim \mathcal{N}_N ( \mu(\mathbf{x}), \Sigma(\mathbf{x},\mathbf{x}'))$. Inference for any aspect of $\mu$ and $\Sigma$ given training data $\mathbf{D}_N = (\mathbf{X}_N,\mathbf{y}_N)$ may be facilitated by likelihoods, i.e., MVN densities. Often in practice $\mu(\cdot)=0$. MVN density/likelihood evaluations for aspects of $\Sigma$, which is usually based on inverse Euclidean distance up to several unknown hyperparameters $\bm{\phi}$, involve cubic-in-$N$ matrix decompositions for inverses and determinants, which can be a bottleneck in large-scale applications. This is exacerbated in the KOH setting, where Eq.~(\ref{eq:koh}) implies a joint distribution for computer model training data $\mathbf{D}_{N_M}= (\mathbf{X}_{N_M}, \mathbf{y}_{N_M})$ and field data $\mathbf{D}_{N_F}= (\mathbf{X}_{N_F}, \mathbf{y}_{N_F})$: \begin{align} \begin{bmatrix} \mathbf{y}_{N_M} \\ \mathbf{y}_{N_F} \end{bmatrix} \sim \mathcal{N}_{N_M + N_F}(\mathbf{0}, \Sigma(\mathbf{u})), \quad \text{where} \quad \Sigma(\mathbf{u}) \equiv \begin{bmatrix} \Sigma_{N_M}& \Sigma^\top_{N_F, N_M}(\mathbf{u})\\ \Sigma_{N_F, N_M}(\mathbf{u}) & \Sigma_{N_F}(\mathbf{u}) + \Sigma^b_{N_F} \end{bmatrix}. \label{eq:mvn} \end{align} In (\ref{eq:mvn}) above, $\Sigma_{N_M} \equiv \Sigma([\mathbf{X}_{N_M}, \mathbf{U}_{N_M} ])$ is an $N_M \times N_M$ covariance matrix for simulations $y^M(\cdot)$, capturing pairwise covariance between $p_x + p_u$ dimensional inputs $( \mathbf{x}, \mathbf{u})$, where $\mathbf{U}_{N_M} \equiv [ \mathbf{u}^\top_1, \dots, \mathbf{u}^\top_{N_M}]$ stacks $N_M$ length-$p_u$ row vectors. The off-diagonal $\Sigma_{N_F, N_M}(\mathbf{u})$ is an $N_F \times N_M$ matrix capturing covariance between simulation inputs $[\mathbf{X}_{N_M}, \mathbf{U}_{N_M} ]$ and field inputs $[\mathbf{X}_{N_F}, \mathbf{U}_{N_F}]$, again under $y^M(\cdot)$, where $ \mathbf{U}_{N_F} \equiv [ \mathbf{u}^\top, \dots, \mathbf{u}^\top]$ stacks $N_F$ identical row vectors of (unknown) parameters $\mathbf{u}$. Similarly, $\Sigma_{N_F}(\mathbf{u}) \equiv \Sigma([\mathbf{X}_{N_F}, \mathbf{U}_{N_F}])$ is $N_F \times N_F$, capturing $y^M(\cdot)$ dynamics between field-data observations at $[\mathbf{X}_{N_F}, \mathbf{U}_{N_F}]$. Lastly, $\Sigma^b_{N_F}$ is an $N_F \times N_F$ matrix of covariances specified by the bias correction GP $\hat{b}(\mathbf{x})$ acting only on field inputs $\mathbf{X}_{N_F}$.
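To make the block structure of Eq.~(\ref{eq:mvn}) concrete, the following minimal {\sf R} sketch assembles $\Sigma(\mathbf{u})$ under assumptions of our own choosing: a separable Gaussian kernel with lengthscales {\tt theta}, and an iid noise term {\tt g} standing in for $\sigma^2_\epsilon$. It is a conceptual illustration, not the implementation used later in the paper.
\begin{verbatim}
# Assemble the joint KOH covariance (simulator + field blocks).
sq_exp <- function(A, B, theta, tau2 = 1) {   # separable Gaussian kernel
  D <- outer(seq_len(nrow(A)), seq_len(nrow(B)),
             Vectorize(function(i, j) sum((A[i, ] - B[j, ])^2 / theta)))
  tau2 * exp(-D)
}

koh_cov <- function(XM, UM, XF, u, theta_m, theta_b, tau2_b, g) {
  ZM  <- cbind(XM, UM)                        # simulator inputs (x, u)
  ZF  <- cbind(XF, matrix(u, nrow(XF), length(u), byrow = TRUE))
  SMM <- sq_exp(ZM, ZM, theta_m)              # Sigma_{N_M}
  SFM <- sq_exp(ZF, ZM, theta_m)              # Sigma_{N_F, N_M}(u)
  SFF <- sq_exp(ZF, ZF, theta_m)              # Sigma_{N_F}(u)
  SB  <- sq_exp(XF, XF, theta_b, tau2_b)      # bias Sigma^b_{N_F}
  rbind(cbind(SMM, t(SFM)),
        cbind(SFM, SFF + SB + diag(g, nrow(XF))))
}
\end{verbatim}
Every MVN density evaluation must decompose this $(N_M + N_F) \times (N_M + N_F)$ matrix, previewing the bottleneck discussed next.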
Under these coupled GPs, fully Bayesian inference for unknown $\mathbf{u}$ under prior $p(\mathbf{u})$, generically, hinges on $p( \mathbf{y}_{N_M}, \mathbf{y}_{N_F} \mid \mathbf{u})$, the MVN (marginal) likelihood implied by Eq.~(\ref{eq:mvn}). The main computational challenges are evident in inverse $\Sigma^{-1}(\mathbf{u})$ and determinant $|\Sigma(\mathbf{u})|$ evaluations involved in such density evaluations. A single Cholesky decomposition could furnish both in $\mathcal{O}((N_M+N_F)^3)$ flops. Holding covariance parameters fixed, notice that not all blocks of $\Sigma(\mathbf{u})$ change as $\mathbf{u}$ varies. While this implies potential for several economies, the computational demands are still substantial (e.g., cubic in $N_M \gg N_F$), and storage nonetheless requires $\mathcal{O}((N_M+N_F)^2)$ space. For more details see \citet[][Section 8.1]{gramacy2020surrogates}. However, if computational hurdles can be surmounted, synthesis of information at varying fidelity (i.e., over field and simulation) can be a highly lucrative affair in spite of notorious identifiability challenges in the tussle between estimated calibration parameters $\hat{\mathbf{u}}$ and model discrepancies $\hat{b}(\mathbf{x})$, with remedies of varying degree coming in Bayesian \citep{Higdon:2004,plumlee2017bayesian,bryn2014learning,Gu:2018} and frequentist \citep{tuo2015,tuo2016,wong2017,plumlee2019} flavors. One of the most basic of these, due to \cite{Liu:2009}, is known as \textit{modularization}. Modularized Bayesian calibration imposes partial independence on the joint MVN KOH structure, separating a surrogate for $\hat{y}(\mathbf{x}, \mathbf{u})$ and bias $\hat{b}(\mathbf{x})$ into two distinct phases. \subsection{Scaling up} Besides other philosophical advantages, the modular setup allows for thriftier surrogate modeling, and thus more tractable posterior inference for $\mathbf{u}$ in large-data settings. \cite{gra:etal:2015} leveraged modularization for a very large radiative shock hydrodynamics experiment via local approximate Gaussian processes \citep[LAGP;][]{gramacy:apley:2015}. LAGP uses local neighborhoods, i.e., nearby data subsets of size $n_M \ll N_M$, for much faster inference. \begin{align} \begin{array}{ccc} \hat{\mathbf{y}}^M_{N_M} (\mathbf{X}^M_{N_M}, \mathbf{U}_{N_M}) & \longrightarrow & \hat{\mathbf{y}}^M_{n_M} (\mathbf{X}^M_{n_M}, \mathbf{U}_{n_M}) \\ \text{Global GP} & \longrightarrow & \text{laGP} \label{eq:lagp} \end{array} \end{align} Inference for $\mathbf{u}$ proceeds via maximization of the posterior for $b(\cdot)$ through the observed discrepancy $\mathbf{D}^{B}_{N_F}(\mathbf{u}) =( \mathbf{X}^F_{N_F}, \mathbf{y}^F_{N_F} - \hat{\mathbf{y}}^M_{N_F}({\mathbf{u}}))$: \begin{equation} \hat{\mathbf{u}} = \mathrm{arg}\max_\mathbf{u} \left\{ p(\mathbf{u}) \left[ \max_{\bm{\phi}_b} p_b(\bm{\phi}_b \mid \mathbf{D}^{B}_{N_F}(\mathbf{u}))\right] \right\}, \label{eq:opt1} \end{equation} where $p_b$ is the MVN log (marginal) likelihood for the GP prior on $b(\cdot)$, and $\bm{\phi}_b$ are any hyperparameters involved in the bias covariance structure, e.g., lengthscales. \cite{Huang:2018} developed on-site surrogates (OSSs), essentially pairing a design strategy with local GP surrogate modeling for KOH. Foreshadowing somewhat, as details are coming shortly in Section \ref{sec:honeycomb}, they were motivated by the unique characteristics of (cheap/fast but erratic and high-dimensional) {\tt ISOTSEAL} for the honeycomb.
Compared to conventional simulators (slow but smooth and low-dimensional), the honeycomb demanded a large {\tt ISOTSEAL} campaign of $N_M = 292{,}000$ runs to fully map out the response surface. Yet only a small handful, $N_F = 292$, of field-data observations were available. To resolve this ``too big, too small'' dilemma, \citeauthor{Huang:2018}~proposed to design/fit {\tt ISOTSEAL} simulations/surrogates ``on-site'' as follows: focus simulation designs, separately, on each of the physical experimental input sites $\mathbf{x}_i$, for $i = 1, 2, \dots, N_F$, paired with space-filling designs of size $1000$ at the calibration inputs $\mathbf{u}$. Extending chart (\ref{eq:lagp}): \begin{align} \begin{array}{ccccc} \hat{\mathbf{y}}^M_{N_M}(\mathbf{X}^M_{N_M}, \mathbf{U}_{N_M}) & \longrightarrow & \hat{\mathbf{y}}^M_{n_M} (\mathbf{X}^M_{n_M}, \mathbf{U}_{n_M}) & \longrightarrow & \hat{\mathbf{y}}^M_{n_i}(\mathbf{U}_{n_i}), i = 1, 2, \dots, N_F \\ \text{Global GP} & \longrightarrow & \text{laGP} & \longrightarrow & \text{OSSs} \end{array} \label{eq:oss} \end{align} where $n_i$ denotes the number of {\tt ISOTSEAL} runs at the $i^\mathrm{th}$ site. In this way, the heavy $p_x + p_u =$ 17d simulation/emulation cargo is decomposed onto a handful $(N_F)$ of lighter, individually focused $p_u=$ 4d sites. Consequently, OSSs address computational bottlenecks, provide non-stationary flexibility, automatically smooth over artifacts in some cases, and interpolate dynamics in others. Modularized calibration via maximization (\ref{eq:opt1}) for $\hat{\mathbf{u}}$ is straightforward. OSSs also create a highly sparse kernel for $\Sigma(\mathbf{u})$ in (\ref{eq:mvn}), so fully Bayesian KOH inference for $\mathbf{u}$ is tractable even with $N_M$ in the millions. \subsection{Calibration with multivariate outputs} Although ideal for {\tt ISOTSEAL} in many respects, this OSS strategy was designed for single outputs. In Section \ref{sec:multioutputs} we show that separate application on honeycomb's multiple outputs (varying frequencies and stiffness/damping coefficients) is problematic, motivating our main methodological contribution [Section \ref{sec:muloss}]. Generally speaking, high dimensional simulation output can manifest in many ways: functional \citep{Bayarri2007b, Higdon2008}, time series \citep{conti2010, fadikar2018}, spatial \citep{Bayarri2009}, spatial-temporal \citep{Gu2016}, spectral \citep{Guinness2019}, and with derivatives \citep{mcfarland2008calibration}. Besides being more complex, multivariate output naturally implies larger data size, aggravating computational challenges. Yet combining highly multivariate, physically meaningful information offers the potential for improved posterior concentration and identification in calibration \citep[see, e.g.][]{Arendt2012, Jiang2016}. Dimension reduction, utilized appropriately, can help. For example, \cite{Higdon2008} extended univariate KOH into highly multivariate settings through principal components, \begin{equation} \mathbf{y}^F(\mathbf{x}) = \mathbf{K}^M \mathbf{w}^M(\mathbf{x}, \mathbf{u}^\star) + \mathbf{K}^B\mathbf{w}^B(\mathbf{x}) +\mathbf{\epsilon}. \label{eq:pccali} \end{equation} In this framework, high dimensional field observations $\mathbf{y}^F(\mathbf{x})$ are modeled through $\mathbf{w}^M(\mathbf{x}, \mathbf{u}^\star)$ via orthogonal basis matrix $\mathbf{K}^M$, and discrepancies $\mathbf{w}^B(\mathbf{x})$ via $\mathbf{K}^B$.
Crucially, inference remains tractable via MCMC, at least compared to the single-output analog, so long as training data sizes $(N_F, N_M)$ are moderate. We aim to port this into the OSS framework [Section \ref{sec:muloss}]. Other multi-output calibration approaches, which seem less well-matched to our honeycomb setting, include wavelet bases for functional outputs \citep{Bayarri2007b}, and the linear model of co-regionalization \citep[LMC;][]{Paulo:2012}. \section{Honeycomb specifics} \label{sec:honeycomb} Centrifugal compressors employ seals to minimize leakage in gas compression phases, preventing back flow and consequently performance decay. Conventional annular gas seals, such as labyrinth and abradable seals, cause gas recirculation around the shaft and produce destabilizing vibration effects. Honeycomb seals are used in high performance turbomachinery to promote stability via damping \citep[see, e.g.][]{childs}. Here we consider a honeycomb rotor stabilizing gas seal under development at BHGE. The system under study is characterized by input--output relationships between variables representing seal geometry and flow dynamics. These include $p_x=13$ controllable physical design inputs $\mathbf{x}$: rotational speed, cell depth, seal diameter and length, inlet swirl, gas viscosity, gas temperature, compressibility factor, specific heat, inlet/outlet pressure, and inlet/outlet clearance. The field experiment, from BHGE's component-level honeycomb seal test campaign, comprises $N_F = 292$ runs varying a subset of those conditions, $\mathbf{X}_{N_F}$, believed to have greatest variability during turbomachinery operation: clearance, swirl, cell depth, seal length, and seal diameter. Measured output features include direct/cross stiffness and damping properties at multiple frequencies. A general-purpose rotordynamic simulator called {\tt ISOTSEAL}, built upon bulk-flow theory, is used to virtually stress-test seals like the honeycomb. First developed at Texas A\&M University \citep{isotseal}, it offers fast evaluation (usually about one second) of gas seal force coefficients. Our BHGE colleagues developed an {\sf R} interface mapping the seventeen scalar inputs for the honeycomb into the format required for {\tt ISOTSEAL}. Thirteen of those inputs match up with the columns of $\mathbf{X}_{N_F}$ (i.e., they are $\mathbf{x}$'s); four are calibration parameters $\mathbf{u}$, which could not be controlled in the field. These comprise statoric and rotoric friction coefficients $n_s, n_r$ and exponents $m_s, m_r$. We work with friction factors coded to the unit cube: $(n_s, m_s, n_r, m_r)^\top \rightarrow (u_1, u_2, u_3, u_4)^\top \in [0,1]^4$, primarily to protect BHGE's intellectual property. \blu{Throughout, we follow \citet{Huang:2018} and use independent $\text{Beta}(2, 2)$ priors on $(u_1, u_2, u_3, u_4)$ to nudge the posterior toward the interior of the space. However, we comment briefly on a limited sensitivity analysis in Section \ref{sec:baysresults}.} \subsection{Multivariate outputs} \label{sec:multioutputs} The potential set of output features that could be monitored for the honeycomb seal is large. Here we focus on four rotordynamic coefficients, or properties: direct stiffness ($K_d$), cross stiffness ($k_c$), direct damping ($C_d$), and cross damping ($c_c$), measured at the following frequencies: 28, 70, 126, and 154 Hz; so 16 outputs in total.
These are our $\mathbf{y}$-values, measured either in the field as $\mathbf{y}^F_i$, collected as $\mathbf{Y}_{N_F}$, or as $\mathbf{y}^M_i$ simulated via {\tt ISOTSEAL}, collected as $\mathbf{Y}_{N_M}$. \cite{Huang:2018} only considered one of these: $K_d$ at 28 Hz. The turbomachinery literature and bulk-flow theory \citep{Hirs:1973} provide some insight into the relationship between these four properties. For example, \cite{D'Souza:Childs:2002} demonstrate that classical transfer for a honeycomb gas seal process can be expressed in a conventional linear motion/reaction-force model \begin{align} - \begin{bmatrix} F_x \\ F_y \end{bmatrix} = \begin{bmatrix} K_d & k_c \\ -k_c & K_d \end{bmatrix} \begin{bmatrix} x \\ y \end{bmatrix} + \begin{bmatrix} C_d & c_c \\ -c_c & C_d \end{bmatrix} \begin{bmatrix} \dot x \\ \dot y \end{bmatrix} \label{eq:trans} \quad \quad \mbox{(units omitted).} \end{align} Direct stiffness $K_d$ and damping $C_d$ account for orthogonal reaction forces in the $x$ and $y$ axes. Cross-coupled stiffness $k_c$ and damping $c_c$ describe reactions orthogonal to the directions of motion. Other mechanical engineering studies of rotordynamic coefficients include \citet{childs,isotseal,delgado2012}. To access simulated versions of these outputs we augmented BHGE's {\sf R} interface for {\tt ISOTSEAL} and then re-ran the campaign of \cite{Huang:2018}, collecting the following: \begin{align} \hat{\mathbf{Y}}^M(\mathbf{x}, \mathbf{u}) &\equiv \hat{y}_{ijk}^M(\mathbf{u}), \quad \mbox{for } \left\{ \begin{array}{rl} i &= 1, 2, \dots, N_F \mbox{ (i.e., each field data pair)} \\ j &\in \{1,2,3,4\} \mbox{ coding properties } \{K_d, k_c, C_d, c_c \} \\ k &\in \{1,2,3,4\} \mbox{ coding frequencies.} \end{array} \right. \label{eq:inds} \end{align} In total this involved $292 \times 1000 \times 4 = 1{,}168{,}000$ {\tt ISOTSEAL} runs. Each run at inputs $(\mathbf{x}, \mathbf{u})$ is for a single output frequency, producing all four rotordynamic coefficients simultaneously. In about 2\% of cases a convergence issue is detected, terminating with an {\tt NA}-coded missing value after about three seconds. Collecting all $4{,}672{,}000$ measurements, over the four frequencies, took about three days when divvied up across several multi-core compute \blu{nodes}. Our experiment resulted in $N_M=\sum_{i=1}^{N_F}n_i= 286{,}282$ successfully terminated runs, with most sites (241 out of 292) having a full $n_i = 1{,}000$. Of the 51 sites with missing responses of varying multitudes, the smallest was $n_{238} = 574$. In total, we collected $16 \times 286{,}282 = 4{,}580{,}512$ on-site multivariate {\tt ISOTSEAL} measurements. The missingness pattern is similar to that reported in \citet{Huang:2018}, even across output coefficients and frequencies, because a failed run at a particular input affects all outputs equally. One of the aims of OSS calibration is to extrapolate to these unknown/missing parameter regions after calibrating to field data. See, e.g., \citet{marcy:2020}, albeit on a somewhat smaller scale. A 16-fold increase in data, exhibiting all of the features of the single-output case (nonstationarity, missingness, etc.), demands a scale-up of OSS calibration in several directions. We describe that in Section \ref{sec:muloss}, but it shares pre-processing with a separate analysis of each output. We therefore turn first to a description of that simpler process, which ultimately serves as a straw man against our fully multivariate analysis.
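Before detailing those fits, we pause to illustrate the mechanics of the campaign just described. The {\sf R} sketch below is ours alone: {\tt maximinLHS} from the {\tt lhs} package stands in for the maximin LHS generator, and a random {\tt isotseal()} stub stands in for BHGE's actual {\tt ISOTSEAL} wrapper, whose name and signature we do not reproduce. It builds a size-1000 on-site design at each field site and drops {\tt NA}-coded failures, yielding the $n_i$ converged runs per site.
\begin{verbatim}
library(lhs)

# Stub for illustration only: the real wrapper returns (Kd, kc, Cd, cc)
# for one (x, u, frequency) run, or NAs upon convergence failure.
isotseal <- function(x, u, f) rnorm(4)

# One space-filling design in the p_u = 4 calibration inputs per site.
build_onsite_designs <- function(XF, n = 1000, pu = 4)
  lapply(seq_len(nrow(XF)), function(i)
    list(x = XF[i, ], U = maximinLHS(n, pu)))

# Run all four frequencies at each (x_i, u); keep fully converged rows.
collect_onsite <- function(designs, freqs = c(28, 70, 126, 154)) {
  lapply(designs, function(d) {
    Y <- do.call(rbind, lapply(seq_len(nrow(d$U)), function(r)
      unlist(lapply(freqs, function(f) isotseal(d$x, d$U[r, ], f)))))
    ok <- complete.cases(Y)            # the n_i <= 1000 converged runs
    list(U = d$U[ok, , drop = FALSE], Y = Y[ok, , drop = FALSE])
  })
}
\end{verbatim}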
\subsection{Separate univariate analysis} \label{sec:unioss} Consider OSSs built separately for each of the outputs. Each OSS comprises a GP fitted to the successful on-site {\tt ISOTSEAL} outputs $y^M_{ijk}$ at $\mathbf{x}_i$, over a novel $1{,}000$-element maximin Latin hypercube sample \citep[LHS;][]{morris:1995} $\mathbf{U}_{ijk}$, for fixed $j$ and $k$. Specifically, $\hat{y}^M_{ijk}$ is built by fitting a stationary zero-mean GP using a scaled and nugget-augmented separable Gaussian kernel \blu{trained only on the space of parameter $\mathbf{u}$,} \begin{align} \blu{\Sigma_{ijk}(\mathbf{u}, \mathbf{u}') = \tau_{ijk}^2\left[\exp\left\{ - \sum_{l=1}^{p_u} \frac{(u_{l} - u'_{l})^2}{\theta_{ijkl}}\right\} + \delta_{\mathbf{u},\mathbf{u}'}\, \eta_{ijk}\right],} \label{eq:kernel} \end{align} where $\delta_{\mathbf{u},\mathbf{u}'}$ is the Kronecker delta, $\tau_{ijk}^2$ is a scale parameter, $\bm{\theta}_{ijk}=(\theta_{ijk1}, \theta_{ijk2}, \dots, \theta_{ijkp_u} )^\top$ a vector of lengthscales, and $\eta_{ijk}$ is a nugget -- all \blu{being specific to the $i^\mathrm{th}$ site, $j^\mathrm{th}$ output, and $k^\mathrm{th}$ frequency}. Denote the set of hyperparameters of the $ijk^\mathrm{th}$ OSS as $\bm{\phi}_{ijk}=\{\tau^2_{ijk}, \bm{\theta}_{ijk}, \eta_{ijk}\}$, with indices following Eq.~(\ref{eq:inds}). \begin{figure}[ht!] \centering \includegraphics[scale=0.415, trim=0 45 25 0,clip=TRUE]{noise1} \includegraphics[scale=0.415, trim=30 45 0 0,clip=TRUE]{noise2} \includegraphics[scale=0.415, trim=0 0 25 0,clip=TRUE]{noise3} \includegraphics[scale=0.415, trim=30 0 0 0,clip=TRUE]{noise4} \caption{Univariate OSS noise levels represented by the trained $\hat{\tau}_{ijk}^2 \hat{\eta}_{ijk}$ for direct/cross stiffness and damping at each of the four frequencies.} \label{fig:noise1}% \end{figure} To offer some visual contrast between these fits, Figure \ref{fig:noise1} shows the site-wise noise level $\hat{\tau}_{ijk}^2 \hat{\eta}_{ijk}$ across the $N_F=292$ sites for the sixteen outputs. These are clearly non-constant, a testament to the nonstationary nature of OSSs across the input space represented by the 292 $\mathbf{x}_i$ settings, but also highly consistent across outputs at each frequency level. This suggests that a certain amount of information in each frequency is redundant. Although somewhat less obvious at first glance, notice that the overall noise level decreases as the frequency increases. The amount by which the noise level drops depends on the input location, suggesting there is novel information in each output frequency, which might manifest as spatial dependency, albeit in a large ($p_x = 13$d) space. \begin{figure}[ht!] \centering \includegraphics[width=1\linewidth,trim= 0 25 0 50,clip=TRUE]{pred_oss.pdf} \caption{Boxplots of 292 out-of-sample root mean-squared errors (RMSEs).} \label{fig:rmse}% \end{figure} Before inserting these $292 \times 16 = 4{,}672$ OSSs into the KOH apparatus for calibration, we performed a hyperparameter stability and predictive accuracy check on a commensurately sized out-of-sample on-site testing data set, again sixteen-fold across $4{,}672{,}000$ {\tt ISOTSEAL} measurements. The hyperparameters looked similar to Figure \ref{fig:noise1}, and are not shown here to save space. Prediction accuracies are summarized in Figure \ref{fig:rmse}. Each boxplot collects the 292 on-site root mean-squared errors (RMSEs) from the $n_{ijk} \approx 1{,}000$ converged runs on site $i$, separately for output $j$ and frequency $k$.
Observe that most OSSs ($>50\%$ via the median lines in the boxplots) act as interpolators on their sites, giving RMSEs close to zero. The rest act as extrapolators or smoothers, with a small handful ($\approx 5\%$ outside the whiskers) extremely so. Also notice that the scales of RMSEs consistently decrease as frequency increases, coinciding with the in-sample pattern(s) observed in Figure \ref{fig:noise1}. \subsection{Challenges from univariate OSS calibration} \label{sec:challenge} Next, combining with field data, we performed separate OSS-based fully Bayesian calibrations for $\mathbf{u}$, repeating the \citeauthor{Huang:2018}~univariate approach for each output and frequency. I.e., we performed $4 \times 4 = 16$ MCMCs, $100{,}000$ samples each, and discarded $5{,}000$ as burn-in. Each MCMC run took about three days to finish, after a warm start from modular optimization \blu{(\ref{eq:opt1})}. To demonstrate the drawbacks of this univariate approach, Figures \ref{fig:unibayes} and \ref{fig:unibayes_2} present two views into the posteriors for $\mathbf{u}_{jk}$. \begin{figure}[ht!] \centering \includegraphics[width=1\linewidth, trim=0 20 0 0,clip=TRUE]{uni_vs_pca} \caption{Marginal posterior distributions of $\mathbf{u}$ from outputs $K_d, k_c, C_d$, and $c_c$ at 28Hz (circles) and via principal components combining all output frequencies (triangles, Section \ref{sec:pcresult}). Dots indicate MAP values and error bars form 90\% credible intervals.} \label{fig:unibayes} \end{figure} Figure \ref{fig:unibayes} shows marginal posteriors for the components of $\mathbf{u}_{jk}$ for each output at 28Hz ($k=1$). The other frequencies, provided in Supplement \ref{sec:uniap}, look similar. To support a comparison coming later in Section \ref{sec:pcresult}, the plot also shows results combining all frequencies, which will be discussed in due course. Focusing on the 28Hz results, notice that while some coordinates, like $u_3$ and $u_4$, exhibit consistency in the high overlap of their credible intervals, others like $u_1$ and $u_2$ do not. There would appear to be complementary (unused) information in these independent analyses. \begin{figure}[ht!] \centering \includegraphics[width=1\linewidth, trim=0 5 0 0,clip=TRUE]{uni_bayes} \includegraphics[width=1\linewidth, trim=0 15 0 50,clip=TRUE]{uni_bayes_2} \caption{Bivariate marginal plots of single-output posterior samples of $\mathbf{u}$ from outputs $K_d, k_c, C_d$, and $c_c$ at 28 Hz. Dots are $100{,}000$ MCMC samples of $\mathbf{u}$, heat colored according to the rank of (log) posterior probability. The ``+'' indicates the MAP values. } \label{fig:unibayes_2} \end{figure} For a slightly higher resolution view, Figure \ref{fig:unibayes_2} shows posteriors for pairs $u_1 \times u_2$ for each output (at 28Hz) across the top, and $u_3 \times u_4$ across the bottom. Again, the set of similar visuals is completed in the supplemental material. In the top row, $k_c$ stands out as the exception. Despite wider credible regions in the bottom row, only for one pair ($k_c$, $c_c$) among the six do the posteriors resemble one another. Taken together -- and even for just these four sets of $\mathbf{u}_{jk}$ posteriors, ignoring the other output frequencies -- there is no way to stack them in the 4-dimensional space into a single, concentrated solution that appropriately weighs the individual parameter uncertainties from different outputs.
These separately sampled $\mathbf{u}_{jk}$ posteriors indicate heterogeneity in parameter learning, with varying amounts of uncertainty, propagating additional uncertainty downstream to any engineering decision-making. A joint analysis not only holds the potential to reconcile (at times) contradictory information, but would also drastically simplify downstream decision-making with far fewer posterior views to scrutinize. \section{On-site surrogates with basis representation} \label{sec:muloss} The essential insight behind OSS-based calibration is that learning for $\mathbf{u}$ is primarily driven by model discrepancy $b(\mathbf{x})$, which can only be observed at field input sites $\mathbf{X}_{N_F}$, through residuals. At a conceptual level, extending that to multivariate output is as simple as making the relevant quantities bold: $\mathbf{y}_{ijk}^F({\mathbf{X}_{N_F}})$ in the field and $\hat{\mathbf{y}}_{ijk}^{M}({\mathbf{X}_{N_F}, \mathbf{U}_i})$ for the surrogate, and then calculating discrepancies with extra indices. \begin{align} \mathbf{y}_{ijk}^F({\mathbf{X}_{N_F}})-\hat{\mathbf{y}}_{ijk}^{M}({\mathbf{X}_{N_F}, \mathbf{U}_i}), \quad i = 1, 2, \dots, N_F, \quad j=1,2, ..., J, \quad k=1,2, ..., K, \label{eq:onsite} \end{align} \blu{where $ \mathbf{U}_i$ is the design matrix (maximin LHS) of calibration parameter $\mathbf{u}$ for the $n_i$ converged on-site runs at the $i^\mathrm{th}$ site.} As with univariate outputs, as long as the surrogate is good at those locations, which OSSs facilitate, further considerations can be pushed downstream. For example, when new locations are of interest, say for prediction, new OSSs/runs can be built/performed there, which is trivial if (like {\tt ISOTSEAL}) the simulations are fast. The devil is, however, in the details of how these residuals (\ref{eq:onsite}) are modeled. Section \ref{sec:challenge} demonstrates that separating over $j$ and $k$ for the honeycomb leads to pathologies. Here we begin the description of a joint analysis, building a fitted bias for residuals all at once, to resolve those inconsistencies while ensuring that all relevant information is incorporated in posterior inference for $\mathbf{u}$, thus filtering through to downstream tasks like prediction. What we propose below is customized, to a degree, to the honeycomb setting, e.g., via fixed $j$ (output classes) and pooling over $k$ (output frequencies) in Eq.~(\ref{eq:onsite}). Completing the description by combining over output classes (pooling over $j$) is deferred to Section \ref{sec:allcali}. We begin here with some notational setup that applies for all $j$ and $k$, and then fix $j$ for the remainder of the section. Although we believe other applications -- perhaps with more or fewer indices -- could be set up similarly, further speculation is left to Section \ref{sec:discuss}. Let $\mathbf{Y}_i^M(\mathbf{U}) = [\mathbf{y}_{i11}^M(\mathbf{U}_i), \mathbf{y}_{i12}^M(\mathbf{U}_i), \dots, \mathbf{y}_{i44}^M(\mathbf{U}_i)]$, for $i=1, 2, \dots,N_F$, be a 16-column matrix (four outputs at four frequencies) holding the $n_i \approx 1000$ rows of converged {\tt ISOTSEAL} runs for the $i^\mathrm{th}$ site.
Now collect $N_F=292$ of these into $\mathbf{Y}^M(\mathbf{U}) = [\mathbf{Y}_1^M(\mathbf{U})^\top, \dots, \mathbf{Y}_{N_F}^M(\mathbf{U})^\top ]^\top$: \begin{align} \mathbf{Y}^M(\mathbf{U}) &= \label{eq:ym} \begin{bmatrix} \mathbf{y}^M_{111}(\mathbf{U}_1) & \dots & \mathbf{y}^M_{144}(\mathbf{U}_1) \\ \vdots & \ddots & \vdots \\ \mathbf{y}^M_{N_F11}(\mathbf{U}_{N_F}) & \dots & \mathbf{y}^M_{N_F44}(\mathbf{U}_{N_F}) \end{bmatrix}_{N_M \times 16} \end{align} whose row dimension is $N_M = \sum_{i=1}^{N_F} n_i = 286{,}282$. Recall that $\mathbf{U}_i$ are $n_i \times p_u$ on-site (maximin LHS) design matrices. \begin{figure}[ht!] \centering \includegraphics[scale=0.65,trim=20 80 0 35,clip=TRUE]{cor_ym_2} \caption{Correlation matrix (number in lower triangle and ellipse in upper triangle) of $N_M = 286{,}282$ multivariate {\tt ISOTSEAL} simulations of direct/cross stiffness and damping at frequencies 28, 70, 126, and 154 Hz. In the output labels $y_{jk}$, $j = 1, \dots, 4$ for output property and $k = 1, \dots, 4$ for frequency, as defined in Eq.~(\ref{eq:inds}).} \label{fig:cor_ym} \end{figure} We performed an initial exploratory data analysis (EDA) with these data, one aspect of which is $\mathrm{Cor}\{\mathbf{Y}^M(\mathbf{U})\}$, visualized in Figure \ref{fig:cor_ym}. Correlation strength and association are indicated numerically in the lower-triangle, by elliptical shape and direction in the upper half, and by color and shading in both. Four-by-four blocks are clearly evident in this view, indicating strong linear correlation between different frequency levels within the same output property (block), but weaker correlation between properties. Our EDA also revealed similar correlations exhibited by the output-combined field data $\mathbf{Y}^F$, defined similarly. This is not shown here for brevity. Taken together, we conclude that parsimonious representation of across-frequency information could be beneficial to a joint modeling enterprise. \subsection{Principal component OSSs} \label{sec:osspca} We propose performing principal component analysis (PCA) across frequency outputs in an ``on-site'' fashion, i.e., fitting a PC basis in the subspace spanned by $\mathbf{U}_i, i=1,\dots, N_F$ on the $N_F$ observed physical sites $\mathbf{X}_{N_F}$ for each type (block, indexed by $j$) of outputs. Fixing $j$, we first center and standardize the correlated $K=4$ frequencies, then perform PCA on the $N_M \times K$-dimensional ``on-site'' matrix, \begin{align} \mathbb{PC} \{ \mathbf{Y}_j^F - \mathbf{Y}_j^M \}, \quad \mbox{yielding eigenvectors} \quad \mathbf{W}_j, \quad \text{for} \quad j = 1, \dots, J. \label{eq:pca} \end{align} In Eq.~(\ref{eq:pca}), $\mathbf{Y}_j^F$ collects $K=4$ columns of $n_i$-row replicated field outputs across frequencies, \begin{align} \mathbf{Y}_j^F = \begin{bmatrix} \mathbf{y}^F_{1j1} & \cdots & \mathbf{y}^F_{1j4} \\ \vdots & \ddots & \vdots \\ \mathbf{y}^F_{{N_F} j1} & \cdots & \mathbf{y}^F_{{N_F}j4} \\ \end{bmatrix}_{N_M \times 4}, \label{eq:YF} \end{align} where $ \mathbf{y}^F_{ijk}\equiv (y^F_{ijk}(\mathbf{x}_i), \dots, y^F_{ijk}(\mathbf{x}_i) )^\top$ is the $n_i$-times \blu{duplicated} field output $y^F_{ijk}(\mathbf{x}_i)$ on the $i^\mathrm{th}$ site for output $j$ at frequency $k$. Similarly, $\mathbf{Y}_j^M$ collects $K=4$ columns of on-site {\tt ISOTSEAL} simulations on $N_F$ sites at multiple frequencies, as provided in Eq.~(\ref{eq:ym}). For honeycomb, there are $J=4$ properties (blocks): \{$K_{d}$, $k_{c}$, $C_{d}$, $c_{c}$\}. Thus, we performed four separate PCAs in total.
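A minimal {\sf R} sketch of this decomposition, for a single property $j$, is below; it follows Eqs.~(\ref{eq:pca}) and (\ref{eq:YF}) literally ({\tt prcomp} on the centered and standardized discrepancies, then projection of both data types onto the leading eigenvector), though scaling conventions in a production implementation may differ:
\begin{verbatim}
# On-site PCA for one output property j: YF and YM are the N_M x 4
# matrices Y_j^F and Y_j^M; columns index the four frequencies.
onsite_pc1 <- function(YF, YM) {
  D  <- scale(YF - YM)        # center/standardize each frequency column
  w1 <- prcomp(D, center = FALSE)$rotation[, 1]  # first eigenvector w_j^1
  list(w1  = w1,
       yF1 = YF %*% w1,       # field outputs on the first PC
       yM1 = YM %*% w1)       # simulator outputs on the first PC
}
\end{verbatim}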
Figure \ref{fig:scree} summarizes these via scree plots of variance decomposition, accompanied by a table numerically summarizing the top variances. \begin{figure}[ht!] \centering \includegraphics[width=0.243\linewidth, trim=10 10 10 20]{scree_kdir} \includegraphics[width=0.243\linewidth, trim=10 10 10 20]{scree_kcross} \includegraphics[width=0.243\linewidth, trim=10 10 10 20]{scree_cdir} \includegraphics[width=0.243\linewidth, trim=10 10 10 20]{scree_ccross} \vspace{0.25cm} \begin{tabular}{r | r | r | r | r} Observed discrepancy on output & $K_{d}$ & $k_{c}$ & $C_{d}$ & $c_{c}$ \\ \hline Variation represented in the first-PC & 96.46\% & 94.89\% & 91.49\% & 84.19\% \\ Variation represented in the second-PC& 2.84\% & 4.66\% & 6.41\% & 12.68\% \\ \end{tabular} \caption{Scree plots and tabulation of PCA variances.}% \label{fig:scree}% \end{figure} Notice that the first-PCs dominate in all four cases; only output $c_c$ might marginally benefit from the second principal direction. This suggests that a \citet{Higdon2008}-style basis-based surrogate (\ref{eq:pccali}), projecting down to one principal direction for these four outputs (separately over output property $j$), could be effective here, after upgrading to accommodate OSSs. Instead of breaking down the simulator and discrepancies into two separate principal representations in (\ref{eq:pccali}), we prefer a single PCA through the matrix of eigenvectors $\mathbf{W}_j$ trained from the whole ``on-site'' observed discrepancy in (\ref{eq:pca}). In particular, we use the first (column) eigenvector $\mathbf{w}^1_j$ of $\mathbf{W}_j$ to extract the first-PCs from multiple frequency outputs of both simulated $\mathbf{y}_j^{M1} = \mathbf{Y}_j^M \mathbf{w}_j^1$ and field $\mathbf{y}_j^{F1} = \mathbf{Y}_j^F\mathbf{w}_j^1$. Once extracted, we can put them together as \begin{equation} \mathbf{y}_j^{F1}(\mathbf{x}) = \mathbf{y}_j^{M1} (\mathbf{x}, \mathbf{u}^{\star1}) + \mathbf{b}^1_j(\mathbf{x}) + \mathbf{\epsilon}^1_j, \label{eq:pckoh} \end{equation} which may be interpreted as ordinary KOH in the first-PC subspace. Observe the introduction of new notation for model discrepancy $\mathbf{b}^1_j(\mathbf{x})$, best calibration setting $\mathbf{u}^{\star1}$, and iid noise $\mathbf{\epsilon}_j^1$, to recognize PC pre-processing to form $\mathbf{y}_j^{F1}(\mathbf{x})$ and $\mathbf{y}_j^{M1}$. Focus remains on observed sites only, i.e., $\mathbf{x} \equiv \mathbf{X}_F$ in the on-site setting, for observed discrepancies, with surrogates trained on the $N_M= 286{,}282$ runs paired with the space-filling $\mathbf{U}_i$'s. After learning, prediction, etc., may be linearly back-transformed to the original full space. \subsection{PC-level OSS calibration: optimization and full Bayes} \label{sec:pcacali} Each first on-site PC can be used in a separate calibration: four this time instead of sixteen in Section \ref{sec:challenge}. Here we illustrate how these may be plugged into the OSS calibration framework as an intermediate step toward a fully joint model in Section \ref{sec:allcali}. \paragraph{Modular calibration via optimization.} Here the goal is to search for $\hat{\mathbf{u}}_j^1$ by maximizing the posterior probability of (observed) model discrepancy (\ref{eq:opt1}) in the first principal direction. Update Eq.~(\ref{eq:opt1}) by swapping the univariate (column vector) observed discrepancy with that based on the first eigenvector $\mathbf{w}^1_j$ obtained in Eq.~(\ref{eq:pca}).
I.e., for each of $j=1, \dots, J$, \begin{align} \mathbf{D}^{B}_{N_F}(\mathbf{u}) =( \mathbf{X}^F_{N_F}, \mathbf{y}^F_{N_F} - \hat{\mathbf{y}}^M_{N_F}({\mathbf{u}})) \rightarrow \mathbf{D}^{j1}_{N_F}(\mathbf{u}_j^1) =( \mathbf{X}^F_{N_F}, (\mathbf{Y}_j^F - \hat{\mathbf{Y}}_j^M(\mathbf{u}_j^1) ) \mathbf{w}^1_j ). \label{eq:pcbias} \end{align} In (\ref{eq:pcbias}), $\mathbf{Y}_j^F$ is fixed and $\hat{\mathbf{Y}}_j^M(\mathbf{u}_j^1)$ are OSS [fitted as in Section \ref{sec:unioss}] evaluations at $\mathbf{u}_j^1$, which would vary along a numerical optimizer's search trajectory. The mathematical programs are identical to Eq.~(\ref{eq:opt1}) but with $\mathbf{D}^{B}_{N_F}(\mathbf{u}) \rightarrow \mathbf{D}^{j1}_{N_F}(\mathbf{u}_j^1)$ and $\bm{\phi} \rightarrow \bm{\phi}_j^1$ denoting hyperparameters involved in each of the $J$ GP-based \blu{(separable squared exponential)} fitted discrepancies. \paragraph{Bayesian joint inference.} Posterior sampling of $\mathbf{u}^1_j$ requires projecting $\mathbf{Y}_j^F$ (\ref{eq:YF}) and $\mathbf{Y}_j^M$ (\ref{eq:ym}) onto their first principal axis, $\mathbf{y}^{F1}_j$ and $\mathbf{y}^{M1}_j$ respectively, via Eq.~(\ref{eq:pca}). Then, following Eq.~(\ref{eq:pckoh}), one may impose a joint MVN (\ref{eq:mvn}), with mean zero and covariance $\Sigma_j^1(\mathbf{u}^1_j)$, whose structure is re-notated below with appropriate indices/first-PC indicators: \begin{align} \Sigma_j^1(\mathbf{u}^1_j) \equiv \begin{bmatrix} \Sigma^{j1}_{N_M}& \Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j)^\top\\ \Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j) & \Sigma^{j1}_{N_F}(\mathbf{u}^1_j) + \Sigma^{j1}_{b} \end{bmatrix}. \label{eq:Sigma} \end{align} Posterior evaluation for $\mathbf{u}^1_j$ in a Metropolis setting requires decomposing this $\Sigma_j^1(\mathbf{u}^1_j)$ for inverse and determinant components of the MVN log likelihood. Note that this matrix has $(N_M + N_F)^2$ entries, where $N_M + N_F = 286{,}574$ for our honeycomb application. Consequently, ordinary cubic-in-$(N_M + N_F)$ decomposition costs pose a serious bottleneck. Fortunately, $\Sigma_{j}^1(\mathbf{u}^1_j)$ has a convenient structured-sparsity form under OSSs. The largest block $\Sigma^{j1}_{N_M}$, corresponding to the OSSs themselves, is the most sparse. It is block-diagonal with $N_F$ blocks: $\Sigma^{j1}_{N_M}=\Diag[\Sigma^{j1}_i(\mathbf{U}_i, \mathbf{U}_i)]$. Each block $\Sigma^{j1}_i(\mathbf{U}_i, \mathbf{U}_i)$ may be built from the kernel of the $i^\mathrm{th}$ OSS conditioned on any fitted hyperparameters from $\mathbf{y}_{ij}^{M1}$. Since it does not depend on $\mathbf{u}^1_j$, each may be pre-decomposed separately at manageable $\mathcal{O}(n_i^3)$ cost. The rest of $\Sigma_j^1(\mathbf{u}^1_j)$ requires bespoke construction given novel $\mathbf{u}^1_j$, but still has convenient block--sparse structure. The simulator--field cross covariance piece is block-diagonal, $\Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j)=\Diag[\Sigma^{j1}_i(\mathbf{u}^1_j, \mathbf{U}_i)] $, where $\Sigma^{j1}_i(\mathbf{u}^1_j, \mathbf{U}_i)$ is a row vector of site-wise covariance between calibration inputs $\mathbf{u}^1_j$ and on-site design matrix $\mathbf{U}_i$. Finally, although $\Sigma^{j1}_{N_F}(\mathbf{u}^1_j) + \Sigma^{j1}_{b}$ is dense, it is small ($N_F \times N_F$) and thus cheap to decompose. The first component $\Sigma^{j1}_{N_F}(\mathbf{u}^1_j)$ is block diagonal, where each block is like $\Sigma^j_i(\mathbf{U}_i, \mathbf{U}_i)$ from the $i^\mathrm{th}$ OSS kernel, except with stacked $\mathbf{u}^1_j$ vectors in lieu of the $\mathbf{U}_i$.
The second component $\Sigma^{j1}_{b}$ is dense and comes from the discrepancy kernel using all field-data inputs. Any hyperparameters, e.g., $\hat{\bm{\phi}}_{j}^1$, are most easily set via maximization-based pre-analysis, e.g., following Eq.~(\ref{eq:pcbias}), but could also be included in the MCMC. \blu{For consistency, zero-mean separable GPs are fitted for $\hat{\bm{\phi}}_{j}^1$.} Once $\Sigma_j^1(\mathbf{u}^1_j)$ in Eq.~(\ref{eq:Sigma}) is built, for each proposed $\mathbf{u}^1_j$ setting in a Metropolis scheme, and after its component parts have been decomposed, a full decomposition -- i.e., combining from constituent parts -- involves tedious but ultimately straightforward matrix multiplication via partition inverse and determinant equations \citep[e.g.,][]{Petersen:2008} \blu{ leveraging the multiple-OSSs setup, \begin{align} \Sigma^1_j(\mathbf{u}^1_j)^{-1} & = \begin{bmatrix}(\Sigma^{j1}_{N_M} )^{-1} +(\Sigma^{j1}_{N_M} )^{-1} \Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j)^\top \mathbf{C}^{-1} (\mathbf{u}^1_j ) \Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j)(\Sigma^{j1}_{N_M} )^{-1} & (\cdot)^\top \\ -\mathbf{C}^{-1} (\mathbf{u}^1_j ) \Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j)(\Sigma^{j1}_{N_M} )^{-1} & \mathbf{C}^{-1} (\mathbf{u}^1_j ) \end{bmatrix} \nonumber \\ \det[ \Sigma^1_j(\mathbf{u}^1_j)] &= \det(\Sigma^{j1}_{N_M} ) \times \det(\mathbf{C} (\mathbf{u}^1_j )), \quad \text{where} \nonumber \\ \mathbf{C} (\mathbf{u}^1_j ) & = \Sigma^{j1}_{N_F}(\mathbf{u}^1_j) + \Sigma^{j1}_{b} - \Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j) (\Sigma^{j1}_{N_M})^{-1} \Sigma^{j1}_{N_F, N_M}(\mathbf{u}^1_j)^\top. \end{align} } \subsection{PC-level calibration results} \label{sec:pcresult} Isolating each of \{$K_{d}$, $k_{c}$, $C_{d}$, $c_{c}$\}, but leveraging strong linear correlation across frequencies ($4 \times 4 = 16$ total outputs), eliminates redundant information and brings the unit of analysis down fourfold, from sixteen to four. The computational merits of OSSs, ported to a PC basis, enable efficient modular optimization and fully Bayesian inference on parameter(s) $\mathbf{u}^1_j$. Here we present the outcome of such analysis with an eye toward a fully combined setup in Section \ref{sec:allcali}. We focus on contrasting with two earlier views, from the fully independent, separated (16-fold) analysis provided in Section \ref{sec:multioutputs}. Figure \ref{fig:unibayes} shows 1d posterior marginals for $\mathbf{u}^1_j$, for each $j=1,\dots,4$, alongside their 28Hz-only analog. With four coordinates of $\mathbf{u}$, a total of sixteen comparisons may be made with this view. Observe, for example, that $u_1$, $u_2$, and $u_4$ show considerable concentration of density under PC for outputs $C_d$ and $c_c$. There is also a considerable shift in the location of posterior mass (i.e., the MAP), with $u_1$ and $u_2$ shifting up and $u_4$ shifting down for $c_c$. The rest of the PC marginals are similar to their 28Hz counterparts. \begin{figure}[ht!] \centering \includegraphics[width=1\linewidth, trim=0 5 0 0,clip=TRUE]{pc_bayes} \includegraphics[width=1\linewidth, trim=0 15 0 50,clip=TRUE]{pc_bayes_2} \caption{Bivariate marginal plots of posterior samples of $\mathbf{u}^1_j$ from PC-combined outputs $K_d, k_c, C_d$, and $c_c$. Dots are $100{,}000$ MCMC samples of $\mathbf{u}^1_j$, heat colored according to the rank of (log) posterior probability. The ``+'' indicates the MAP values. } \label{fig:pcabayes_2} \end{figure} Figure \ref{fig:pcabayes_2} offers a similar comparison in 2d when contrasted with Figure \ref{fig:unibayes_2} in Section \ref{sec:multioutputs}.
As in that view, only a subset of pairs of outputs are shown. The rest are in Supplement \ref{sec:pcap}. Notice the marked improvement in posterior concentration in the rightmost four panels. Two of those panels, $(u_3 \times u_4)$ for $C_d$ and $(u_1 \times u_2)$ for $c_c$, also show substantial re-location of density compared to their 28Hz analog. The leftmost four panels offer less stark contrast, a view shared by the 1d analysis in Figure \ref{fig:unibayes}. These results indicate that we are headed in the right direction. We embarked on this analysis knowing well that the diversity of information across output frequencies would not be substantial. Nevertheless, representing it in a parsimonious way, through first-PCs, seems to enhance posterior concentration even though data is being discarded (i.e., the other three PCs). This is a hallmark of enhanced learning through dimension reduction. The last step is to combine these separate analyses into one, forming a posterior for a single set of unknowns given the entirety of data on all simulation and field outputs. \section{Full integration of outputs} \label{sec:allcali} The final step in our analysis is, in some sense, the easiest -- that is, once all the hard work of building OSSs, forming first-PC posteriors, etc., is done. After detailing how outputs $j=1,\dots,J$ may be combined, we provide a final suite of views into a unified posterior for $\mathbf{u}^1$. Although the setup is rather generic in $j$, in what follows we continue to focus the discussion on the honeycomb application in anticipation of those results. \subsection{Inferential apparatus} Section \ref{sec:osspca} demonstrated that a PC basis is effective as a dimension reduction tool for each of the four blocks of four honeycomb output frequencies. Those four bases, calculated separately but combined momentarily, each capture around 90\% of the across-frequency variability in their block (Figure \ref{fig:scree}), so that four PC-level quantities retain most of the information in all sixteen raw outputs. On the other hand, each output property $j$ (direct/cross stiffness or damping) has a distinct physical meaning, and the dependence between them is highly non-linear.\footnote{This is quite different from the linear form of the differential equation linking them (\ref{eq:trans}).} In Figure \ref{fig:cor_ym} we saw no evidence of strong linear dependence across the $J=4$ output classes, which nudged us toward an independence assumption across output properties in this joint analysis. We shall return to that in our discussion in Section \ref{sec:discuss}. As a related but practical matter, each output is measured on a different scale. PCA helps here. Standardization of the 16d raw outputs, as a pre-processing step, followed by orthogonal projection onto the 4-column subspaces spanned by their first eigenvectors $\mathbf{w}^1_j$, naturally places those quantities on an equal footing. Besides being represented in the direction of highest variability, they are scale-free, which simplifies joint modeling downstream. \paragraph{Modular calibration via optimization.} Recall that each OSS ($N_F \times J \times K = 4{,}672$ univariate fits in total [Section \ref{sec:unioss}]) involves a manageable $\mathcal{O}(n^3_i)$ calculation. Then each block of $K=4$ output frequencies is combined into its first-PC (\ref{eq:pca}). Finally, these representations are combined into a unified objective to obtain a single $\mathbf{u}^1$ under Eq.~(\ref{eq:opt1}) for all ($J=4$) observed discrepancies, with modular inference for GP hyperparameters $\bm{\phi}^1_j$ in each subspace.
\paragraph{Modular calibration via optimization.} Recall that the OSSs, i.e., $N_F \times J \times K = 4{,}672$ univariate fits [Section \ref{sec:unioss}], each involve a manageable $\mathcal{O}(n^3_i)$ calculation. Then each of the $K=4$ output frequencies is combined into its first-PC (\ref{eq:pca}). Finally, these representations are combined into a unified objective to obtain a single $\mathbf{u}^1$ under Eq.~(\ref{eq:opt1}) for all ($J=4$) observed discrepancies, with modular inference for the GP hyperparameters $\bm{\phi}^1_j$ in each subspace. In essence, we wish to \blu{jointly optimize} multiple Eq.~(\ref{eq:opt1})'s for a single $\mathbf{u}^1$ via \blu{tuning $\mathbf{u}^1_j$ on observed discrepancies} $\mathbf{D}^{j1}_{N_F}(\mathbf{u}^1_j) =( \mathbf{X}^F_{N_F}, (\mathbf{Y}_j^F - \hat{\mathbf{Y}}_j^M(\mathbf{u}^1_j) ) \mathbf{w}^1_j )$, for $j=1, \dots, J$. After imposing conditional independence given a common $\mathbf{u}^1$, the following \blu{joint} objective \blu{across all $J$ outputs} is immediate: \begin{equation} \hat{\mathbf{u}}^1 = \mathrm{arg}\max_{\mathbf{u}^1} \left\{ p(\mathbf{u}^1) \prod^J_{j = 1} \left[ \max_{\bm{\phi}^1_j} p_b(\bm{\phi}^1_j \mid \mathbf{D}^{j1}_{N_F}(\mathbf{u}^1))\right] \right\}. \label{eq:opt2} \end{equation} In practice it is easiest to solve this in log space. For each value of $\mathbf{u}^1$ entertained by a numerical optimizer, evaluation requires GP fitting for $J=4$ observed discrepancies $\mathbf{D}^{j1}_{N_F}(\mathbf{u}^1_j)$, each with \blu{an optimized GP hyperparameter} $\hat{\bm{\phi}}^1_j$ offloaded to a separate, library-facilitated, numerical optimizer. \blu{Each of the $J$ inner optimizations over hyperparameter $\bm{\phi}_j$ in $p_b(\bm{\phi}^1_j \mid \mathbf{D}^{j1}_{N_F}(\mathbf{u}^1))$ is wrapped in an outer optimization over the product-form joint marginal likelihood with prior $p(\mathbf{u}^1)$. } To leverage ubiquitous modern multi-core workstation resources, we fit these in parallel. Following \citet{Huang:2018}, we use \blu{{\tt optim} with BFGS in {\sf R} for the inner GP hyperparameter optimization(s) and} \blu{{\tt nloptr} wrapped in a multi-start scheme} for the \blu{outer optimization} to find $\hat{\mathbf{u}}^1$. \paragraph{Bayesian joint inference.} Posterior sampling of $\mathbf{u}^1$ may follow similar \blu{principles.} With the $j^\mathrm{th}$ output in first-PC representation, i.e., $\mathbf{y}^{F1}_j$ and $\mathbf{y}^{M1}_j$ via Eq.~(\ref{eq:pca}), a joint posterior via conditional independence follows from a likelihood in product form: \begin{equation} p(\mathbf{u}^1 \mid \mathbf{y}^{F1}_1, \mathbf{y}^{M1}_1, \dots, \mathbf{y}^{F1}_J, \mathbf{y}^{M1}_J ) \propto p(\mathbf{u}^1) \cdot \prod^J_{j = 1} p( \mathbf{y}^{F1}_j, \mathbf{y}^{M1}_j \mid \mathbf{u}^1). \label{eq:lik} \end{equation} \blu{To sample from this posterior, a Metropolis-within-Gibbs scheme can easily be coded up, with each Gibbs step taking a marginal Gaussian random walk on $\mathbf{u}^1$.} Under the multivariate OSSs emulation structure, Metropolis rejection for $\mathbf{u}^1$ can be broken down into evaluation of each (log) MVN likelihood, $p( \mathbf{y}^{F1}_j, \mathbf{y}^{M1}_j \mid \mathbf{u}^1)$, for $j= 1, \dots, J$. \blu{We see potential for each to be evaluated} in parallel and then put together into the joint (log) likelihood. \blu{Still, our serial implementation (with vectorized linear algebra subroutines) was fast enough to furnish thousands of samples per hour.} A modular optimization solution (\ref{eq:opt2}) was helpful in providing a warm-start to minimize burn-in efforts.
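A minimal sketch of such a sampler follows, in Python for self-containment; the per-output log likelihoods and the prior are hypothetical callables standing in for the quantities in Eq.~(\ref{eq:lik}).
\begin{verbatim}
import numpy as np

def mwg(loglik_js, logprior, u0, n_samp=10000, step=0.05, seed=0):
    # loglik_js: list of J callables, one log MVN likelihood per output;
    # logprior: callable; u0: starting value (e.g., the modular optimum).
    rng = np.random.default_rng(seed)
    u = np.asarray(u0, dtype=float).copy()
    lp = logprior(u) + sum(ll(u) for ll in loglik_js)
    draws = np.empty((n_samp, u.size))
    for t in range(n_samp):
        for d in range(u.size):        # Gibbs sweep over coordinates
            prop = u.copy()
            prop[d] += step * rng.standard_normal()
            lp_prop = logprior(prop) + sum(ll(prop) for ll in loglik_js)
            if np.log(rng.uniform()) < lp_prop - lp:
                u, lp = prop, lp_prop  # Metropolis accept
        draws[t] = u
    return draws
\end{verbatim}
The $J$ likelihood evaluations inside the loop are the natural place for the parallelism mentioned above.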
\subsection{Joint results} \label{sec:baysresults} Here we present views into our fully integrated calibration results. Our modular optimization(s) utilized a 500-random-multi-start BFGS implemented in parallel. Although more time is required for jointly optimizing four separate sub-objectives at each iteration, the overall waiting time for joint optimization turns out to be comparable to the univariate and PC-based variations. A post-optimization analysis summarizes the median number of {\tt nloptr} iterations until convergence for each method from 500 random initializations: 335 for PC-$K_d$, 114 for PC-$k_c$, 552 for PC-$C_d$, 430 for PC-$c_c$, and 117 for the unified approach. The speed of the unified approach may owe to its flatter/smoother surface, shown momentarily. For full Bayes via MCMC, the running time is also comparable to its univariate and PC counterparts, thanks to the multiple, highly parallelizable, and sparse OSS covariances. Figure \ref{fig:comb} provides a 2d look via the full posterior (bottom-left triangle), multi-start optimization (top-right), and univariate marginals (diagonal). This may be contrasted with any of the PC-level analogues in Supplement \ref{sec:pcap} to reveal how information is synthesized across outputs in this joint analysis. \begin{figure}[ht!] \centering \includegraphics[width=1\linewidth, trim= 0 0 20 80,clip=TRUE]{combined} \includegraphics[width=1\linewidth, trim=50 40 16 55,clip=TRUE]{heatcolorscale} \caption{Fully Bayesian (lower and diagonal) and modular optimization (upper) calibration results for $\mathbf{u}^1$ from the unified approach combining all 16 outputs. Heat colors are derived from the rank of (joint) log posterior probability of these parameter values. Bayesian results are from 100,000 MCMC samples after burn-in. Optimization results are from 500 converged optimizations under random initialization. ``+'' signs indicate the MAP values.} \label{fig:comb}% \end{figure} While the optimized solutions demonstrate how local dynamics challenge optimization, their MAP estimates and high-density regions are comparable to the fully Bayesian ones. From both approaches, dependence structures between the parameter pairs $(u_1, u_2)$ and $(u_3, u_4)$ can be observed, displaying an interesting relationship between the friction coefficients $(u_1, u_2)$ and friction exponents $(u_3, u_4)$. Our BHGE colleagues concluded that this pattern may be explained by an underlying turbulent-lubrication friction factor model from bulk-flow theory \citep{Hirs:1973}. Figure \ref{fig:pcandall} is lower resolution, providing a 1d look, but allows a more visually immediate comparison. \begin{figure}[ht!] \centering \includegraphics[width=1\linewidth, trim=0 20 0 0,clip=TRUE]{pc_vs_all} \caption{Side-by-side comparison between marginal posterior distributions of $\mathbf{u}^1_j, j = 1, \dots, 4$, from outputs $K_d, k_c, C_d$, and $c_c$ (labeled ``PC'') and $\mathbf{u}^1$ via the unified output (labeled ``All''). Dots indicate MAP values and error bars form 90\% credible intervals.} \label{fig:pcandall} \end{figure} Here we see how each output property contributes to the final global solution $\mathbf{u}^1$. Apparently, cross stiffness ($k_c$) is the most influential. Notice the similarity between the $\mathbf{u}^1$ posterior distribution and the $k_c$ PC posterior for $\mathbf{u}^1_2$ in each of the $u$ dimensions, especially in $u_1$. Further comparisons in 2d, via both the $\mathbf{u}^1$ marginals in Figure \ref{fig:comb} and the PC-calibrated $\mathbf{u}^1_2$ marginals shown in Figure \ref{fig:pca_1} from Supplement \ref{sec:pcap}, demonstrate dependence in the parameters $(u_1, u_2)$. When comparing the landscape of solutions in the upper triangle of Figure \ref{fig:comb} with those from Figures \ref{fig:pca_1}--\ref{fig:pca_2}, one immediately appreciates the benefit of a smoother posterior surface via full integration.
Notice in those figures that the optimized solutions from outputs $K_d$, $k_c$ and $C_d$ are quite concentrated in very small (dense red) peaks, in contrast to the much flatter surfaces from $c_c$ [Figure \ref{fig:pca_1}] and the fully integrated approach [Figure \ref{fig:comb}]. The full Bayes (lower and diagonal) marginals further exhibit this pattern: the fully integrated and PC-$c_c$ results exhibit flatter posteriors, with two ridges in $(u_1, u_2)$ and $(u_3, u_4)$; the other three PC-based results produce much more narrowly peaked posterior surfaces, especially in the $(u_1, u_2)$ dimensions. \blu{Interestingly, different outputs could contribute unequally to the fully integrated result [Figure \ref{fig:comb}], possibly due to differing amounts of signal versus noise in one output relative to the rest. In the fully integrated honeycomb analysis, output $k_c$ seems to pull the parameter posterior towards itself more than the other three outputs do, especially in the $(u_1, u_2)$ subspace (see Figure \ref{fig:pcandall}). Finally, we performed a limited sensitivity analysis, entertaining uniform rather than Beta$(2,2)$ priors on $\mathbf{u}^1$. As in \citeauthor{Huang:2018}'s scalar analysis, the qualitative results were similar, with heavier concentration of posterior mass near the boundaries of the study region.} \section{Prediction} \label{sec:mpred} \blu{ Inference for the calibration parameter is often the primary interest in a KOH-style calibration exercise. Integrating over the posterior predictive distribution, say at $\mathbf{x}_{\mathrm{(new)}}$, alongside $\mathbf{u}$ involves many of the same steps as above, except now with a MVN conditional distribution that has three components: field observed, simulated, and field unobserved (at $\mathbf{x}_{\mathrm{(new)}}$). In the univariate case and with ordinary, non-OSS surrogates, this is described in textbooks \citep[e.g.,][Chapter 8.1.5, and Exercises 8.3 \#2 \& 4]{gramacy2020surrogates}. With OSSs for univariate output, \citet{Huang:2018} demonstrate how the same conditioning is computationally tractable even for larger $N_M$ by extending the block diagonal structure to the three-component predictive setup. An updated variation on those equations is provided here as Eq.~(\ref{eq:vnew}) momentarily, extending that setup to the multi-output setting. This is accompanied by predictive results and comparisons for the honeycomb, beginning in the basis space of the first-PC (i.e., via $\mathbf{u}^1$), and then back in the original output spaces. } \blu{ \subsection{In-basis} \label{sec:pcpred} } \blu{ Consider first-PC field outputs $\hat{\mathbf{y}}^{M1}_j( \mathbf{x}_{\mathrm{(new)}}, \cdot ) + \hat{\mathbf{b}}^1_j(\mathbf{x}_{\mathrm{(new)}})$ for each of $j=1,\dots, J$, where $J=4$ for the honeycomb. This involves a direct, four-fold independent application of \citet{Huang:2018}. No new methodology is being developed here; however, we find this a useful warm-up, thinking ahead to the building blocks required for the full multi-output setting next in Section \ref{sec:orpred} via Eq.~(\ref{eq:lik}). It is also enlightening to compare these predictions to the simulation-only ones $\hat{\mathbf{y}}^{M1}_j( \mathbf{x}_{\mathrm{(new)}}, \cdot)$ as a lens into the nature of the bias $\hat{\mathbf{b}}^1_j(\mathbf{x}_{\mathrm{(new)}})$. }
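Each such application reduces to standard Gaussian conditioning; a generic sketch follows, with block names that are hypothetical stand-ins for the observed (field and simulation) and unobserved components.
\begin{verbatim}
import numpy as np

def mvn_condition(y_o, mu_o, mu_n, S_oo, S_no, S_nn):
    # Condition the 'new' block of a joint MVN on the 'observed' block.
    K = S_no @ np.linalg.inv(S_oo)
    mu = mu_n + K @ (y_o - mu_o)    # predictive mean
    Sigma = S_nn - K @ S_no.T       # predictive covariance
    return mu, Sigma
\end{verbatim}
The OSS structure is what makes the inverse above tractable even for large $N_M$, as described in \citet{Huang:2018}.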
\blu{ For example, Figure \ref{fig:pred_bias} shows leave-one-out cross-validated (LOO-CV) predictions of the field outputs, $\hat{\mathbf{y}}^{F1}_j( \mathbf{X}^F_{N_F})$, for $K_d$ and $k_c$ ($j=\{1,2\}$), pitting prediction without bias correction $\hat{\mathbf{y}}^{M1}_j( \mathbf{X}^F_{N_F}, \hat{\mathbf{u}}^1 )$ against the bias-corrected $\hat{\mathbf{y}}^{M1}_j( \mathbf{X}^F_{N_F}, \hat{\mathbf{u}}^1 ) + \hat{\mathbf{b}}^1_j(\mathbf{X}^F_{N_F})$. Here, $\hat{\mathbf{u}}^1$ comes via samples from the fully integrated posterior (\ref{eq:lik}). Similar views for the other two outputs may be found in Figure \ref{fig:pred_bias2} of Supplement \ref{sec:pred}. } \begin{figure}[ht!] \centering \includegraphics[width=.49\linewidth, trim=0 0 0 0,clip=TRUE]{pred_kd} \includegraphics[width=.49\linewidth, trim=0 0 0 0,clip=TRUE]{pred_kc} \caption{LOO-CV first-PC posterior predictive summaries for direct stiffness $K_d$ and cross stiffness $k_c$. Intervals trace out 95\% coverage; the black line has intercept zero and slope one.} \label{fig:pred_bias} \end{figure} \blu{ Observe in both cases that predictions without bias correction (blue) exhibit noticeable deviation from the calibration (diagonal) line with relatively large credible intervals, suggesting that a considerable portion of the dynamics in the field are not fully captured by simulation. Bias correction (red) is essential, not only for accuracy but also for confidence: both have appropriate coverage, but one has much smaller intervals. Yet these two outputs also exemplify a dual role played by the bias correction term $\hat{\mathbf{b}}^1_j(\mathbf{X}^F_{N_F})$. For $K_d$ (left), discrepancies generally widen as the observed output $K_d$ increases. For $k_c$ (right), the observed model discrepancy demonstrates a more complicated pattern, with relatively larger discrepancies at the two extremes. }
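A small helper of the following sort, purely illustrative, is all that is needed to summarize such LOO-CV comparisons by accuracy, empirical coverage, and interval width.
\begin{verbatim}
import numpy as np

def loo_summary(y, mu, sd):
    # y: held-out field values; mu, sd: LOO predictive means and sds.
    rmse = np.sqrt(np.mean((y - mu) ** 2))
    coverage = np.mean(np.abs(y - mu) <= 1.96 * sd)  # nominal 95%
    width = np.mean(2 * 1.96 * sd)                   # mean interval width
    return rmse, coverage, width
\end{verbatim}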
\blu{ \subsection{Original outputs} \label{sec:orpred} } \blu{ Posterior predictive sampling of the original $J \times K$ outputs involves re-centering, re-scaling, and back-rotation from all the PC bases. Once $\mathbf{u}^1$ values are sampled from Eq.~(\ref{eq:lik}), we can feed these posteriors into the multiple $J \times K$ OSSs $\hat{\mathbf{y}}_j^{Mk} (\mathbf{x}, \mathbf{u}^{1})$ with their own bias corrections: \begin{equation} \mathbf{y}_j^{Fk}(\mathbf{x}) = \mathbf{y}_j^{Mk} (\mathbf{x}, \mathbf{u}^{1}) + \mathbf{b}^k_j(\mathbf{x}) + \mathbf{\epsilon}^k_j, \quad j = 1, \dots, J, \; k = 1, \dots, K. \label{eq:pckoh2} \end{equation} Fitted discrepancies $\hat{\mathbf{b}}^k_j(\mathbf{x})$ are required to recover the complete multiple-output prediction on the original outputs, while only the lower-dimensional first-PC representations are needed for parameter learning and sampling at the PC (\ref{eq:pckoh}) and fully integrated levels (\ref{eq:lik}). } \blu{ For the $j^{\mathrm{th}}$ output property, raw predictions at a new site location $\mathbf{x}_{\mathrm{(new)}}$ are made by separately applying the OSS kriging equations from \cite{Huang:2018} $K=4$ times, but now in their PC-represented spaces, where the raw predictive means and variances $(\mu^k_{j\mathrm{(new)}}, \Sigma^k_{j\mathrm{(new)}} )$ in the basis spaces remain analytically tractable, \begin{align} \mu^k_{j\mathrm{(new)}} &= \Sigma^{jk}_{N_F', N_F} \mathbf{C}^{-1}(\mathbf{u}^1)[ \mathbf{y}^{Fk}_j - \Sigma^{jk}_{N_F, N_M}(\mathbf{u}^1) (\Sigma^{jk}_{N_M}) ^{-1} \mathbf{y}^{Mk}_j] \nonumber \\ & \quad\quad\quad + \Sigma^{jk\mathrm{(new)}}_{N_F, N_M}(\mathbf{u}^1) (\Sigma^{jk\mathrm{(new)}}_{N_M}) ^{-1} \mathbf{y}^{Mk}_{j\mathrm{(new)}}, \nonumber \\ \Sigma^k_{j\mathrm{(new)}} &= \label{eq:vnew} \Sigma^{jk\mathrm{(new)}}_{N_F'}(\mathbf{u}^1) + \Sigma^{jk\mathrm{(new)}}_{b} - \Sigma^{jk}_{N_F', N_F} \mathbf{C}^{-1}(\mathbf{u}^1) (\Sigma^{jk}_{N_F', N_F} )^\top \\ &\quad\quad\quad -\Sigma^{jk\mathrm{(new)}}_{N_F', N_M'}(\mathbf{u}^1) (\Sigma^{jk\mathrm{(new)}}_{N_M'})^{-1} [\Sigma^{jk\mathrm{(new)}}_{N_F', N_M'}(\mathbf{u}^1) ]^\top. \nonumber \end{align} Details on each new notational element are provided in Supplement \ref{sec:pred}. } \blu{ Seeking insights in an out-of-sample setting, we performed LOO-CV for the honeycomb as in Section \ref{sec:pcpred}, but this time in the original output space. Using Eq.~(\ref{eq:vnew}) we first obtain the full-rank raw predictions in the PC spaces, $$(\mathbf{y}^{Fk}_{j} \mid \mathbf{y}^{Mk}_{-i}, \mathbf{y}^{Fk}_{-i}, \mathbf{y}^{Mk}_{i}, \bm{\Phi}, \mathbf{u}^1) \sim \mathcal{N}_{i}(\mu^k_{ji}, \Sigma^k_{ji} ), \quad i =1, \dots, N_F, \; j = 1, \dots, J, \; k =1, \dots, K.$$ Once the $K$ raw predictions for the $j^{\mathrm{th}}$ output property $\mathbf{Y}_j^{FK} = (\mathbf{y}_j^{F1}, \dots, \mathbf{y}_j^{FK})$ in the basis spaces are sampled, they may be rotated back all at once using the full matrix $\mathbf{W}_j$ of the centered and scaled eigenvectors, through $\mathbf{W}^{-1}_j\mathbf{Y}_j^{FK} = \mathbf{Y}_j^F$, for $j=1, \dots, J$. In this fashion, the predictions thus obtained synthesize more information than a univariate analysis could. Since a common $\mathbf{u}^1$ sample is used for all $k$, predictive uncertainty may be dramatically reduced. We take ten thousand MCMC posterior samples of $\mathbf{u}^1$ and calculate predictive moments using the laws of total expectation and variance: \begin{align*} \mathbb{E}(\mathbf{y}^{Fk}_j \mid \cdot ) = \mathbb{E}[\mathbb{E}(\mathbf{y}^{Fk}_j \mid \cdot, \mathbf{u}^1 )], \quad \mbox{and} \quad \mathbb{V}(\mathbf{y}^{Fk}_j \mid \cdot ) = \mathbb{V}[\mathbb{E}(\mathbf{y}^{Fk}_j \mid \cdot, \mathbf{u}^1)] + \mathbb{E}[\mathbb{V}(\mathbf{y}^{Fk}_j \mid \cdot, \mathbf{u}^1)]. \end{align*} These quantities then approximate the full posterior (out-of-sample) predictive distribution at any $\mathbf{x}_{\mathrm{(new)}}$: $(\mathbf{y}^{Fk}_{j} \mid \mathbf{y}^{Mk}_{-i}, \mathbf{y}^{Fk}_{-i}, \mathbf{y}^{Mk}_{i}, \bm{\Phi})$, $\forall i,j,k$. }
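A compact sketch of this post-processing, under our own assumptions about how the draws and the centering/scaling constants are stored, is:
\begin{verbatim}
import numpy as np

def predictive_moments(mus, vars_):
    # mus, vars_: (n_draws, N) conditional means/variances, one row
    # per MCMC draw of u^1; laws of total expectation and variance.
    return mus.mean(axis=0), mus.var(axis=0) + vars_.mean(axis=0)

def back_to_original(S, W, center, scale):
    # S: (n, K) basis-space draws; W: (K, K) eigenvector matrix.
    Z = S @ np.linalg.inv(W)   # undo rotation (W.T if W is orthogonal)
    return Z * scale + center  # undo standardization
\end{verbatim}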
\blu{ Table \ref{tab:pred} summarizes the 16 LOO-CV RMSEs recovered back in the original $J=4$ output properties at $K=4$ frequencies. For example, these fully Bayesian LOO-CV predictions for direct stiffness $K_d$ at 28 Hz across $N_F=292$ sites have an RMSE of 1.460. This bests its univariate, separately calibrated counterpart in \citeauthor{Huang:2018} (Table 1), with an RMSE of 1.957. Although not surprising, we speculate this is likely due to better parameter identification and discrepancy learning through synthesis of a larger corpus of training data. } \begin{table}[ht!] \centering \begin{tabular}{r | r | r | r | r } Output & 28 Hz & 70 Hz & 126 Hz & 154 Hz \\ \hline $K_d$ & 1.460 & 1.407 & 1.313 & 1.442 \\ $k_c$ & 0.869 & 0.901 & 0.880 & 1.120\\ $C_d$ & 2.994 & 3.049 & 2.742 & 2.762 \\ $c_c$ & 3.867 & 2.492 & 2.028 & 1.535 \\ \end{tabular} \caption{LOO-CV RMSEs using the multiple-output approach. Rounded to 3 digits.} \label{tab:pred} \end{table} \blu{ For another view of LOO-CV performance on the original outputs, we plotted the posterior predictive mean and 95\% intervals over each observed field output. Figure \ref{fig:pred_ori} provides these for direct stiffness $K_d$ at the $K=4$ frequencies; results for the other three outputs are provided by Figures \ref{fig:pred_ori2}--\ref{fig:pred_ori4} in Supplement \ref{sec:pred}. Comparing $K_d$ at 28 Hz to its univariate counterpart in Figure 11 of \cite{Huang:2018}, this multiple-output approach yields more closely aligned out-of-sample predictions, with fewer and less substantial deviations from the diagonal (black) line. Moreover, there are no obvious sites with a larger level of predictive uncertainty (wider error bars). Notice in Figure \ref{fig:pred_ori} that there are only a few sites which are mis-predicted and that the error bars are more uniform than those in \citeauthor{Huang:2018}'s version. Across frequency, predictive uncertainty (error-bar width) decreases as the frequency level increases. The first three outputs $K_d$, $k_c$, and $C_d$ exhibit stable predictive performance. The last output, $c_c$, reflects high measurement error; compare Figure \ref{fig:pred_ori4} with Figure \ref{fig:pred_bias2}.} \begin{figure}[ht!] \centering \includegraphics[width=.49\linewidth, trim=0 0 0 0,clip=TRUE]{kd_1} \includegraphics[width=.49\linewidth, trim=0 0 0 0,clip=TRUE]{kd_2} \includegraphics[width=.49\linewidth, trim=0 0 0 0,clip=TRUE]{kd_3} \includegraphics[width=.49\linewidth, trim=0 0 0 0,clip=TRUE]{kd_4} \caption{Posterior predictive comparison for direct stiffness $K_d$ at 28, 70, 126, and 154 Hz. Red dots are the predicted means and black bars are 95\% credible intervals.} \label{fig:pred_ori} \end{figure} \section{Discussion} \label{sec:discuss} Motivated by a large-scale industrial multivariate calibration problem studying (honeycomb) seal flow dynamics from oil and gas research, we developed a new multivariate calibration method by extending a few features of a parsimonious univariate strategy based on on-site surrogates \citep[OSSs;][]{Huang:2018}. Tailored to effectively capture unique simulation features including a high-dimensional input space, local nonstationarity, missing simulations, and model fidelity at scale, this method is practical, but it is not without drawbacks. Univariate output applications, when replicated across diverse outputs, are not immune to the pitfalls of data-poor Bayesian learning for both a high-dimensional calibration parameter and an ill-posed model discrepancy. \blu{Therefore, in lieu of imposing elaborate cross-site correlation structures, we opted for a simpler approach.} Our solution, toward a multivariate OSS-based calibration, gathered together responses to several features of the honeycomb. \blu{Although this multivariate setup might not be ideal for all applications, especially when the ground-truth posterior distributions of the parameter $\mathbf{u}$ differ strikingly across different outputs, see e.g.
\cite{box}, the focus here is to leverage all available information through one coherent and tractable modeling framework.} A careful exploratory data analysis (EDA) suggested that a principal component-style basis representation could be effective for handling the honeycomb's quadruplet of highly linearly dependent output frequencies. We then updated the OSS framework for both modular/optimized and fully Bayesian implementations of PC-based calibration within the Kennedy \& O'Hagan framework. Our empirical results indicated improved parameter identification and posterior concentration for calibration parameters, compared to the (separate) univariate analog. Then, we designed an independent multi-output PC approach to gather these models across output properties. This was motivated by a lack of linear correlation observed in our EDA. The result is a unified analysis for the honeycomb, synthesizing millions of simulation runs in a matter of hours. It might be possible to entertain nonlinear dependency among output properties, as suggested by the differential equation (\ref{eq:trans}) known to govern relationships across outputs of the types studied in the honeycomb. On the other hand, our visual inspections (via EDA) did not reveal any notable patterns. So at this time, the merits of such an approach are speculative at best, although the extension is certainly possible. \blu{Given posterior draws of the calibration parameter, analytically tractable prediction is within reach. We evaluated empirical out-of-sample performance in the PC-basis spaces and back on the original outputs. This analysis reinforces the existence of nontrivial model discrepancies for all outputs as well as the integral role of KOH bias correction. Compared to its univariate, separately calibrated counterpart, this unified multiple-output solution enjoys improved out-of-sample accuracy. } \if00{ \subsubsection*{Acknowledgments} Authors JH and RBG are grateful for support from National Science Foundation grants DMS-1521702 and DMS-1821258. JH and RBG also gratefully acknowledge funding from a DOE LAB 17-1697 via subaward from Argonne National Laboratory for SciDAC/DOE Office of Science ASCR and High Energy Physics. We thank Andrea Panizza (BHGE) for early work on this project, and for initiating the line of research, Mirko Libraschi (BHGE) for many interesting discussions, and \blu{an AE and two referees for their thoughtful comments.} \fi
\section{INTRODUCTION} Recently, observations and theoretical considerations have linked long-duration GRBs with ultra-bright Type Ib/c supernovae (SNe; Galama et al. 1998, 2000; Bloom et al. 1999). The first candidate was provided by SN 1998bw and GRB 980425, and the recent HETE-II burst GRB 030329 has greatly enhanced the confidence in this association (Stanek et al. 2003; Hjorth et al. 2003). The extremely high energy released on a very short time scale suggests that GRBs involve the formation of a black hole (BH) via a catastrophic stellar collapse event or possibly a neutron star merger, implying that an inner engine could be built on an accreting BH. Among a variety of mechanisms for powering GRBs, the Blandford-Znajek (BZ) process (Blandford {\&} Znajek, 1977) has the unique advantage of providing ``clean'' (free of baryonic contamination) energy by extracting rotational energy from a BH and transferring it in the form of Poynting flow in the outgoing energy flux (Lee et al. 2000, hereafter Lee00; Li 2000c, hereafter Li00). Not long ago Brown et al. (2000, hereafter B00) worked out a specific scenario for a GRB-SN connection. They argued that the GRB is powered by the BZ process, and the SN is powered by the magnetic coupling (MC) process, which is regarded as one of the variants of the BZ process (Blandford 1999; van Putten 1999; Li 2000b, 2002; Wang, Xiao {\&} Lei 2002, hereafter W02). It is shown in B00 that about $10^{53}ergs$ are available to power both a GRB and a SN. However, they did not distinguish the fractions of the energy going to these two objects. More recently, van Putten and his collaborators (van Putten 2001; van Putten {\&} Levinson 2003, hereafter P03) worked out a poloidal topology for the open and closed magnetic field lines, in which the separatrix on the horizon is defined by a finite half-opening angle. The duration of a GRB is set by the lifetime of the rapid spin of the BH. It is found that GRBs and SNe are powered by a small fraction of the BH spin energy. This result is consistent with observations, i.e., GRB durations of tens of seconds, true GRB energies distributed around $5\times 10^{50}ergs$ (Frail et al. 2001), and aspherical SNe kinetic energies of $2\times 10^{51}ergs$ (Hoflich et al. 1999). Very recently, Lei et al. (2005, hereafter Lei05) proposed a scenario for GRBs in type Ib/c SNe invoking the coexistence of the BZ and MC processes. In Lei05 the GRB is powered by the BZ process, and the associated SN is powered by the MC process. The overall time scale of the GRB is fitted by the duration of the open magnetic flux on the horizon. Besides the features of high energy released in very short durations, most GRBs are highly variable, showing very rapid variations in flux on a time scale much shorter than the overall duration of the burst. Variability on a time scale of milliseconds has been observed in some long bursts (Norris et al. 1996; McBreen \textit{et al.}, 2001; Nakar and Piran, 2002). Unfortunately, the origin of the variations in the fluxes of GRBs remains unclear. In this paper we intend to discuss a mechanism for producing the variations in the fluxes of GRBs by virtue of the screw instability in the BH magnetosphere. It is well known that magnetic field configurations with both poloidal and toroidal components can be screw-unstable.
According to the Kruskal-Shafranov criterion the screw instability will occur if the toroidal magnetic field becomes so strong that the magnetic field line turns around itself once or more (Kadomtsev 1966; Bateman 1978). Some authors have addressed screw instabilities in the BH magnetosphere. Gruzinov (1999) argued that a magnetic field with a bunch of closed field lines connecting a Kerr BH with a disk can be screw-unstable, resulting in the release of magnetic energy with flares at the disk. Li (2000a) discussed the screw instability of the magnetic field in the BZ process, leading to a stringent upper bound on the BZ power. Wang et al. (2004, hereafter W04) studied the screw instability in the MC process. They concluded that this instability could occur at some place away from the inner edge of the disk, provided that the BH spin $a_ * $ and the power-law index $n$ for the variation of the magnetic field on the disk are greater than some critical values. In this paper we attempt to combine the screw instability of the magnetic field with the coexistence of the BZ and MC processes. To facilitate the description, henceforth we refer to the screw instability of the magnetic field occurring in the BZ and MC processes as SIBZ and SIMC, respectively. It is shown that both SIBZ and SIMC can occur, provided that the following parameters are greater than some critical values: (1) the BH spin, (2) the power-law index describing the variation of the magnetic field at the disk, and (3) the vertical height of the astrophysical load above the equatorial plane of the Kerr BH. The features of several GRB-SNe are well fitted in our model. (1) The overall duration of the GRBs is fitted by the evolution of the half-opening angles. (2) The true energies of several GRBs are fitted by the energy extracted in the BZ process, and the energies of the associated SNe are fitted by the energy transferred in the MC process. (3) The variability time scales of tens of msec in the light curves of several GRBs are fitted by two successive flares due to SIBZ. This paper is organized as follows. In $\S$ 2 we derive a criterion for SIBZ based on the Kruskal-Shafranov criterion and some simplified assumptions on the remote load. In $\S$ 3 we discuss the time scale and energy extraction from a Kerr BH in the context of the suspended accretion state. In $\S$ 4 we propose a scenario for the origin of the variation in the light curves of GRBs based on the flares arising from SIBZ. Finally, in $\S$ 5, we summarize the main results and discuss some issues related to our model. Throughout this paper the geometric units $G = c = 1$ are used. \section{SCREW INSTABILITY IN BH MAGNETOSPHERE} In W04 the criterion for SIMC is derived based on the following points: (1) the Kruskal-Shafranov criterion, (2) the mapping relation between the angular coordinate on the BH horizon and the radial coordinate on the disk, and (3) the calculations of the poloidal and toroidal components of the magnetic field at the disk. The criterion for SIBZ can be derived in an analogous way. However, the BZ process involves unknown astrophysical loads, to which both the mapping relation and the calculations of the poloidal and toroidal components of the magnetic field are related. In order to work out an analytical model we present some simplified assumptions as follows.
(1) The magnetosphere anchored in a Kerr BH and its surrounding disk is described in Boyer-Lindquist coordinates, in which the following Kerr metric parameters are involved (MacDonald and Thorne 1982, hereafter MT82). \begin{equation} \label{eq1} \left\{ {\begin{array}{l} \Sigma ^2 = \left( {r^2 + a^2} \right)^2 - a^2\Delta \sin ^2\theta ,\mbox{ }\rho ^2 = r^2 + a^2\cos ^2\theta ,\mbox{ } \\ \Delta = r^2 + a^2 - 2Mr,\mbox{ }\varpi = \left( {\Sigma \mathord{\left/ {\vphantom {\Sigma \rho }} \right. \kern-\nulldelimiterspace} \rho } \right)\sin \theta , \\ \alpha = {\rho \sqrt \Delta } \mathord{\left/ {\vphantom {{\rho \sqrt \Delta } \Sigma }} \right. \kern-\nulldelimiterspace} \Sigma . \\ \end{array}} \right. \end{equation} (2) The remote load is axisymmetric, being located evenly in a plane at some height above the disk. In Figure 1 the open magnetic field lines connect the BH horizon with the load. The symbols $L_{BZ} $ and $H_c $ represent the critical field line and the height of the remote load above the equatorial plane for the occurrence of SIBZ, respectively. (3) In Figure 1 the radius $r _{_S} $ is the critical radius of SIMC, which is determined by the criterion for the screw instability given in W04, \begin{equation} \label{eq2} {\left( {{2\pi \varpi _{_D} } \mathord{\left/ {\vphantom {{2\pi \varpi _{_D} } {L_{MC} }}} \right. \kern-\nulldelimiterspace} {L_{MC} }} \right)B_D^p } \mathord{\left/ {\vphantom {{\left( {{2\pi \varpi _{_D} } \mathord{\left/ {\vphantom {{2\pi \varpi _{_D} } {L_{MC} }}} \right. \kern-\nulldelimiterspace} {L_{MC} }} \right)B_D^p } {B_D^T }}} \right. \kern-\nulldelimiterspace} {B_D^T } < 1. \end{equation} \noindent In equation (\ref{eq2}) $L_{MC} $ is the critical length of the poloidal field line for SIMC, $B_D^p $ and $B_D^T $ are the poloidal and toroidal components of the magnetic field on the disk, respectively, and $\varpi _{_D} $ is the cylindrical radius on the disk, which reads \begin{equation} \label{eq3} \varpi _{_D} = \Sigma_{D} / \rho _{_D} = \xi M\chi _{ms}^2 \sqrt {1 + a_ * ^2 \xi ^{ - 2}\chi _{ms}^{ - 4} + 2a_ * ^2 \xi ^{ - 3}\chi _{ms}^{ - 6} } . \end{equation} \noindent where $\chi _{ms} \equiv \sqrt {r_{ms} / M} $ is defined by Novikov {\&} Thorne (1973) in terms of the radius of the innermost stable circular orbit (ISCO). (4) The angle $\theta _S $ in Figure 1 is the half-opening angle of the magnetic flux tube on the horizon, which is given by the mapping relation between the angular coordinate on the BH horizon and the radial coordinate on the disk as follows (Wang et al., 2003, hereafter W03), \begin{equation} \label{eq4} \cos \theta - \cos \theta _L = \int_1^\xi {\mbox{G}\left( {a_ * ;\xi ,n} \right)d\xi } , \end{equation} \noindent where \begin{equation} \label{eq5} \mbox{G}\left( {a_ * ;\xi ,n} \right) = \frac{\xi ^{1 - n}\chi _{ms}^2 \sqrt {1 + a_ * ^2 \chi _{ms}^{ - 4} \xi ^{ - 2} + 2a_ * ^2 \chi _{ms}^{ - 6} \xi ^{ - 3}} }{2\sqrt {\left( {1 + a_ * ^2 \chi _{ms}^{ - 4} + 2a_ * ^2 \chi _{ms}^{ - 6} } \right)\left( {1 - 2\chi _{ms}^{ - 2} \xi ^{ - 1} + a_ * ^2 \chi _{ms}^{ - 4} \xi ^{ - 2}} \right)} }. \end{equation} \noindent In equations (\ref{eq4}) and (\ref{eq5}) $\xi \equiv r / r_{ms} $ is defined as a radial parameter in terms of $r_{ms} $, and $n$ is a power-law index for the variation of the poloidal magnetic field at the disk, i.e., \begin{equation} \label{eq6} B_D^p \propto \xi ^{ - n}. \end{equation}
(5) The suspended accretion state is assumed due to the transfer of angular momentum from the BH to the disk (van Putten {\&} Ostriker, 2001). Analogous to equation (\ref{eq2}), the criterion for SIBZ can be expressed as \begin{equation} \label{eq7} {\left( {{2\pi R} \mathord{\left/ {\vphantom {{2\pi R} {L_{BZ} }}} \right. \kern-\nulldelimiterspace} {L_{BZ} }} \right)B_L^p } \mathord{\left/ {\vphantom {{\left( {{2\pi R} \mathord{\left/ {\vphantom {{2\pi R} {L_{BZ} }}} \right. \kern-\nulldelimiterspace} {L_{BZ} }} \right)B_L^p } {B_L^T }}} \right. \kern-\nulldelimiterspace} {B_L^T } < 1, \end{equation} \noindent where $L_{BZ} $ is the critical length of the poloidal field line for SIBZ, $B_L^p $ and $B_L^T $ are the poloidal and toroidal components of the magnetic field on the remote load, respectively, and $R$ is the cylindrical radius of the remote load with respect to the symmetry axis of the BH. The toroidal field component $B_L^T $ can be expressed via Ampere's law, \begin{equation} \label{eq8} B_L^T = {2I_L } \mathord{\left/ {\vphantom {{2I_L } R}} \right. \kern-\nulldelimiterspace} R, \end{equation} \noindent where $I_L $ is the electric current flowing in the loop $KM{M}'{K}'$ in Figure 1, and it reads \begin{equation} \label{eq9} I_L = \sqrt {{P_{BZ} } \mathord{\left/ {\vphantom {{P_{BZ} } {Z_L }}} \right. \kern-\nulldelimiterspace} {Z_L }} . \end{equation} The quantities $P_{BZ} $ and $Z_L $ in equation (\ref{eq9}) are the BZ power and the load resistance, respectively. The BZ power was derived in W02 as follows, \begin{equation} \label{eq10} {P_{BZ} } \mathord{\left/ {\vphantom {{P_{BZ} } {P_0 }}} \right. \kern-\nulldelimiterspace} {P_0 } = 2a_ * ^2 \int_0^{\theta _S } {\frac{k\left( {1 - k} \right)\sin ^3\theta d\theta }{2 - \left( {1 - q} \right)\sin ^2\theta }} , \end{equation} \noindent where $q \equiv \sqrt {1 - a_ * ^2 } $ is a parameter depending on the BH spin, and $k \equiv \Omega _F / \Omega _{_H} $ is the ratio of the angular velocity of the field lines to that of the BH horizon. The quantity $P_0 $ is defined by \begin{equation} \label{eq11} P_0 \equiv \left( {B_H^p } \right)^2M^2 \approx 6.59\times 10^{50}\times B_{15}^2 \left( {M \mathord{\left/ {\vphantom {M {M_ \odot }}} \right. \kern-\nulldelimiterspace} {M_ \odot }} \right)^2erg \cdot s^{ - 1}, \end{equation} \noindent where $B_{15} $ represents the magnetic field at the BH horizon in units of $10^{15}gauss$. MT82 argued in a speculative way that the ratio $k$ will be regulated to about 0.5 by the BZ process itself, which corresponds to the optimal BZ power with impedance matching. Taking the impedance matching into account, we have the remote load resistance equal to the horizon resistance, and they read \begin{equation} \label{eq12} \Delta Z_L = \Delta Z_H = R_H \frac{\rho _{_H} d\theta }{2\pi \varpi _{_H} }, \end{equation} \noindent where $R_H = 4\pi = 377ohm$ is the surface resistivity of the BH horizon (MT82).
Thus we have $Z_L $ and $Z_H $ expressed as \begin{equation} \label{eq13} Z_L = Z_H = \int_0^{\theta _S } {R_H \frac{\rho _{_H} d\theta }{2\pi \varpi _{_H} }} = \int_0^{\theta _S } {\frac{2\rho _{_H} d\theta }{\varpi _{_H} }}. \end{equation} Incorporating equations (\ref{eq8})---(\ref{eq13}), we can calculate $B_L^T $ in terms of the cylindrical radius $R$. On the other hand, the poloidal magnetic field $B_L^P $ at the radius $R$ of the load can be determined by the conservation of the magnetic flux, i.e., \begin{equation} \label{eq14} B_H^P 2\pi \varpi _{_H} \rho _{_H} d\theta = B_L^P 2\pi RdR. \end{equation} \noindent From equation (\ref{eq14}) we have \begin{equation} \label{eq15} B_L^P = \frac{B_H^P \varpi _{_H} \rho _{_H} }{R}\frac{d\theta }{dR}. \end{equation} Assuming that the height of the planar load above the equatorial plane of the Kerr BH is $H$, we have an approximate relation between the angle $\theta $ and the radius $R$ as follows, \begin{equation} \label{eq16} \tan \theta = R \mathord{\left/ {\vphantom {R H}} \right. \kern-\nulldelimiterspace} H. \end{equation} \noindent Substituting equation (\ref{eq16}) into equation (\ref{eq15}), we have \begin{equation} \label{eq17} B_L^P = \frac{B_H^P \varpi _{_H} \rho _{_H} \cos ^2\theta }{HR}. \end{equation} \noindent Incorporating equations (\ref{eq8}) and (\ref{eq17}) with the criterion (\ref{eq7}), we have \begin{equation} \label{eq18} \frac{\pi B_H^P \varpi _{_H} \rho _{_H} \sin \theta _S \cos ^2\theta _S }{H\sqrt {{P_{BZ} } \mathord{\left/ {\vphantom {{P_{BZ} } {Z_L }}} \right. \kern-\nulldelimiterspace} {Z_L }} } < 1, \end{equation} \noindent where the relation $\sin \theta _S = R / L_{BZ} $ is used. The criterion (\ref{eq18}) implies that SIBZ will occur provided that the height of the load is greater than the critical height, i.e., $H > H_c $, where \begin{equation} \label{eq19} h_c \equiv {H_c } \mathord{\left/ {\vphantom {{H_c } M}} \right. \kern-\nulldelimiterspace} M = \frac{\pi B_H^P \varpi _{_H} \rho _{_H} \sin \theta _S \cos ^2\theta _S }{M\sqrt {{P_{BZ} } \mathord{\left/ {\vphantom {{P_{BZ} } {Z_L }}} \right. \kern-\nulldelimiterspace} {Z_L }} }. \end{equation} As argued in W04 the angle $\theta _S $ can be determined by the criterion of SIMC with the mapping relation between the BH horizon and the disk, and it is a function of the BH spin $a_ * $ and the power-law index $n$, i.e., $\theta _S \left( {a_ * ,n} \right)$. Inspecting equations (\ref{eq1}), (\ref{eq10}) and (\ref{eq19}), we find that $h_c $ is a dimensionless parameter also depending on the parameters $a_ * $ and $n$, i.e., $h_c = h_c \left( {a_ * ,n} \right)$. By using equations (\ref{eq2}) and (\ref{eq19}) we obtain the contours of $\theta _S \left( {a_ * ,n} \right)$ and $h_c \left( {a_ * ,n} \right)$ in the $a_ * - n$ parameter space as shown in Figure 2. Inspecting Figure 2, we find the following features of the contours: (1) The values of $\theta _S $ increase and those of $h_c $ decrease with increasing $n$ for a given BH spin $a_ * $. (2) The values of $\theta _S $ increase and those of $h_c $ decrease with increasing $a_ * $ for a given $n$. (3) Both SIMC and SIBZ will occur, provided that the parameters $a_ * $, $n$ and $h_c $ are greater than some critical values. As shown in Figure 2c the shaded region indicates the value ranges of $a_ * $ and $n$ in which both $\theta _S > 0$ and $h_c > 100$ are satisfied. Thus the occurrence of SIMC and SIBZ is guaranteed for the value ranges of $a_ * $ and $n$ in the shaded region.
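For readers wishing to reproduce such contours, the following minimal numerical sketch (ours, not the original code) evaluates the ISCO radius, the mapping integrand $\mbox{G}$ of equations (\ref{eq4}) and (\ref{eq5}), and the BZ power integral (\ref{eq10}) with $k=0.5$.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def r_ms_over_M(a):              # prograde ISCO radius in units of M
    z1 = 1 + (1 - a*a)**(1/3) * ((1 + a)**(1/3) + (1 - a)**(1/3))
    z2 = np.sqrt(3*a*a + z1*z1)
    return 3 + z2 - np.sqrt((3 - z1) * (3 + z1 + 2*z2))

def G(xi, a, n):                 # Eq. (5); chi2 stands for chi_ms^2
    chi2 = r_ms_over_M(a)
    num = xi**(1 - n) * chi2 * np.sqrt(1 + a*a/(chi2**2 * xi**2)
                                       + 2*a*a/(chi2**3 * xi**3))
    den = 2*np.sqrt((1 + a*a/chi2**2 + 2*a*a/chi2**3)
                    * (1 - 2/(chi2*xi) + a*a/(chi2**2 * xi**2)))
    return num / den

def theta_from_xi(xi, a, n, theta_L=np.pi/2):     # Eq. (4)
    I, _ = quad(G, 1.0, xi, args=(a, n))
    return np.arccos(np.clip(np.cos(theta_L) + I, -1.0, 1.0))

def P_BZ_over_P0(a, theta_S, k=0.5):              # Eq. (10)
    q = np.sqrt(1 - a*a)
    f = lambda th: k*(1 - k)*np.sin(th)**3 / (2 - (1 - q)*np.sin(th)**2)
    I, _ = quad(f, 0.0, theta_S)
    return 2*a*a*I
\end{verbatim}
Here $\theta_L = \pi/2$ is only a placeholder boundary angle; the appropriate value depends on the field configuration in Figure 1.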
\section{TIME SCALE OF A GRB AND ENERGY EXTRACTION FROM A ROTATING BH} There are several scenarios invoking the BZ process for powering GRBs, and the main differences among these scenarios lie in the environment of the spinning BH and in the approaches to the duration of a GRB. These scenarios are outlined as follows. \textbf{Model I}: In Lee00 the energy is extracted magnetically from a rotating BH without a disk, and the duration of a GRB is estimated as the time for extracting all the rotational energy of the central BH via the BZ process. \textbf{Model II}: It is argued that the energy is extracted magnetically from a rotating BH with a transient disk, and the duration of a GRB is estimated as the time for the disk to plunge into the BH (Lee {\&} Kim 2002; Wang et al. 2002). \textbf{Model III}: In Li00 the energy is extracted magnetically from a rotating BH with a stationary torus in the state of suspended accretion, and the duration of a GRB is estimated roughly as the time for extracting all the rotational energy of the central BH via the BZ process. \textbf{Model IV}: In B00 the energy is extracted magnetically from a rotating BH with a non-stationary disk in the state of suspended accretion, and the duration of a GRB is determined by the presence of the disk. \textbf{Model V}: In P03 the energy is extracted magnetically from a rotating BH with a torus in the state of suspended accretion, and the duration of a GRB is determined by the instability of the disk. \textbf{Model VI}: In Lei05 the energy is extracted magnetically from a rotating BH with a thin disk in the state of suspended accretion, and the duration of a GRB is determined by the lifetime of the half-opening angle. \textbf{This Model:} It is a modified version of Model VI, in which the effects of SIMC and SIBZ are taken into account. Compared with Model VI some features and advantages are given as follows. (1) The magnetic field configuration in Model VI is built based on the conservation of the closed magnetic flux connecting the BH with the disk, taking precedence over the open magnetic flux, resulting in closed field lines connecting the half-opening angle $\theta _{BZ} $ at the horizon with the disk extending to infinity. In this model, however, the closed field lines are confined by SIMC within a region of limited radius $r _{_S} $ (about a few Schwarzschild radii, as shown in Table 4), which is consistent with the collapsar model for GRB-SNe. (2) As argued in the next section, the variability time scales of the light curves of GRBs are modulated by two successive flares due to SIBZ. The main features of the above models for GRBs are summarized in Table 1. In Model VI the duration of a GRB is regarded as the lifetime of the half-opening angle $\theta _{BZ} $, which is based on the evolution of the rotating BH. The same procedure can be applied to this model, except that the angle $\theta _{BZ} $ is replaced by $\theta _S $ arising from SIMC. It is found that the characteristics of the BH evolution in this model are almost the same as those given in Model VI, as shown by the $a_ * - n$ parameter spaces in Figure 3. For several GRB-SNe, the observed energy $E_\gamma $ and duration $T_{90} $ can be fitted by adjusting the parameters $n$ and $B_{15} $.
The energy $E_{SN} $ can be predicted as shown in Table 2, where the values of $n$, $B_{15} $ and $E_{SN} $ fitted to five GRBs invoking Model VI and this model are shown in the left and right sub-columns, respectively. The fractions of the energy extracted from a rotating BH via the BZ and MC processes are defined respectively as $f_{BZ} $ and $f_{MC} $, and they read \begin{equation} \label{eq20} f_{BZ} = \frac{E_{BZ} }{E_{BZ} + E_{MC} }, \quad f_{MC} = \frac{E_{MC} }{E_{BZ} + E_{MC} }, \end{equation} \noindent where $E_{BZ} $ and $E_{MC} $ are the energies extracted in the BZ and MC processes, respectively, \begin{equation} \label{eq21} E_{BZ} = \int_0^{t_{BZ} } {P_{BZ} dt} , \quad E_{MC} = \int_0^{t_{BZ} } {P_{MC} dt} . \end{equation} In equation (\ref{eq21}) $t_{BZ} $ is defined as the lifetime of the angle $\theta _S $, which can be calculated by the same procedure given in Lei05. The MC power in equation (\ref{eq21}) is expressed as (W04) \begin{equation} \label{eq22} {P_{MC} } \mathord{\left/ {\vphantom {{P_{MC} } {P_0 }}} \right. \kern-\nulldelimiterspace} {P_0 } = 2a_ * ^2 \int_{\theta _S }^{\theta _L } {\frac{\beta \left( {1 - \beta } \right)\sin ^3\theta d\theta }{2 - \left( {1 - q} \right)\sin ^2\theta }} , \end{equation} \noindent where the parameter $\beta \equiv \Omega _F / \Omega _{_H} = \Omega _{_D} / \Omega _{_H} $ is the ratio of the angular velocity of the magnetic field lines to that of the BH. In Model VI the BH spin $a_\ast ^{GRB} $ corresponds to the time when the half-opening angle $\theta _{BZ} = 0$ and the BZ power $P_{BZ} = 0$, while $a_\ast ^{SN} $ corresponds to the MC power $P_{MC} = 0$. In this model $a_\ast ^{GRB} $ and $a_\ast ^{SN} $ have the same meanings as given in Model VI, except that $\theta _{BZ} $ is replaced by $\theta _S $. As shown in Table 3, the values of $a_\ast ^{GRB} $, $f_{BZ} $ and $f_{MC} $ fitted to five GRBs invoking Model VI and this model are shown in the left and right sub-columns, respectively. Inspecting the data in the left and right sub-columns of Table 2 and Table 3, we find that $f_{BZ} $ and $f_{MC} $ in this model are, respectively, greater and less than their counterparts in Model VI. This result arises from the effects of SIMC and SIBZ: the half-opening angle $\theta _S $ in this model is greater than the half-opening angle $\theta _{BZ} $ in Model VI, as argued in W04.
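As an illustration, the following sketch evaluates the power integrals (\ref{eq10}) and (\ref{eq22}) and the resulting fraction $f_{BZ}$ under a simplifying assumption of ours: treating $P_{BZ}$ and $P_{MC}$ as roughly constant over the burst, so that the energy fractions of equation (\ref{eq20}) reduce to power fractions. The profile $\beta(\theta)$ is left as a user-supplied function, since it depends on the mapping to the disk.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def power_over_P0(a, theta_lo, theta_hi, frac):
    # Common form of Eqs. (10) and (22): frac(theta) is k or beta(theta).
    q = np.sqrt(1 - a*a)
    g = lambda th: (frac(th) * (1 - frac(th)) * np.sin(th)**3
                    / (2 - (1 - q) * np.sin(th)**2))
    I, _ = quad(g, theta_lo, theta_hi)
    return 2*a*a*I

def f_BZ(a, theta_S, theta_L, beta, k=0.5):
    P_bz = power_over_P0(a, 0.0, theta_S, lambda th: k)   # Eq. (10)
    P_mc = power_over_P0(a, theta_S, theta_L, beta)       # Eq. (22)
    return P_bz / (P_bz + P_mc)       # constant-power approximation
\end{verbatim}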
\section{AN EXPLANATION FOR VARIABILITIES IN GRB LIGHT CURVES} As is well known, bursts are divided into long and short bursts according to their $T_{90}$. Most GRBs are highly variable, showing 100{\%} variations in flux on a time scale much shorter than the overall duration of the burst. The bursts seem to be composed of individual pulses, with a pulse being the ``building block'' of the overall light curve. The variability time scale $\delta t$ is much shorter than the GRB duration $T_{90}$; the former can be more than a factor of 10$^{4}$ smaller than the latter (Piran, 2004). However, the origin of the variability in the light curves of GRBs remains unclear. In this paper, we connect the variability with the screw instability of the magnetic field in the BH magnetosphere, and suggest that the variability could be fitted by a series of flares arising from SIBZ, each of which accompanies the release of the energy of the toroidal magnetic field. An equivalent circuit $ML{L}'{M}'$ for SIBZ is shown in Figure 4a, which consists of two adjacent magnetic surfaces $M{M}'$ and $L{L}'$ connecting the BH horizon and the remote load. An inductor is introduced into the equivalent circuit by considering that the toroidal magnetic field threads the loop $ML{L}'{M}'$; the inductor is represented by the symbol $\Delta L$ in Figure 4a. The inductance $\Delta L$ in the circuit is defined by \begin{equation} \label{eq23} \Delta L = {\Delta \Psi ^T} \mathord{\left/ {\vphantom {{\Delta \Psi ^T} {I_L }}} \right. \kern-\nulldelimiterspace} {I_L }, \end{equation} \noindent where $I_L $ is given by equation (\ref{eq9}), and $\Delta \Psi ^T$ is the flux of the toroidal magnetic field threading the circuit. The flux $\Delta \Psi ^T$ can be integrated over the loop $ML{L}'{M}'$ as follows, \begin{equation} \label{eq24} \Delta \Psi ^T = \oint_{loop} {B^T\sqrt {g_{rr} g_{\theta \theta } } dr} d\theta , \end{equation} \noindent where the toroidal magnetic field measured by ``zero-angular-momentum observers'' is \begin{equation} \label{eq25} B^T = {2I_L } \mathord{\left/ {\vphantom {{2I_L } {\left( {\alpha \varpi } \right)}}} \right. \kern-\nulldelimiterspace} {\left( {\alpha \varpi } \right)}, \end{equation} \noindent where $\alpha $ is the lapse function defined in equation (\ref{eq1}) (MT82). Since the geometric shapes of the magnetic surfaces are unknown, we assume that the surfaces are formed by rotating the two radial segments $M{M}'$ and $L{L}'$, which span the angle $\Delta \theta $ as shown in Figure 4b. Thus the flux $\Delta \Psi ^T$ can be calculated easily by integrating over the region $ML{L}'{M}'$. Incorporating equations (\ref{eq23})---(\ref{eq25}), we obtain $\Delta L$ as follows, \begin{equation} \label{eq26} \Delta L = 2\csc \theta \Delta \theta \int_{r_H }^{L_{BZ} } {{\rho ^2dr} \mathord{\left/ {\vphantom {{\rho ^2dr} \Delta }} \right. \kern-\nulldelimiterspace} \Delta } = 2M\Delta \theta \csc \theta _S \int_{\left( {1 + q} \right)}^{{L_{BZ} } \mathord{\left/ {\vphantom {{L_{BZ} } M}} \right. \kern-\nulldelimiterspace} M} {\frac{\left( {\tilde {r}^2 + a_ * ^2 \cos ^2\theta _S } \right)}{\left( {\tilde {r}^2 + a_ * ^2 - 2\tilde {r}} \right)}d\tilde {r}} , \end{equation} \noindent where $\tilde {r}$ is defined as $\tilde {r} \equiv r / M$. Although the detailed process of SIBZ is still unclear, we suggest that the energy release in one event of SIBZ is roughly divided into two stages: a process for releasing and a process for retrieving magnetic energy. The two processes can be simulated as the corresponding processes in an equivalent $R-L$ circuit. The detailed analysis is given as follows. At the first stage the energy of the toroidal magnetic field is released as soon as SIBZ occurs, being dissipated in the load and the plasma fluid in a way analogous to a discharging process in an equivalent $R-L$ circuit. The equation governing the discharging process in the $R-L$ circuit is \begin{equation} \label{eq27} \Delta L\frac{dI^P}{dt} + \left( {\Delta Z_{PLSM} + \Delta Z_L } \right)I^P = 0, \end{equation} \noindent where $\Delta Z_{PLSM} $ is the resistance of the plasma fluid in the BH magnetosphere. At the second stage the energy of the toroidal magnetic field is recovered due to the rotation of the BH, and the process for retrieving magnetic energy is modulated by a charging process in an equivalent $R-L$ circuit.
The equation governing the charging process in the $R-L$ circuit is \begin{equation} \label{eq28} \Delta L\frac{dI^P}{dt} + \left( {\Delta Z_H + \Delta Z_L } \right)I^P = \Delta \varepsilon _{_H} . \end{equation} In equations (\ref{eq27}) and (\ref{eq28}) $\Delta L$ is the inductance of the circuit $ML{L}'{M}'$ and $\Delta Z_{PLSM} $ is the resistance of the plasma in the BH magnetosphere. Incorporating equations (\ref{eq12}) and (\ref{eq26}), we have \begin{equation} \label{eq29} {\Delta L} \mathord{\left/ {\vphantom {{\Delta L} {\Delta Z_H }}} \right. \kern-\nulldelimiterspace} {\Delta Z_H } = \left( {9.85\times 10^{ - 6}\sec } \right)\frac{\left( {M \mathord{\left/ {\vphantom {M {M_ \odot }}} \right. \kern-\nulldelimiterspace} {M_ \odot }} \right)}{2 - \left( {1 - q} \right)\sin ^2\theta _S }\int_{\left( {1 + q} \right)}^{{L_{BZ} } \mathord{\left/ {\vphantom {{L_{BZ} } M}} \right. \kern-\nulldelimiterspace} M} {\frac{\left( {\tilde {r}^2 + a_ * ^2 \cos ^2\theta _S } \right)}{\left( {\tilde {r}^2 + a_ * ^2 - 2\tilde {r}} \right)}d\tilde {r}} . \end{equation} Combining the initial conditions in the first and second stages, we have the solutions of equations (\ref{eq27}) and (\ref{eq28}) as follows, \begin{equation} \label{eq30} I_{disch}^p = I_{initial}^p e^{ - t \mathord{\left/ {\vphantom {t {\tau _1 }}} \right. \kern-\nulldelimiterspace} {\tau _1 }}, \end{equation} \begin{equation} \label{eq31} I_{ch}^p = I_{steady}^p \left( {1 - e^{ - t \mathord{\left/ {\vphantom {t {\tau _2 }}} \right. \kern-\nulldelimiterspace} {\tau _2 }}} \right). \end{equation} In equations (\ref{eq30}) and (\ref{eq31}) $I_{initial}^p $ and $I_{steady}^p $ are the initial and steady currents, respectively, while $I_{disch}^p $ and $I_{ch}^p $ represent the discharging and charging currents, respectively. The characteristic time scales in equations (\ref{eq30}) and (\ref{eq31}) are given respectively by $\tau _1 $ and $\tau _2 $, which read \begin{equation} \label{eq32} \tau _1 \equiv {\Delta L} \mathord{\left/ {\vphantom {{\Delta L} {\left( {\Delta Z_{PLSM} + \Delta Z_L } \right)}}} \right. \kern-\nulldelimiterspace} {\left( {\Delta Z_{PLSM} + \Delta Z_L } \right)}, \end{equation} \begin{equation} \label{eq33} \tau _2 \equiv {\Delta L} \mathord{\left/ {\vphantom {{\Delta L} {\left( {\Delta Z_H + \Delta Z_L } \right)}}} \right. \kern-\nulldelimiterspace} {\left( {\Delta Z_H + \Delta Z_L } \right)}. \end{equation} \noindent From equations (\ref{eq32}) and (\ref{eq33}) we have the ratio of $\tau _1 $ to $\tau _2 $ given by \begin{equation} \label{eq34} {\tau _1 } \mathord{\left/ {\vphantom {{\tau _1 } {\tau _2 }}} \right. \kern-\nulldelimiterspace} {\tau _2 } = {2\Delta Z_H } \mathord{\left/ {\vphantom {{2\Delta Z_H } {\left( {\Delta Z_{PLSM} + \Delta Z_H } \right)}}} \right. \kern-\nulldelimiterspace} {\left( {\Delta Z_{PLSM} + \Delta Z_H } \right)}, \end{equation} \noindent where $\Delta Z_H = \Delta Z_L $ is used in deriving equation (\ref{eq34}). In contrast to the disk plasma of perfect conductivity, the resistance $\Delta Z_{PLSM} $ cannot be neglected, based on the following considerations: (1) the plasma fluid becomes very tenuous after leaving the inner edge of the disk, significantly augmenting the resistance due to an increasing radial velocity onto the BH; (2) the conductivity of the plasma fluid is highly anisotropic, i.e., the conductivity in the cross-field direction is greatly impeded by the presence of the strong magnetic field threading the BH (Punsly 2001).
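A rough numerical sketch of equations (\ref{eq29}) and (\ref{eq33}), of the sort used to produce the variability estimates discussed next, is given below. Note that the radial integrand in equation (\ref{eq29}) is singular at the horizon $\tilde{r} = 1+q$, so the sketch starts the integration a small offset above it; that regularization choice is ours.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def tau2_seconds(a, theta_S, L_over_M, M_over_Msun, eps=1e-3):
    q = np.sqrt(1 - a*a)
    f = lambda r: (r*r + a*a*np.cos(theta_S)**2) / (r*r + a*a - 2*r)
    I, _ = quad(f, (1 + q)*(1 + eps), L_over_M)   # Eq. (29) integral
    ratio = 9.85e-6 * M_over_Msun * I / (2 - (1 - q)*np.sin(theta_S)**2)
    return 0.5 * ratio    # tau_2 = DL/(2 DZ_H), since DZ_L = DZ_H
\end{verbatim}
In CASE I below, the variability estimate is then $(\delta t)_I \approx 5\,\tau_2$.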
Although the value of $\Delta Z_{PLSM} $ is unknown, we can roughly estimate the variability time scales of GRBs by combining equation (\ref{eq34}) with the different cases given as follows. \textbf{CASE I: }$\Delta Z_{PLSM} \gg \Delta Z_H $ leads to $\tau _1 \ll \tau _2 $, and the time scale of two successive flares arising from SIBZ is dominated by $\tau _2 $. \textbf{CASE II: }$\Delta Z_{PLSM} \approx \Delta Z_H $ leads to $\tau _1 \approx \tau _2 $, and the time scale of two successive flares arising from SIBZ is about $2\tau _2 $. \textbf{CASE III: }$\Delta Z_{PLSM} \ll \Delta Z_H $ leads to $\tau _1 \approx 2\tau _2 $, and the time scale of two successive flares arising from SIBZ is about $3\tau _2 $. Therefore the variability time scales are insensitive to the value of the unknown $\Delta Z_{PLSM} $. From equation (\ref{eq31}) we find that the charging current attains 99.3{\%} of $I_{steady}^p $ in the relaxation time $t_{relax} = 5\tau _2 $, implying the recovery of the toroidal magnetic field, and the variability time scales of GRBs can be estimated as follows, \begin{equation} \label{eq35} \left( {\delta t} \right)_I \equiv \left( {t_{SIBZ} } \right)_I \approx 5\tau _2 ,\quad \mbox{for} \quad \Delta Z_{PLSM} \gg \Delta Z_H , \end{equation} \begin{equation} \label{eq36} \left( {\delta t} \right)_{II} \equiv \left( {t_{SIBZ} } \right)_{II} \approx 10\tau _2 ,\quad \mbox{for} \quad \Delta Z_{PLSM} \approx \Delta Z_H , \end{equation} \begin{equation} \label{eq37} \left( {\delta t} \right)_{III} \equiv \left( {t_{SIBZ} } \right)_{III} \approx 15\tau _2 ,\quad \mbox{for} \quad \Delta Z_{PLSM} \ll \Delta Z_H . \end{equation} In \textbf{CASE I} we have $\tau _1 \ll \tau _2 $, implying that the magnetic energy is released much more rapidly in the first stage than it is recovered in the second stage. Thus \textbf{CASE I} seems most consistent with the observed features of the light curves, i.e., an individual pulse is a fast-rise exponential decay (FRED) with an average rise-to-decay ratio of 1:3 (Norris et al. 1996). By using equations (\ref{eq29}), (\ref{eq33}) and (\ref{eq35})---(\ref{eq37}) we obtain the variability time scales in the light curves of four GRBs in the three different cases, as shown in Table 4. In addition, we obtain the curves of $\left( {\delta t} \right)_I $ versus $a_ * $ with fixed values of $n$ for GRB 990712, GRB 991208 and GRB 021216, as shown in Figure 5. Inspecting Table 4, we find that the variability time scales of tens of msec in the light curves of GRBs can be modulated by two successive flares due to SIBZ, which accompany the BZ process in powering the GRBs. We also find that the variability time scales of the four GRBs are generally three orders of magnitude less than the corresponding durations $T_{90} $, which is consistent with the observations. From Figure 5 we find that the curves of $\left( {\delta t} \right)_I $ versus $a_ * $ are almost the same, increasing linearly with a rather small slope as $a_ * $ decreases. The values of $\left( {\delta t} \right)_I $ remain less than 20 msec during the occurrence of SIBZ for these GRBs. \section{DISCUSSION} \subsection{Mechanism for the recurrent occurrence of SIBZ} In this paper we discuss the possibility that SIBZ modulates the light curves of GRBs. One of the puzzles is what mechanism leads to the recurrent occurrence of SIBZ and prevents the magnetic field from settling into a screw-stable configuration.
As argued in MT82, the BH magnetosphere consists of a series of magnetic surfaces connecting the horizon with the loads, and the total electric current $I$ flowing downward through an \textbf{\textit{m}}-loop is proportional to the toroidal magnetic field $B^T$ by Ampere's law. It is argued that these magnetic surfaces can be regarded as an equivalent circuit, in which each loop consists of two adjacent magnetic surfaces (W02). The total electric current flowing downward through an \textbf{\textit{m}}-loop is exactly equal to the algebraic sum of the poloidal currents flowing in the loops (W03). As shown in Figure 1, the critical magnetic surface (henceforth CMS) for SIBZ is represented by the critical line $M{M}'$. According to criterion (\ref{eq7}), only the toroidal magnetic field outside the CMS is depressed by SIBZ, while the toroidal and poloidal components of the magnetic field within the CMS are little affected. On the other hand, the poloidal magnetic field outside the CMS persists in spite of the occurrence of SIBZ, and the toroidal magnetic field outside the CMS will be recovered because the rotation of the BH twists the poloidal magnetic field. Therefore, the rotation of the BH is the main mechanism for the recurrent occurrence of SIBZ in the BH magnetosphere. \subsection{Rotation period of BH and time scales of recovery of toroidal magnetic fields} If we take the BH mass to be $10M_\odot$, the rotation period of the BH is only $\sim 1$ msec for the BH spin required by the criterion of SIBZ. This result implies that the toroidal magnetic field cannot be recovered within one rotation period of the BH. How can we explain the discrepancy between the BH rotation period and the time scales required for the recovery of the toroidal field? In spite of the lack of detailed knowledge of the screw instability, it is helpful to imagine a magnetic field line as an elastic string. The rotating BH continually twists the field line, while the field line tries to untwist itself. Once the toroidal component of the magnetic field is strong enough to satisfy the criterion, the screw instability will occur, just as a twisted elastic string releases its energy under appropriate conditions. In our model, the twisting of a field line is represented by a transient accumulation of magnetic energy in the inductor $\Delta L$ of the equivalent $R$-$L$ circuit, which corresponds to the growth of the toroidal magnetic field, and the variability time scales of tens of msec in the light curves of several GRBs are fitted as the time interval between two successive flares due to SIBZ. Thus the threshold toroidal magnetic field (magnetic energy) cannot be recovered in only one rotation of the BH, just as an elastic string must be twisted through more than one turn before it releases its energy. \subsection{An explanation for GRBs with XRFs and XRRs} Recently, much attention has been paid to the issue of X-ray flashes (XRFs), X-ray-rich gamma-ray bursts (XRRs) and GRBs, since HETE-2 provided strong evidence that the properties of these three kinds of bursts form a continuum, and therefore these objects are probably the same phenomenon (Lamb et al. 2004a, 2004b, 2005). The observations from HETE-2 have motivated some authors to seek a unified model of these bursts. The most competitive unified models of these bursts are the off-axis jet model (Yamazaki, Ioka, {\&} Nakamura 2002; Lamb et al. 2005) and the two-component jet model (Berger et al. 2003; Huang et al.
2004), in which XRFs, XRRs and GRBs arise from differences in the viewing angles. Unfortunately, a detailed discussion of how the different viewing angles are produced for XRFs, XRRs and GRBs has not been given in the above works. Our argument on SIBZ and SIMC may be helpful in understanding this issue. It is believed that a disk is probably surrounded by a high-temperature corona analogous to the solar corona (Liang {\&} Price 1977; Haardt 1991; Zhang et al. 2000). Very recently, some authors have argued that the coronal heating in some stars, including the Sun, is probably related to the dissipation of currents, with very strong X-ray emission arising from variations of the magnetic field (Galsgaard {\&} Parnell 2004; Peter et al. 2004). Analogously, if a corona exists above the disk in our model, we expect that it might be heated by the currents induced by SIMC and SIBZ. Very strong X-ray emission would then be produced, forming XRFs or XRRs. Although our model may be too simplified and idealized with respect to the real situation, it provides a possible scenario for the occurrence of the screw instability in the BH magnetosphere, and it may be helpful in understanding some astrophysical observations. We hope to improve our model by incorporating more observations in the future. \acknowledgments This work is supported by the National Natural Science Foundation of China under Grant Numbers 10373006, 10573006 and 10121503. The anonymous referee is thanked for his (her) helpful comments and suggestions.
\section{Introduction} With the renewed interest in information-theoretic security, there have been several attempts to develop low-complexity coding schemes achieving the fundamental secrecy limits of wiretap channel models. In particular, explicit coding schemes based on low-density parity-check codes~\cite{Thangaraj2007,Subramanian2011,Rathi2011}, polar codes~\cite{Mahdavifar11,Sasoglu13,renes2013efficient,Andersson2013}, and invertible extractors~\cite{Hayashi11,Bellare2012} have been successfully developed for special cases of Wyner's model~\cite{Wyner75}, in which the channels are at least required to be symmetric. The recently introduced chaining techniques for polar codes provide, however, a convenient way to construct explicit low-complexity coding schemes for a variety of information-theoretic channel models~\cite{Mondelli14b,Mondelli14} without any restrictions on the channels. In this paper, we develop a low-complexity polar coding scheme for the broadcast channel with confidential messages~\cite{Csiszar78}. Rather than view randomness as a free resource, which could be used to simulate random numbers at arbitrary rate with no cost, we adopt the point of view put forward in~\cite{Watanabe12,Bloch12}, in which any randomness used for stochastic encoding must be explicitly accounted for. In particular, our proposed polar coding scheme exploits the optimal rate of randomness identified in~\cite{Watanabe12} and relies on polar codes for channel prefixing. Results closely related to the present work have been independently developed in~\cite{Gulcu14,Wei14}. However, these works do not consider randomness as a resource and assume that channel prefixing can be performed through other means; in addition, \cite{Wei14} only focuses on weak secrecy. When specialized to Wyner's wiretap model, our scheme also resembles that in~\cite{renes2013efficient}, but with a number of notable distinctions. Specifically, while no pre-shared secret seed is required in~\cite{renes2013efficient}, the coding scheme therein relies on a two-layer construction for which no efficient code construction is presently known~\cite[Section 3.3]{renes2013efficient}. In contrast, our coding scheme requires a pre-shared secret seed, but with the benefit of using only a single layer of polarization. The remainder of the paper is organized as follows. Section~\ref{sec:broadc-chann-with} formally introduces the notation and the model under investigation. Section~\ref{Sec_CS} develops a random binning proof of the results in~\cite{Watanabe12}, which serves as a guideline for the design of the polar coding scheme. Section~\ref{sec:polar-coding-schem} describes the proposed polar coding scheme in detail, while Section~\ref{sec:analys-polar-coding} provides its detailed analysis. Section~\ref{sec:conclusion} offers some concluding remarks. \section{Broadcast channel with confidential messages and constrained randomization} \label{sec:broadc-chann-with} \subsection{Notation} We define the integer interval $\llbracket a,b \rrbracket$ as the set of integers between $\lfloor a \rfloor$ and $\lceil b \rceil$. For $n \in \mathbb{N}$ and $N \triangleq 2^n$, we let $G_n \triangleq \left[ \begin{smallmatrix} 1 & 0 \\[0.3em] 1 & 1 \end{smallmatrix} \right]^{\otimes n}$ be the source polarization transform defined in~\cite{Arikan10}. We denote the components of a vector $X^{1:N}$ of size $N$ with superscripts, i.e., $X^{1:N} \triangleq (X^1 , X^2, \ldots, X^{N})$.
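For concreteness, $G_n$ can be formed by $n$-fold Kronecker products over $\mathbb{F}_2$; the short Python sketch below (purely illustrative, using the row-vector convention $u^{1:N} = x^{1:N} G_n$ adopted later) also checks that $G_n$ is its own inverse over $\mathbb{F}_2$.
\begin{verbatim}
# Minimal sketch: build G_n = [[1,0],[1,1]]^{\otimes n} over F_2 and apply it.
import numpy as np

def polar_transform_matrix(n: int) -> np.ndarray:
    G2 = np.array([[1, 0], [1, 1]], dtype=int)
    Gn = np.array([[1]], dtype=int)
    for _ in range(n):
        Gn = np.kron(Gn, G2) % 2
    return Gn

n = 3
N = 2 ** n
Gn = polar_transform_matrix(n)
x = np.random.randint(0, 2, size=N)
u = (x @ Gn) % 2                        # polarized vector u^{1:N}
assert np.array_equal((u @ Gn) % 2, x)  # G_n is an involution over F_2
\end{verbatim}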
When the context makes clear that we are dealing with vectors, we write $X^N$ in place of $X^{1:N}$. We denote by $\mathbb{V}(\cdot, \cdot)$ and $\mathbb{D}(\cdot || \cdot)$ the variational distance and the divergence, respectively, between two distributions. Finally, we write $\mathds{1}\{ \omega \}$ for the indicator function, which is equal to $1$ if the predicate $\omega$ is true and $0$ otherwise. \subsection{Channel model and capacity region} \label{Sec_PS} We consider the problem of secure communication over a discrete memoryless broadcast channel $(\mathcal{X}, p_{YZ|X}, \mathcal{Y},\mathcal{Z})$ illustrated in Figure~\ref{figBCC}. The marginal probabilities $p_{Y|X}$ and $p_{Z|X}$ define two \acp{DMC} $(\mathcal{X}, p_{Y|X}, \mathcal{Y})$ and $(\mathcal{X}, p_{Z|X}, \mathcal{Z})$, which we refer to as Bob's channel and Eve's channel, respectively. \begin{figure} \centering \includegraphics[width=13cm]{figBCC.pdf} \caption{Communication over a broadcast channel with confidential messages. $O$ is a common message that must be reconstructed by both Bob and Eve. $S$ is a confidential message that must be reconstructed by Bob and kept secret from Eve. $M$ is a private message that must be reconstructed by Bob, but may neither be secret nor reconstructed by Eve. $R$ represents an additional randomization sequence used at the encoder. } \label{figBCC} \end{figure} \begin{defn} A $(2^{NR_O},2^{NR_M},2^{NR_S},2^{NR_R},N)$ code $\mathcal{C}_N$ for the broadcast channel consists of \begin{itemize} \item a common message set $\mathcal{O} \eqdef \llbracket 1 , 2^{NR_O} \rrbracket$; \item a private message set $\mathcal{M} \eqdef \llbracket 1 , 2^{NR_M} \rrbracket$; \item a confidential message set $\mathcal{S} \eqdef \llbracket 1 , 2^{NR_S} \rrbracket$; \item a randomization sequence set $\mathcal{R} \eqdef \llbracket 1 , 2^{NR_R} \rrbracket$; \item an encoding function $f: \mathcal{O} \times \mathcal{M} \times \mathcal{S} \times \mathcal{R} \to \mathcal{X}^N$, which maps the messages $(o,m,s)$ and the randomness $r$ to a codeword $x^N$; \item a decoding function $g: \mathcal{Y}^N \to \mathcal{O} \times \mathcal{M} \times \mathcal{S}$, which maps each observation $y^N$ of Bob's channel to messages $(\hat{o},\hat{m},\hat{s})$; \item a decoding function $h: \mathcal{Z}^N \to \mathcal{O}$, which maps each observation $z^N$ of Eve's channel to a message~$\hat{\hat{o}}$. \end{itemize} \end{defn} For uniformly distributed $O$, $M$, $S$, and $R$, the performance of a $(2^{NR_O},2^{NR_M},2^{NR_S},2^{NR_R},N)$ code $\mathcal{C}_N$ for the broadcast channel is measured in terms of its probability of error $$ \mathbf{P}_e(\mathcal{C}_N) \triangleq \mathbb{P}\left[ (\widehat{O},\widehat{M},\widehat{S}) \neq (O,M,S) \text{ or } \widehat{\widehat{O}} \neq O \right], $$ and its leakage of information about the confidential message to Eve $$ \mathbf{L}_e(\mathcal{C}_N) \triangleq I(S;Z^N). $$ \begin{defn} A rate quadruple $(R_O,R_M,R_S,R_R)$ is achievable for the broadcast channel if there exists a sequence of $(2^{NR_O},2^{NR_M},2^{NR_S},2^{NR_R},N)$ codes $\{ \mathcal{C}_N \}_{N \geq 1}$ such that $$ \lim_{N \to \infty} \mathbf{P}_e(\mathcal{C}_N) =0 \quad (\text{reliability}), $$ $$ \lim_{N \to \infty} \mathbf{L}_e(\mathcal{C}_N)=0 \quad (\text{strong secrecy}). $$ The achievable region $\mathcal{R}_{\textup{BCC}}$ is defined as the closure of the set of all achievable rate quadruples. \end{defn} The exact characterization of $\mathcal{R}_{\textup{BCC}}$ was obtained in~\cite{Watanabe12}.
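Theorem~\ref{thm:watanabe} below expresses $\mathcal{R}_{\textup{BCC}}$ through conditional mutual informations of auxiliary random variables. As a purely illustrative aid, not part of the original development, the following Python sketch shows how these bounds can be evaluated numerically for a toy binary joint pmf enforcing the Markov chain $U - V - X - (Y,Z)$; every factor distribution in it is an arbitrary assumption.
\begin{verbatim}
# Illustrative only: evaluate the mutual-information bounds of the theorem
# below for a toy binary pmf satisfying U - V - X - (Y,Z).
import itertools
import numpy as np

rng = np.random.default_rng(0)

def H(p):
    """Entropy in bits of a pmf given as a (possibly multi-dim) array."""
    p = p.ravel()
    p = p[p > 0]
    return float(-(p * np.log2(p)).sum())

# Joint p[u,v,x,y,z] built from factors, which enforces the Markov chain.
pU = np.array([0.5, 0.5])
pV_U = np.array([[0.9, 0.1], [0.2, 0.8]])                   # p(v|u)
pX_V = np.array([[0.8, 0.2], [0.3, 0.7]])                   # p(x|v)
pYZ_X = rng.dirichlet(np.ones(4), size=2).reshape(2, 2, 2)  # p(y,z|x), made up

p = np.zeros((2,) * 5)
for u, v, x, y, z in itertools.product(range(2), repeat=5):
    p[u, v, x, y, z] = pU[u] * pV_U[u, v] * pX_V[v, x] * pYZ_X[x, y, z]

def I(A, B, C=()):
    """Conditional mutual information I(A;B|C) in bits; axes as tuples."""
    def marg(keep):
        drop = tuple(a for a in range(5) if a not in keep)
        return p.sum(axis=drop)
    HC = H(marg(C)) if C else 0.0
    return H(marg(A + C)) + H(marg(B + C)) - H(marg(A + B + C)) - HC

U, V, X, Y, Z = (0,), (1,), (2,), (3,), (4,)
print("R_O <=", min(I(U, Y), I(U, Z)))
print("R_S <=", I(V, Y, U) - I(V, Z, U))
print("R_R >=", I(X, Z, V))
\end{verbatim}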
\begin{thm}[{\cite{Watanabe12}}] \label{thm:watanabe} $\mathcal{R}_{\textup{BCC}}$ is the closed convex set consisting of the quadruples $(R_O,R_M,R_S,R_R)$ for which there exist auxiliary random variables $(U,V)$ such that $U - V - X - (Y,Z)$ and \begin{align*} R_O &\leq \min[ I(U;Y), I(U;Z) ], \\ R_O + R_M + R_S &\leq I(V;Y|U) + \min[ I(U;Y), I(U;Z)], \\ R_S &\leq I(V;Y|U) - I(V;Z|U), \\ R_M + R_R &\geq I(X;Z|U), \\ R_R &\geq I(X; Z|V). \end{align*} \end{thm} The main contribution of the present work is to develop a polar coding scheme achieving the rates in $\mathcal{R}_{\textup{BCC}}$. \section{A binning approach to code design: from random binning to polar binning} \label{Sec_CS} In this section, we argue that our construction of polar codes for the broadcast channel with confidential messages is essentially the constructive counterpart of a \emph{random binning} proof of the region ${\mathcal{R}}_{\textup{BCC}}$. While \emph{random coding} is often the natural tool to address channel coding problems, random binning was already used in~\cite{Csiszar1996} to establish the strong secrecy of the wiretap channel, and is the tool of choice in quantum information theory~\cite{Renes2011}; there has also been renewed interest in random binning proofs in multi-user information theory, motivated in part by~\cite{Yassaee2012}. In Section~\ref{sec:rand-binn-secure}, we sketch a random binning proof of the characterization of ${\mathcal{R}}_{\textup{BCC}}$ established in~\cite{Watanabe12}, which may be viewed as a refinement of the analysis in~\cite{Yassaee2012} to obtain a more precise characterization of the stochastic encoder. While the results we derive are not new, we use this alternative proof in Section~\ref{sec:binning-with-polar} to obtain high-level insight into the construction of polar codes. The main benefit is to clearly highlight the crucial steps of the construction in Section~\ref{sec:polar-coding-schem} and of its analysis in Section~\ref{sec:analys-polar-coding}. In particular, the rate conditions developed in the random binning proof of Section~\ref{sec:rand-binn-secure} directly translate into the definition of the polarization sets in Section~\ref{sec:binning-with-polar}. \subsection{Information-theoretic random binning} \label{sec:rand-binn-secure} Information-theoretic random binning proofs rely on the following well-known lemmas. We use the notation $\delta(N)$ to denote an unspecified positive function of $N$ that vanishes as $N$ goes to infinity. \begin{lem}[Source coding with side information] \label{lm:1} Consider a \ac{DMS} $({\mathcal{X}}\times{\mathcal{Y}},p_{XY})$. For each $x^N\in{\mathcal{X}}^N$, assign an index $\Phi(x^N)\in\intseq{1}{2^{NR}}$ uniformly at random. If $R>\avgH{X|Y}$, then $\exists N_0$ such that $\forall N\geq N_0$, there exists a deterministic function $g_N:\intseq{1}{2^{NR}}\times{\mathcal{Y}}^N\rightarrow {\mathcal{X}}^N:(\Phi(x^N),y^N)\mapsto \hat{x}^N$ such that \begin{align*} \E[\Phi]{\V{p_{X^NX^N},p_{X^Ng_N(\Phi(X^N),Y^N)}}}\leq \delta(N). \end{align*} \end{lem} \begin{lem}[Privacy amplification, channel intrinsic randomness, output statistics of random binning] \label{lm:2} Consider a \ac{DMS} $({\mathcal{X}}\times{\mathcal{Z}},p_{XZ})$. For each $x^N\in{\mathcal{X}}^N$, assign an index $\Psi(x^N)\in\intseq{1}{2^{NR}}$ uniformly at random. Denote by $q_M$ the uniform distribution on $\intseq{1}{2^{NR}}$.
If $R<\avgH{X|Z}$, then $\exists N_0$ such that $\forall N\geq N_0$, \begin{align*} \E[\Psi]{\V{p_{\Psi(X^N)Z^N},q_M p_{Z^N}}}\leq\delta(N). \end{align*} \end{lem} One may obtain more explicit results regarding the convergence to zero in Lemma~\ref{lm:1} and Lemma~\ref{lm:2}, but we omit them for brevity. The principle of a random binning proof of Theorem~\ref{thm:watanabe} is to consider a \ac{DMS} $({\mathcal{U}}\times{\mathcal{V}}\times{\mathcal{X}}\times{\mathcal{Y}}\times{\mathcal{Z}},p_{UVXYZ})$ such that $U-V-X-(Y,Z)$, and to assign two types of indices to source sequences by random binning. The first type identifies subsets of sequences that play the role of codebooks, while the second type labels sequences with indices that can be thought of as messages. As explained in the next paragraphs, the crux of the proof is to show that the binning can be ``inverted,'' so that the sources may be generated from independent choices of uniform codebooks and messages.\smallskip \noindent\textbf{Common message encoding.} We introduce two indices $\psi^U\in\intseq{1}{2^{N\rho_U}}$ and $o\in\intseq{1}{2^{NR_O}}$ by random binning on $u^N$ such that: \begin{itemize} \item $\rho_U>\max\left(\avgH{U|Y},\avgH{U|Z}\right)$, so that Lemma~\ref{lm:1} ensures that the knowledge of $\psi^U$ allows Bob and Eve to reconstruct the sequence $u^N$ with high probability knowing $y^N$ or $z^N$, respectively; \item $\rho_U+R_O<\avgH{U}$, so that Lemma~\ref{lm:2} ensures that the indices $\psi^U$ and $o$ are almost uniformly distributed and independent of each other. \end{itemize} The binning scheme induces a joint distribution $p_{U^N\Psi^UO}$. To convert the binning scheme into a channel coding scheme, Alice operates as follows. Upon sampling indices $\tilde{\psi}^U\in\intseq{1}{2^{N\rho_U}}$ and $\tilde{o}\in\intseq{1}{2^{NR_O}}$ from independent uniform distributions, Alice \emph{stochastically} encodes them into a sequence $\tilde{u}^{N}$ drawn according to $p_{U^N|\Psi^UO}(\tilde{u}^N|\tilde{\psi}^U,\tilde{o})$. The choice of rates above guarantees that the joint distribution $p_{\tilde{U}^N\tilde{\Psi}^U\tilde{O}}$ approximates the distribution $p_{U^N\Psi^UO}$ in variational distance, so that disclosing $\tilde{\psi}^U$ allows Bob and Eve to decode the sequence $\tilde{u}^N$. \smallskip \noindent\textbf{Secret and private message encoding.} Following the same approach, we introduce three indices $\psi^{V|U}\in\intseq{1}{2^{N\rho_{V|U}}}$, $s\in\intseq{1}{2^{NR_S}}$, and $m\in\intseq{1}{2^{NR_M}}$ by random binning on $v^N$ such that: \begin{itemize} \item $\rho_{V|U}>\avgH{V|UY}$, to ensure that, knowing $\psi^{V|U}$ and $u^N$, Bob may reconstruct the sequence $v^N$; \item $\rho_{V|U}+R_S+R_M<\avgH{V|UZ}$, to ensure that the indices are almost uniformly distributed and independent of each other, as well as of the source sequences $U^N$ and $Z^N$. \end{itemize} The binning scheme induces a joint distribution $p_{V^NU^N\Psi^{V|U}SM}$. To obtain a channel coding scheme, Alice encodes the realizations of independent and uniformly distributed indices $\tilde{\psi}^{V|U}\in\intseq{1}{2^{N\rho_{V|U}}}$, $\tilde{s}\in\intseq{1}{2^{NR_S}}$, $\tilde{m}\in\intseq{1}{2^{NR_M}}$, and the sequence $\tilde{u}^N$, into a sequence $\tilde{v}^N$ drawn according to the distribution $p_{V^N|U^N\Psi^{V|U}SM}(\tilde{v}^N|\tilde{u}^N,\tilde{\psi}^{V|U},\tilde{s},\tilde{m})$.
The resulting joint distribution is again a close approximation of $p_{V^NU^N\Psi^{V|U}SM}$, so that the scheme inherits the reliability and secrecy properties of the random binning scheme upon disclosing $\tilde{\psi}^{V|U}$. \smallskip \noindent\textbf{Channel prefixing.} Finally, we introduce the indices $\psi^{X|V}\in\intseq{1}{2^{N\rho_{X|V}}}$ and $r\in\intseq{1}{2^{NR_R}}$ by random binning on $x^N$ such that: \begin{itemize} \item $\rho_{X|V}<\avgH{X|VZ}$, to ensure that $\psi^{X|V}$ is almost independent of the source sequences $V^N$ and $Z^N$; \item $\rho_{X|V}+R_R<\avgH{X|V}$, to ensure that the indices are almost uniformly distributed and independent of each other, as well as of the source sequence $V^N$. \end{itemize} The binning scheme induces a joint distribution $p_{X^NV^NU^N\Psi^{X|V}R}$. To obtain a channel prefixing scheme, Alice encodes the realizations of uniformly distributed indices $\tilde{\psi}^{X|V}$ and $\tilde{r}$, and the previously obtained $\tilde{v}^N$, into a sequence $\tilde{x}^N$ drawn according to $p_{X^N|V^N\Psi^{X|V}R}(\tilde{x}^N|\tilde{v}^N,\tilde{\psi}^{X|V},\tilde{r})$. The resulting joint distribution is once again a close approximation of $p_{X^NV^NU^N\Psi^{X|V}R}$.\smallskip \noindent\textbf{Chaining to de-randomize the codebooks.} The downside of the schemes described earlier is that they require sharing the indices $\tilde{\psi}^{U}$, $\tilde{\psi}^{V|U}$, and $\tilde{\psi}^{X|V}$, which identify the codebooks, between Alice, Bob, and Eve; however, the rate cost may be amortized by reusing the \emph{same} indices over sequences of $k$ blocks. Specifically, the union bound shows that the average error probability over $k$ blocks is at most $k$ times that of an individual block, and a hybrid argument shows that the information leakage over $k$ blocks is at most $k$ times that of an individual block. Consequently, for $k$ and $N$ large enough, the impact on the transmission rates is negligible.\smallskip \noindent\textbf{Total amount of randomness.} The total amount of randomness required for encoding includes not only the explicit random numbers used for channel prefixing but also all the randomness required in the stochastic encoding to approximate the source distribution. One can show that the rate of randomness specifically used in the stochastic encoding is negligible; we omit the proof of this result for random binning, but it is analyzed precisely for polar codes in Section~\ref{sec:analys-polar-coding}. By combining all the rate constraints above and performing Fourier-Motzkin elimination, one recovers the rates in Theorem~\ref{thm:watanabe}. \subsection{Binning with polar codes} \label{sec:binning-with-polar} The main observation allowing us to translate the analysis of Section~\ref{sec:rand-binn-secure} into a polar coding scheme is that Lemma~\ref{lm:1} and Lemma~\ref{lm:2} have the following counterparts in terms of source polarization. \begin{lem}[adapted from~{\cite{Arikan10}}] \label{lm:3} Consider a \ac{DMS} $({\mathcal{X}}\times{\mathcal{Y}},p_{XY})$. For each $x^{1:N}\in\mathbb{F}_2^N$ polarized as $u^{1:N}=x^{1:N}G_n$, let $u^{1:N}[{\mathcal{H}}_{X|Y}]$ denote the high entropy bits of $u^{1:N}$ in positions ${\mathcal{H}}_{X|Y}\eqdef\{i\in\intseq{1}{N}:\avgH{U^i|U^{1:i-1}Y^N}>\delta_N\}$, where $\delta_N\eqdef 2^{-N^\beta}$ with $\beta\in]0,\tfrac{1}{2}[$.
For every $i\in\intseq{1}{N}$, sample $\tilde{u}^{1:N}$ from the distribution \begin{align*} \tilde{p}_{U^i|U^{1:i-1}}(\tilde{u}^i|\tilde{u}^{1:i-1}) \eqdef\left\{ \begin{array}{l} \mathds{1}\left\{\tilde{u}^i=u^i\right\}\text{ if $i\in{\mathcal{H}}_{X|Y}$}\\ p_{U^i|U^{1:i-1}Y^N}(\tilde{u}^i|\tilde{u}^{1:i-1}y^N) \text{ if $i\in{\mathcal{H}}_{X|Y}^c$}, \end{array} \right. \end{align*} and create $\tilde{x}^{1:N}=\tilde{u}^{1:N}G_n$. Then, \begin{align*} \V{p_{X^{1:N}X^{1:N}},p_{X^{1:N}\tilde{X}^{1:N}}} \leq \delta_N, \end{align*} and $\lim_{N\rightarrow\infty} \card{{\mathcal{H}}_{X|Y}}/N=\avgH{X|Y}$. \end{lem} In other words, the high entropy bits in positions ${\mathcal{H}}_{X|Y}$ play the same role as the random binning index in Lemma~\ref{lm:1}. However, note that the construction of $\tilde{x}^{1:N}$ in Lemma~\ref{lm:3} is explicitly stochastic. \begin{lem}[adapted from~{\cite{Chou14rev}}] \label{lm:4} Consider a \ac{DMS} $({\mathcal{X}}\times{\mathcal{Z}},p_{XZ})$. For each $x^{1:N}\in\mathbb{F}_2^N$ polarized as $u^{1:N}=x^{1:N}G_n$, let $u^{1:N}[{\mathcal{V}}_{X|Z}]$ denote the very high entropy bits of $u^{1:N}$ in positions ${\mathcal{V}}_{X|Z}\eqdef\{i\in\intseq{1}{N}:\avgH{U^i|U^{1:i-1}Z^{1:N}}>1-\delta_N\}$, where $\delta_N\eqdef 2^{-N^\beta}$ with $\beta\in]0,\tfrac{1}{2}[$. Denote by $q_{U^{1:N}[{\mathcal{V}}_{X|Z}]}$ the uniform distribution of the bits in positions ${\mathcal{V}}_{X|Z}$. Then, \begin{align*} \V{p_{U^{1:N}[{\mathcal{V}}_{X|Z}]Z^{1:N}},q_{U^{1:N}[{\mathcal{V}}_{X|Z}]}p_{Z^{1:N}}}\leq \delta_N, \end{align*} and $\lim_{N\rightarrow\infty}\card{{\mathcal{V}}_{X|Z}}/N=\avgH{X|Z}$ by \cite[Lemma 1]{Chou14rev}. \end{lem} The very high entropy bits in positions ${\mathcal{V}}_{X|Z}$ therefore play the same role as the random binning index in Lemma~\ref{lm:2}. This suggests that any result obtained from random binning could also be derived using source polarization as a linear and low-complexity alternative; intuitively, information-theoretic constraints resulting from Lemma~\ref{lm:1} translate into the use of ``high entropy'' sets ${\mathcal{H}}$, while those resulting from Lemma~\ref{lm:2} translate into the use of ``very high entropy'' sets ${\mathcal{V}}$. However, unlike the indices resulting from random binning, the high entropy and very high entropy sets may not necessarily be aligned, and the precise design of a polar coding scheme requires more care. In the remainder of the paper, we consider a \ac{DMS} $({\mathcal{U}}\times{\mathcal{V}}\times{\mathcal{X}}\times{\mathcal{Y}}\times{\mathcal{Z}},p_{UVXYZ})$ such that $U-V-X-(Y,Z)$, $I(V;Y|U) - I(V;Z|U) >0$, and $|\mathcal{X}| = |\mathcal{U}| = |\mathcal{V}| = 2$. The extension to larger alphabets is obtained following the ideas in~\cite{Sasoglu12}. We also assume without loss of generality that $I(U;Y) \leq I(U;Z)$; the case $I(U;Y) > I(U;Z)$ is treated similarly. \noindent\textbf{Common message encoding.} Define the polar transform of $U^{1:N}$ as $A^{1:N} \triangleq U^{1:N} G_n$ and the associated sets \begin{align} {\mathcal{H}}_U & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( A^i | A^{1:i-1}) > \delta_N \right\}, \label{eq:common_msg_sets_1}\\ {\mathcal{V}}_U & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( A^i | A^{1:i-1}) > 1- \delta_N \right\},\\ {\mathcal{H}}_{U|Y} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( A^i | A^{1:i-1} Y^{1:N}) > \delta_N \right\},\\ \mathcal{H}_{U|Z} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( A^i | A^{1:i-1} Z^{1:N}) > \delta_N \right\}.
\label{eq:common_msg_sets_6} \end{align} If we could guarantee that ${\mathcal{H}}_{U|Z}\subseteq {\mathcal{H}}_{U|Y}\subseteq{\mathcal{V}}_{U}$, then we could directly mimic the information-theoretic random binning proof. We would use random bits in positions ${\mathcal{H}}_{U|Z}$ to identify the code, random bits in positions ${\mathcal{V}}_U\setminus \mathcal{H}_{U|Z}$ for the message, successive cancellation encoding to compute the bits in positions ${\mathcal{V}}_U^c$ and approximate the source distribution, and chaining to amortize the rate cost of the bits in positions ${\mathcal{H}}_{U|Z}$. Unfortunately, the inclusion ${\mathcal{H}}_{U|Z}\subseteq {\mathcal{H}}_{U|Y}$ is not true in general, and one must also use chaining as in~\cite{Mondelli14b} to ``realign'' the sets of indices. Furthermore, only the inclusions ${\mathcal{H}}_{U|Z}\subseteq {\mathcal{H}}_U$ and ${\mathcal{H}}_{U|Y}\subseteq {\mathcal{H}}_U$ are true in general, so that the bits in positions ${\mathcal{H}}_{U|Z}\cap{\mathcal{V}}_U^c$ and ${\mathcal{H}}_{U|Y}\cap{\mathcal{V}}_U^c$ must be transmitted separately. The precise coding scheme is detailed in Section~\ref{sec:comm-mess-encod}. \noindent\textbf{Secret and private message encoding.} Define the polar transform of $V^{1:N}$ as $B^{1:N} \triangleq V^{1:N} G_n$ and the associated sets \begin{align} \mathcal{V}_{V|U} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( B^i | B^{1:i-1} U^{1:N}) > 1 - \delta_N \right\},\label{eq:private_msg_sets_1}\displaybreak[0]\\ \mathcal{V}_{V|UZ} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( B^i | B^{1:i-1} U^{1:N}Z^{1:N}) > 1 - \delta_N \right\},\displaybreak[0]\\ \mathcal{H}_{V|UY} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( B^i | B^{1:i-1} U^{1:N}Y^{1:N}) > \delta_N \right\},\displaybreak[0]\\ \mathcal{V}_{V|UY} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( B^i | B^{1:i-1} U^{1:N}Y^{1:N}) > 1 - \delta_N \right\}, \displaybreak[0]\\ \mathcal{M}_{UVZ} & \triangleq \mathcal{V}_{V|U}\backslash \mathcal{V}_{V|UZ}. \label{eq:private_msg_sets_5} \end{align} If the inclusion ${\mathcal{H}}_{V|UY}\subseteq {\mathcal{V}}_{V|UZ}$ were true, then we would place random bits identifying the codebook in positions ${\mathcal{H}}_{V|UY}$, random bits describing the secret message in positions ${\mathcal{V}}_{V|UZ}\setminus {\mathcal{H}}_{V|UY}$, random bits describing the private message in positions ${\mathcal{V}}_{V|U}\setminus {\mathcal{V}}_{V|UZ}$, use successive cancellation encoding to compute the bits in positions ${\mathcal{V}}_{V|U}^c$ and approximate the source distribution, and use chaining to amortize the rate cost of the bits in positions ${\mathcal{H}}_{V|UY}$. This is unfortunately again not directly possible in general, and one needs to exploit chaining to realign the indices and to transmit the bits in positions ${\mathcal{H}}_{V|UY}\cap{\mathcal{V}}_{V|U}^c$ separately and secretly to Bob. The precise coding scheme is detailed in Section~\ref{sec:secr-priv-mess}. \noindent\textbf{Channel prefixing.} Finally, define the polar transform of $X^{1:N}$ as $T^{1:N} \triangleq X^{1:N} G_n$ and the associated sets \begin{align} \mathcal{V}_{X|V} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( T^i | T^{1:i-1} V^{1:N}) > 1 - \delta_N \right\}, \label{eq:randomization_msg_sets_1}\\ \mathcal{V}_{X|VZ} & \triangleq \left\{ i \in \llbracket 1, N \rrbracket: H( T^i | T^{1:i-1} V^{1:N}Z^{1:N}) > 1 - \delta_N \right\}.
\label{eq:randomization_msg_sets_2} \end{align} One performs channel prefixing by placing random bits identifying the code in positions ${\mathcal{V}}_{X|VZ}$, random bits describing the randomization sequence in positions ${\mathcal{V}}_{X|V}\setminus {\mathcal{V}}_{X|VZ}$, and using successive cancellation encoding to compute the bits in positions ${\mathcal{V}}_{X|V}^c$ and approximate the source distribution. Chaining is finally used to amortize the cost of the randomness describing the code. The precise coding scheme is detailed in Section~\ref{sec:channel-prefixing}. \section{Polar coding scheme} \label{sec:polar-coding-schem} In this section, we describe the details of the polar coding scheme resulting from the discussion of the previous section. Recall that the joint probability distribution $p_{UVXYZ}$ of the original source is fixed and defined as in Section~\ref{sec:binning-with-polar}. As alluded to earlier, we perform the encoding over $k$ blocks of size $N$. We use the subscript $i \in \llbracket 1, k \rrbracket$ to denote random variables associated with the encoding of Block $i$. The chaining constructions corresponding to the encoding of the common, secret, and private messages, and of the randomization sequence, are described in Section~\ref{sec:comm-mess-encod}, Section~\ref{sec:secr-priv-mess}, and Section~\ref{sec:channel-prefixing}, respectively. Although each chaining is described independently, all messages should be encoded in every block before moving to the next. Specifically, in every block $i\in\intseq{1}{k-1}$, Alice successively encodes the common message, the secret and private messages, and performs channel prefixing, before she moves to the next block $i+1$. \subsection{Common message encoding} \label{sec:comm-mess-encod} In addition to the polarization sets defined in~\eqref{eq:common_msg_sets_1}--\eqref{eq:common_msg_sets_6}, we also define \begin{align*} \mathcal{I}_{UY} & \triangleq {\mathcal{V}}_{U} \backslash {\mathcal{H}}_{U|Y},\\ \mathcal{I}_{UZ} & \triangleq {\mathcal{V}}_{U} \backslash {\mathcal{H}}_{U|Z},\\ \mathcal{A}_{UYZ}&\eqdef \text{any subset of $\mathcal{I}_{UZ} \backslash \mathcal{I}_{UY}$ with size $| \mathcal{I}_{UY} \backslash \mathcal{I}_{UZ}|$.} \end{align*} Note that $\mathcal{A}_{UYZ}$ exists since we have assumed $I(U;Y)\leq I(U;Z)$. In fact, \begin{align*} |\mathcal{I}_{UZ} \backslash \mathcal{I}_{UY}| - | \mathcal{I}_{UY} \backslash \mathcal{I}_{UZ}| = |\mathcal{I}_{UZ} |- | \mathcal{I}_{UY}| \geq 0. \end{align*} The encoding procedure with chaining is summarized in Figure~\ref{fig_atilde}.\smallskip \begin{figure} \centering \includegraphics[width=14.5cm]{fig_atilde.pdf} \caption{Chaining for the encoding of the $\widetilde{A}_i^{1:N}$'s, which corresponds to the encoding of the common messages.} \label{fig_atilde} \end{figure} In {Block ${1}$}, the encoder forms $\widetilde{U}_1^{1:N}$ as follows. Let ${O}_1$ be a vector of $|\mathcal{I}_{UY}|$ uniformly distributed information bits that represents the common message to be reconstructed by Bob and Eve.
Upon observing a realization $o_1$, the encoder samples $\widetilde{a}_1^{1:N}$ from the distribution $\widetilde{p}_{A_1^{1:N}}$ defined as \begin{equation} \label{eq_sim_A_1} \widetilde{p}_{{A}_1^j|{A}_1^{1:j-1}} ({a}_1^j|{a}_1^{1:j-1}) \triangleq \begin{cases} \mathds{1} \left\{ a_1^j = {o_1^j} \right\} & \text{ if } j \in \mathcal{I}_{UY}\\ 1/2 & \text{ if }j \in {\mathcal{V}}_{U} \backslash \mathcal{I}_{UY}\\ {p}_{A^j|A^{1:j-1}} (a_1^j|a_1^{1:j-1}) & \text{ if }j\in {\mathcal{V}}_{U}^c \end{cases}, \end{equation} where the components of $o_1$ have been indexed by the set of indices $\mathcal{I}_{UY}$ for convenience, so that ${O}_1 \eqdef \widetilde{A}_1^{1:N} [\mathcal{I}_{UY}]$. The random bits that identify the codebook and that are required to reconstruct $\widetilde{A}_1^{1:N}$ are $\widetilde{A}_1^{1:N}[ \mathcal{H}_{U|Z}]$ for Eve and $\widetilde{A}_1^{1:N}[ \mathcal{H}_{U|Y}]$ for Bob. Moreover, we define \begin{align*} \Psi^{U}_1 &\triangleq \widetilde{A}_1^{1:N}[ {\mathcal{V}}_{U} \backslash \mathcal{I}_{UY} ]=\widetilde{A}_1^{1:N}[ {\mathcal{V}}_{U} \cap{\mathcal{H}}_{U|Y} ],\\ \Phi^U_1 &\triangleq \widetilde{A}_1^{1:N}[ ({\mathcal{H}}_{U|Y} \cup {\mathcal{H}}_{U|Z}) \cap {\mathcal{V}}_{U}^c]. \end{align*} Both $\Psi^{U}_1$ and $\Phi^{U}_1$ are publicly transmitted to both Bob and Eve. Note that, unlike in the random binning proof, the use of polarization forces us to distinguish the part $\Psi^{U}_1$ that is nearly uniform from the part $\Phi^{U}_1$ that is not. We show later that the rate cost of this additional transmission is negligible. We also write $O_{1} \triangleq [{O}_{1,1}, {O}_{1,2}]$, where ${O}_{1,1}\triangleq \widetilde{A}_1^{1:N} [\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}]$ and ${O}_{1,2}\triangleq \widetilde{A}_1^{1:N} [\mathcal{I}_{UY} \backslash \mathcal{I}_{UZ} ]$. We will retransmit ${O}_{1,2}$ in the next block following the same strategy as in \cite{Mondelli14b}. Finally, we compute $\widetilde{U}_1^{1:N} \triangleq \widetilde{A}_1^{1:N} G_n$. \smallskip In {Block} ${i \in \llbracket 2,k-1\rrbracket}$, the encoder forms $\widetilde{A}_i^{1:N}$ as follows. Let $O_i$ be a vector of $|\mathcal{I}_{UY}|$ uniformly distributed information bits representing the common message in that block. Upon observing the realizations ${o}_i$ and $o_{i-1}$, the encoder draws $\widetilde{a}_i^{1:N}$ from the distribution $\widetilde{p}_{A_i^{1:N}}$ defined as follows: \begin{equation} \label{eq_sim_A_i} \widetilde{p}_{{A}_i^j|{A}_i^{1:j-1}} ({a}_i^j|{a}_i^{1:j-1}) \triangleq \begin{cases} \mathds{1} \left\{ a_i^j = {o_i^j} \right\} & \text{ if } j \in \mathcal{I}_{UY}\\ \mathds{1} \left\{ a_i^j = {o_{i-1,2}^j} \right\} & \text{ if } j \in \mathcal{A}_{UYZ} \\ \mathds{1} \left\{ a_i^j = (\psi^{U}_1)^j\right\} & \text{ if } j \in {\mathcal{V}}_{U} \backslash (\mathcal{I}_{UY} \cup \mathcal{A}_{UYZ}) \\ {p}_{A^j|A^{1:j-1}} (a_i^j|a_i^{1:j-1}) & \text{ if }j\in {\mathcal{V}}_{U}^c \end{cases}, \end{equation} where the components of $o_i$, ${o_{i-1,2}}$, and $\psi^{U}_1$ have been indexed by the sets of indices $\mathcal{I}_{UY}$, $\mathcal{A}_{UYZ}$, and $\mathcal{V}_{U} \backslash (\mathcal{I}_{UY} \cup \mathcal{A}_{UYZ})$, respectively.
Consequently, note that $${O}_i = \widetilde{A}_i^{1:N} [\mathcal{I}_{UY}] \text{ and } O_{i-1,2} = \widetilde{A}_i^{1:N} [\mathcal{A}_{UYZ}].$$ The random bits that identify the codebook and that are required to reconstruct $\widetilde{A}_i^{1:N}$ are $\widetilde{A}_i^{1:N} [\mathcal{H}_{U|Y}]$ for Bob and $\widetilde{A}_i^{1:N}[ \mathcal{H}_{U|Z}]$ for Eve. Parts of these bits depend on messages in previous blocks. For the others, we define \begin{align*} \Psi^{U}_i &\triangleq \widetilde{A}_i^{1:N}[{\mathcal{V}}_{U} \backslash (\mathcal{I}_{UY} \cup \mathcal{A}_{UYZ})],\\ \Phi^{U}_i &\triangleq \widetilde{A}_i^{1:N}[ ({\mathcal{H}}_{U|Y} \cup {\mathcal{H}}_{U|Z}) \backslash {\mathcal{V}}_{U}]. \end{align*} Note that $\Psi^{U}_i$ reuses the bits of $\Psi^{U}_1$; however, it is necessary to make the bits $\Phi^{U}_i$ available to both Bob and Eve to enable the reconstruction of $O_i$. We show later that this entails a negligible rate cost. Finally, we write ${O}_{i} \triangleq [{O}_{i,1}, {O}_{i,2}]$, where ${O}_{i,1}\triangleq \widetilde{A}_i^{1:N} [\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}]$ and ${O}_{i,2}\triangleq\widetilde{A}_i^{1:N} [\mathcal{I}_{UY} \backslash \mathcal{I}_{UZ}]$, and we retransmit ${O}_{i,2}$ in the next block. We finally compute $\widetilde{U}_i^{1:N} \triangleq \widetilde{A}_i^{1:N} G_n$. \smallskip Finally, the encoder forms $\widetilde{A}_k^{1:N}$ in {Block} ${k}$ as follows. Let $O_k$ be a vector of $|\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}|$ uniformly distributed bits representing the common message in that block. Given realizations ${o}_k$ and $o_{k-1}$, the encoder samples $\widetilde{a}_k^{1:N}$ from the distribution $\widetilde{p}_{A_k^{1:N}}$ defined as follows: \begin{equation} \label{eq_sim_A_k} \widetilde{p}_{{A}_k^j|{A}_k^{1:j-1}} ({a}_k^j|{a}_k^{1:j-1}) \triangleq \begin{cases} \mathds{1} \left\{ a_k^j = {o_k^j} \right\} & \text{ if } j \in \mathcal{I}_{UY} \cap \mathcal{I}_{UZ}\\ \mathds{1} \left\{ a_k^j = {o_{k-1,2}^j} \right\} & \text{ if } j \in \mathcal{A}_{UYZ} \\ \mathds{1} \left\{ a_k^j = (\psi^{U}_1)^j\right\} & \text{ if }j \in {\mathcal{V}}_{U} \backslash ( \mathcal{A}_{UYZ} \cup(\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}) ) \\ {p}_{A^j|A^{1:j-1}} (a_k^j|a_k^{1:j-1}) & \text{ if }j\in {\mathcal{V}}_{U}^c \end{cases}, \end{equation} where the components of $o_k$, ${o_{k-1,2}}$, and $\psi^{U}_1$ have been indexed by the sets of indices $\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}$, $\mathcal{A}_{UYZ}$, and ${\mathcal{V}}_{U} \backslash ( \mathcal{A}_{UYZ} \cup(\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}) )$, respectively. Consequently, $${O}_k = \widetilde{A}_k^{1:N} [\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}], \text{ } O_{k-1,2} = \widetilde{A}_k^{1:N} [\mathcal{A}_{UYZ}].$$ The random bits that identify the codebook and that are required to reconstruct $\widetilde{A}_k^{1:N}$ are $\widetilde{A}_k^{1:N} [\mathcal{H}_{U|Y}]$ for Bob and $\widetilde{A}_k^{1:N}[ \mathcal{H}_{U|Z}]$ for Eve. Parts of these bits depend on messages in previous blocks. For the others, we define \begin{align*} \Psi^{U}_k &\triangleq \widetilde{A}_k^{1:N}[{\mathcal{V}}_{U} \backslash ( \mathcal{A}_{UYZ} \cup(\mathcal{I}_{UY} \cap \mathcal{I}_{UZ}) )],\\ \Phi^{U}_k &\triangleq \widetilde{A}_k^{1:N}[({\mathcal{H}}_{U|Y} \cup {\mathcal{H}}_{U|Z}) \backslash {\mathcal{V}}_{U}], \end{align*} and note that $\Psi^{U}_k$ merely reuses the bits of $\Psi^{U}_1$.
$\Phi^{U}_k$ is made available to both Bob and Eve to help them reconstruct $O_k$, but this incurs a negligible rate cost.\smallskip The public transmission of $(\Psi^{U}_1,\Phi^{U}_{1:k})$ required for the reconstruction of the common message is taken into account in the secrecy analysis in Section~\ref{sec:analys-polar-coding}. \subsection{Secret and private message encoding} \label{sec:secr-priv-mess} In addition to the polarization sets defined in~\eqref{eq:private_msg_sets_1}--\eqref{eq:private_msg_sets_5}, we also define \begin{align*} \mathcal{B}_{V|UY}&\eqdef \text{a fixed subset of $\mathcal{V}_{V|UZ}$ with size $|{\mathcal{V}}_{V|UY} \cup \left(({\mathcal{H}}_{V|UY} \backslash {\mathcal{V}}_{V|UY})\cap {\mathcal{V}}_{V|U}\right)|$,} \end{align*} and recall from~\eqref{eq:private_msg_sets_5} that $\mathcal{M}_{UVZ} = \mathcal{V}_{V|U}\backslash \mathcal{V}_{V|UZ}$. The encoding procedure with chaining is summarized in Fig.~\ref{fig_btilde}. \begin{figure} \centering \includegraphics[width=14.5cm]{fig_btilde.pdf} \caption{Chaining for the encoding of the $\widetilde{B}_i^{1:N}$'s, which corresponds to the encoding of the private and confidential messages.} \label{fig_btilde} \end{figure} In Block ${1}$, the encoder forms $\widetilde{V}_1^{1:N}$ as follows. Let ${S}_1$ be a vector of $|\mathcal{V}_{V|UZ}|$ uniformly distributed bits representing the secret message, and let ${M}_1$ be a vector of $|\mathcal{M}_{UVZ}|$ uniformly distributed bits representing the private message to be reconstructed by Bob. Given a confidential message ${s}_1$, a private message $m_1$, and the sequence $\widetilde{u}_1^{1:N}$ resulting from the encoding of the common message, the encoder samples $\widetilde{b}_1^{1:N}$ from the distribution $\widetilde{p}_{B_1^{1:N}}$ defined as follows: \begin{equation} \label{eq_sim_Bv_1} \widetilde{p}_{{B}_1^j|{B}_1^{1:j-1}U_1^{1:N}} ({b}_1^j|{b}_1^{1:j-1} \widetilde{u}_1^{1:N}) \triangleq \begin{cases} \mathds{1} \left\{ b_1^j = {s}_1^j \right\} & \text{ if } j \in \mathcal{V}_{V|UZ}\\ \mathds{1} \left\{ b_1^j = {m}_1^j \right\} & \text{ if } j \in \mathcal{M}_{UVZ}\\ {p}_{B^j|B^{1:j-1}U^{1:N}} (b_1^j|b_1^{1:j-1} \widetilde{u}_1^{1:N}) & \text{ if }j\in {\mathcal{V}}_{V|U}^c \end{cases}, \end{equation} where the components of $s_1$ and $m_1$ have been indexed by the sets of indices $\mathcal{V}_{V|UZ}$ and $\mathcal{M}_{UVZ}$, respectively. Consequently, note that $S_1 = \widetilde{B}_1^{1:N} [\mathcal{V}_{V|UZ}]$ and $M_1 = \widetilde{B}_1^{1:N} [\mathcal{M}_{UVZ}]$. The random bits that identify the codebook and that are required for reconstruction are those in positions ${\mathcal{H}}_{V|UY}$, which we split as \begin{align*} &\Psi^{V|U}_1 \triangleq \widetilde{B}_1^{1:N}[{\mathcal{V}}_{V|UY} \cup \left(({\mathcal{H}}_{V|UY} \backslash {\mathcal{V}}_{V|UY})\cap {\mathcal{V}}_{V|U}\right)],\\ &\Phi^{V|U}_1 \triangleq \widetilde{B}_1^{1:N}[({\mathcal{H}}_{V|UY} \backslash {\mathcal{V}}_{V|UY})\cap {\mathcal{V}}_{V|U}^c]. \end{align*} Note that $\Psi^{V|U}_1$ is uniformly distributed but $\Phi^{V|U}_1$ is not. Consequently, we may reuse $\Psi^{V|U}_1$ in the next block but we cannot reuse $\Phi^{V|U}_1$. We instead share $\Phi^{V|U}_1$ secretly between Alice and Bob, and we show later that this may be accomplished with negligible rate cost. Finally, define $\widetilde{V}_1^{1:N} \triangleq \widetilde{B}_1^{1:N} G_n$. \smallskip In Block ${i \in \llbracket 2,k\rrbracket}$, the encoder forms $\widetilde{V}_i^{1:N}$ as follows.
Let $S_i$ be a vector of $|\mathcal{V}_{V|UZ} \backslash \mathcal{B}_{V|UY}|$ uniformly distributed bits and $M_i$ be a vector of $|\mathcal{M}_{UVZ}|$ uniformly distributed bits, representing the secret and private messages in Block $i$, respectively. Given a private message $m_i$, a confidential message $s_i$, the sequence $\psi^{V|U}_{i-1}$, and the sequence $\widetilde{u}_{i}^{1:N}$ resulting from the encoding of the common message, the encoder draws $\widetilde{b}_i^{1:N}$ from the distribution $\widetilde{p}_{B_i^{1:N}}$ defined as follows: \begin{equation}\label{defsimBi} \widetilde{p}_{{B}_i^j|{B}_i^{1:j-1} U_i^{1:N}} ({b}_i^j|{b}_i^{1:j-1}\widetilde{u}_{i}^{1:N}) \triangleq \begin{cases} \mathds{1} \left\{ b_i^j = s_i^j \right\} & \text{ if } j \in \mathcal{V}_{V|UZ} \backslash \mathcal{B}_{V|UY}\\ \mathds{1} \left\{ b_i^j = \left(\psi^{V|U}_{i-1}\right)^j \right\} & \text{ if } j \in \mathcal{B}_{V|UY}\\ \mathds{1} \left\{ b_i^j = m_{i}^j \right\} & \text{ if } j \in \mathcal{M}_{UVZ} \\ {p}_{B^j|B^{1:j-1}U^{1:N}} (b_i^j|b_i^{1:j-1}\widetilde{u}_{i}^{1:N}) & \text{ if }j\in {\mathcal{V}}_{V|U}^c \end{cases}, \end{equation} where the components of $s_i$, $\psi^{V|U}_{i-1}$, and $m_i$ have been indexed by the sets of indices $\mathcal{V}_{V|UZ} \backslash \mathcal{B}_{V|UY}$, $\mathcal{B}_{V|UY}$, and $\mathcal{M}_{UVZ}$, respectively, so that $S_i = \widetilde{B}_i^{1:N} [\mathcal{V}_{V|UZ} \backslash \mathcal{B}_{V|UY}]$, $\Psi^{V|U}_{i-1} = \widetilde{B}_i^{1:N} [ \mathcal{B}_{V|UY}]$, and $M_i = \widetilde{B}_i^{1:N} [\mathcal{M}_{UVZ}]$. The random bits that identify the codebook and that are required for reconstruction are those in positions ${\mathcal{H}}_{V|UY}$, which we split as \begin{align*} &\Psi^{V|U}_i \triangleq \widetilde{B}_i^{1:N}[{\mathcal{V}}_{V|UY} \cup \left(({\mathcal{H}}_{V|UY} \backslash {\mathcal{V}}_{V|UY})\cap {\mathcal{V}}_{V|U}\right)],\\ &\Phi^{V|U}_i \triangleq \widetilde{B}_i^{1:N}[({\mathcal{H}}_{V|UY} \backslash {\mathcal{V}}_{V|UY})\cap {\mathcal{V}}_{V|U}^c]. \end{align*} Again, $\Psi^{V|U}_i$ is uniformly distributed but $\Phi^{V|U}_i$ is not, so that we reuse $\Psi^{V|U}_i$ in the next block but share $\Phi^{V|U}_i$ securely between Alice and Bob. We show later that the cost of sharing $\Phi^{V|U}_i$ is negligible. In Block $k$, Alice securely shares $(\Psi^{V|U}_{k},\Phi^{V|U}_{1:k})$ with Bob. Finally, define $\widetilde{V}_i^{1:N} \triangleq \widetilde{B}_i^{1:N} G_n$. \subsection{Channel prefixing} \label{sec:channel-prefixing} The channel prefixing procedure with chaining is illustrated in Fig.~\ref{fig_ttilde}. \begin{figure} \centering \includegraphics[width=12cm]{fig_ttilde.pdf} \caption{Chaining for the encoding of the $\widetilde{T}_i^{1:N}$'s, which corresponds to channel prefixing.} \label{fig_ttilde} \end{figure} \smallskip In Block ${1}$, the encoder forms $\widetilde{X}_1^{1:N}$ as follows. Let ${R}_1$ be a vector of $| \mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}|$ uniformly distributed bits representing the randomness required for channel prefixing. Given a randomization sequence $r_1$ and the sequence $\widetilde{v}_1^{1:N}$ resulting from the encoding of the secret and private messages, the encoder draws $\widetilde{t}_1^{1:N}$ from the distribution $\widetilde{p}_{T_1^{1:N}}$ defined as follows:
\begin{equation} \label{defsimT_1} \widetilde{p}_{T_1^j|T_1^{1:j-1}V_1^{1:N}} (t_1^j|t_1^{1:j-1}\widetilde{v}_1^{1:N}) \triangleq \begin{cases} 1/2 & \text{ if }j \in \mathcal{V}_{X|VZ}\\ \mathds{1} \left\{ t_1^j = r_{1}^j \right\}&\text{ if }j\in \mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}\\ {p}_{T^j|T^{1:j-1}V^{1:N}} (t_1^j|t_1^{1:j-1}\widetilde{v}_1^{1:N}) & \text{ if }j\in \mathcal{V}_{X|V}^c \end{cases}, \end{equation} where the components of $r_1$ have been indexed by the set of indices $\mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}$, so that $R_1 = \widetilde{T}_1^{1:N}[\mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}]$. The random bits that identify the codebook are those in positions ${\mathcal{V}}_{X|VZ}$, which we denote \begin{align*} \Psi^{X|V}_1 \eqdef \widetilde{T}_1^{1:N}[\mathcal{V}_{X|VZ}]. \end{align*} Finally, compute $\widetilde{X}_1^{1:N} \triangleq \widetilde{T}_1^{1:N} G_n$, which is transmitted over the channel $p_{YZ|X}$. We denote by $Y_1^{1:N}$ and $Z_1^{1:N}$ the corresponding channel outputs. In {Block} ${i \in \llbracket 2,k\rrbracket}$, the encoder forms $\widetilde{X}_i^{1:N}$ as follows. Let ${R}_i$ be a vector of $| \mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}|$ uniformly distributed bits representing the randomness required for channel prefixing in Block $i$. Given a randomization sequence $r_i$ and the sequence $\widetilde{v}_i^{1:N}$ resulting from the encoding of the secret and private messages, the encoder draws $\widetilde{t}_i^{1:N}$ from the distribution $\widetilde{p}_{T_i^{1:N}}$ defined as follows: \begin{equation} \label{defsimTi} \widetilde{p}_{T^j_i|T_i^{1:j-1}V_i^{1:N}} (t_i^j|t_i^{1:j-1}\widetilde{v}_i^{1:N}) \triangleq \begin{cases} \mathds{1} \left\{ t^j_i = \widetilde{t}^j_{i-1} \right\} & \text{ if } j\in \mathcal{V}_{X|VZ} \\ \mathds{1} \left\{ t_i^j = r_{i}^j \right\} & \text{ if }j \in \mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}\\ {p}_{T^j|T^{1:j-1}V^{1:N}} (t_i^j|t_i^{1:j-1}\widetilde{v}_i^{1:N}) & \text{ if }j \in \mathcal{V}_{X|V}^c \end{cases}, \end{equation} where the components of $r_i$ have been indexed by the set of indices $\mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}$, so that $R_i = \widetilde{T}_i^{1:N}[\mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}]$. Note that the random bits describing the codebook, $\Psi^{X|V}_{i} \eqdef \widetilde{T}_i^{1:N}[\mathcal{V}_{X|VZ}]$, are reused from the previous block. Finally, define $\widetilde{X}_i^{1:N} \triangleq \widetilde{T}_i^{1:N} G_n$ and transmit it over the channel $p_{YZ|X}$. We denote by $Y_i^{1:N}$ and $Z_i^{1:N}$ the corresponding channel outputs. \subsection{Decoding} \label{sec:decoding} The decoding procedure is as follows. \noindent \textbf{Reconstruction of the common message by Bob.} Bob forms the estimate $\widehat{A}_{1:k}^{1:N}$ of $\widetilde{A}_{1:k}^{1:N}$ as follows. In Block 1, Bob knows $(\Psi^U_1,\Phi^U_1)$, which contains all the bits $\widetilde{A}_1^{1:N} [ \mathcal{H}_{U|Y}]$ by construction. Bob runs the successive cancellation decoder for source coding with side information of~\cite{Arikan10} using $Y_1^{1:N}$ and $\widetilde{A}_1^{1:N} [ \mathcal{H}_{U|Y}]$.
In Block $i \in \llbracket 2, k\rrbracket$, Bob estimates $\widetilde{A}_i^{1:N} [ \mathcal{H}_{U|Y}]$ from $(\Psi^U_1,\widehat{A}_{i-1}^{1:N}[ \mathcal{I}_{UY} \backslash \mathcal{I}_{UZ}], \Phi^U_i)$, and uses this estimate along with $Y_i^{1:N}$ to run the successive cancellation decoder for source coding with side information.\smallskip \noindent\textbf{Reconstruction of the common message by Eve.} Eve forms the estimate $\widehat{\widehat{A}}_{1:k}^{1:N}$ of $\widetilde{A}_{1:k}^{1:N}$ starting from Block $k$ and going backwards, as follows. In Block $k$, Eve knows $(\Psi^U_k,\Phi^U_k)$, which contains all the bits in $\widetilde{A}_k^{1:N} [ \mathcal{H}_{U|Z}]$ by construction. Eve runs the successive cancellation decoder for source coding with side information using $Z_k^{1:N}$ and $\widetilde{A}_k^{1:N} [ \mathcal{H}_{U|Z}]$. For $i \in \llbracket 1, k-1 \rrbracket$, Eve estimates $\widetilde{A}_{k-i}^{1:N}[ \mathcal{H}_{U|Z} ]$ from $(\Psi^U_1,\smash{\widehat{\widehat{A}}}_{k-i+1}^{1:N}[ \mathcal{A}_{UYZ} ], \Phi^U_{k-i} )$, and uses this estimate along with $Z_{k-i}^{1:N}$ to run the successive cancellation decoder for source coding with side information.\smallskip \noindent\textbf{Reconstruction of the private and confidential messages by Bob.} Bob forms the estimate $\widehat{B}_{1:k}^{1:N}$ of $\widetilde{B}_{1:k}^{1:N}$ as follows, starting with Block $k$. In Block $k$, given $(\Psi^{V|U}_{k},\Phi^{V|U}_{k} , Y_{k}^{1:N}, \widehat{U}_{k}^{1:N})$, Bob estimates $\widetilde{B}_k^{1:N}$ with the successive cancellation decoder for source coding with side information. From $\widehat{B}_k^{1:N}$, an estimate $\widehat{\Psi}^{V|U}_{k-1}\eqdef\widehat{B}_k^{1:N} [\mathcal{B}_{V|UY}]$ of $\Psi^{V|U}_{k-1}$ is formed. For $i \in \llbracket 1,k-1 \rrbracket$, given $(\widehat{\Psi}^{V|U}_{k-i}, \Phi^{V|U}_{k-i} , Y_{k-i}^{1:N}, \widehat{U}_{k-i}^{1:N})$, Bob estimates $\widetilde{B}_{k-i}^{1:N}$ with the successive cancellation decoder for source coding with side information. From $\widehat{B}_{k-i}^{1:N}$, an estimate of ${\Psi}^{V|U}_{k-i-1}$ is formed. Once all the estimates $\widehat{B}_{1:k}^{1:N}$ have been formed, Bob extracts the estimates $\widehat{S}_{1:k}$ and $\widehat{M}_{1:k}$ of $S_{1:k}$ and $M_{1:k}$, respectively. \section{Analysis of the polar coding scheme} \label{sec:analys-polar-coding} We now analyze in detail the characteristics and performance of the polar coding scheme described in Section~\ref{sec:polar-coding-schem}. Specifically, we show the following. \begin{thm} \label{Thprep} Consider a discrete memoryless broadcast channel $(\mathcal{X}, p_{YZ|X}, \mathcal{Y},\mathcal{Z})$. The polar coding scheme of Section~\ref{sec:polar-coding-schem}, whose complexity is $O(N \log N)$, achieves the region $\mathcal{R}_{\textup{BCC}}$. \end{thm} The result of Theorem~\ref{Thprep} follows in four steps. First, we show that the polar coding scheme of Section~\ref{sec:polar-coding-schem} approximates the statistics of the original \ac{DMS} $({\mathcal{U}}\times{\mathcal{V}}\times{\mathcal{X}}\times{\mathcal{Y}}\times{\mathcal{Z}},p_{UVXYZ})$ from which the polarization sets were defined. Second, we show that the various message rates are indeed those in $\mathcal{R}_{\textup{BCC}}$. Third, we show that the probability of decoding error vanishes with the block length. Finally, we show that the information leakage vanishes with the block length.
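The first step repeatedly converts divergence bounds into variational-distance bounds via Pinsker's inequality, in the form $\mathbb{V}(p,q) \leq \sqrt{2 \log 2\, \mathbb{D}(p || q)}$ with the divergence measured in bits and $\mathbb{V}$ the $L_1$ distance; this is the constant appearing in the $\delta_N^{(\cdot)}$ definitions below. The following standalone Python check, on made-up distributions, illustrates the inequality.
\begin{verbatim}
# Numerical sanity check of Pinsker's inequality, V(p,q) <= sqrt(2 ln2 * D_bits),
# on random toy distributions (illustrative only).
import numpy as np

rng = np.random.default_rng(0)
for _ in range(5):
    p = rng.dirichlet(np.ones(8))
    q = rng.dirichlet(np.ones(8))
    D_bits = float(np.sum(p * np.log2(p / q)))  # KL divergence in bits
    V = float(np.sum(np.abs(p - q)))            # L1 variational distance
    bound = np.sqrt(2 * np.log(2) * D_bits)
    assert V <= bound + 1e-12                   # Pinsker's inequality holds
    print(f"D = {D_bits:.4f} bits, V = {V:.4f} <= {bound:.4f}")
\end{verbatim}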
\subsection{Approximation of original \ac{DMS} statistics} \label{sec:appr-stat} Recall that the vectors $\widetilde{A}_i^{1:N}$, $\widetilde{B}_i^{1:N}$, $\widetilde{V}_i^{1:N}$, and $\widetilde{X}_i^{1:N}$ generated in Block $i \in \llbracket 1,k\rrbracket$ do not have exactly the joint distribution of the vectors ${A}^{1:N}$, ${B}^{1:N}$, ${V}^{1:N}$, and ${X}^{1:N}$ induced by the source polarization of the original \ac{DMS} $({\mathcal{U}}\times{\mathcal{V}}\times{\mathcal{X}}\times{\mathcal{Y}}\times{\mathcal{Z}},p_{UVXYZ})$. However, the following lemmas show that the joint distributions are close to one another, which is crucial for the subsequent reliability and secrecy analysis. \begin{lem} \label{lem_dist_A} For $i \in \llbracket 1,k \rrbracket$, we have \begin{align*} \mathbb{D}(p_{U^{1:N}} || \widetilde{p}_{U_i^{1:N}})=\mathbb{D}(p_{A^{1:N}} || \widetilde{p}_{A_i^{1:N}}) & \leq N \delta_N. \end{align*} Hence, by Pinsker's inequality, \begin{align*} \mathbb{V}(p_{A^{1:N}}, \widetilde{p}_{A_i^{1:N}}) & \leq \delta_N^{(U)}, \end{align*} where $\delta_N^{(U)} \triangleq \sqrt{2\log 2} \sqrt{ N \delta_N }.$ \end{lem} \begin{proof} See Appendix \ref{App_lem_dist_A}. \end{proof} \begin{lem} \label{lem_dist_BV} For $i \in \llbracket 1,k \rrbracket$, we have \begin{align*} \mathbb{D}(p_{V^{1:N}U^{1:N}} || \widetilde{p}_{V_i^{1:N}U_i^{1:N}}) =\mathbb{D}(p_{B^{1:N}U^{1:N}} || \widetilde{p}_{B_i^{1:N}U_i^{1:N}}) & \leq 2 N \delta_N. \end{align*} Hence, by Pinsker's inequality, \begin{align*} \mathbb{V}(p_{B^{1:N}U^{1:N}}, \widetilde{p}_{B_i^{1:N}U_i^{1:N}}) & \leq \delta_N^{(UV)}, \end{align*} where $\delta_N^{(UV)} \triangleq 2\sqrt{\log 2} \sqrt{ N \delta_N }.$ \end{lem} \begin{proof} See Appendix \ref{App_lem_dist_BV}. \end{proof} \begin{lem} \label{lemprefix} For $i \in \llbracket 1,k \rrbracket$, we have \begin{align*} \mathbb{D}(p_{X^{1:N}V^{1:N}} || \widetilde{p}_{X_i^{1:N}V_i^{1:N}}) = \mathbb{D}(p_{T^{1:N}V^{1:N}} || \widetilde{p}_{T_i^{1:N}V_i^{1:N}}) & \leq 3 N \delta_N. \end{align*} Hence, by Pinsker's inequality, \begin{align*} \mathbb{V}(p_{X^{1:N}V^{1:N}}, \widetilde{p}_{X_i^{1:N}V_i^{1:N}}) & \leq \delta_N^{(XV)}, \end{align*} where $\delta_N^{(XV)} \triangleq \sqrt{2\log 2} \sqrt{ 3 N \delta_N }.$ \end{lem} \begin{proof} See Appendix \ref{App_lemprefix}. \end{proof} Combining the three previous lemmas, we obtain the following. \begin{lem} \label{lemdist_joint} For $i \in \llbracket 1,k \rrbracket$, we have \begin{align*} \mathbb{V}(p_{U^{1:N}V^{1:N}X^{1:N}Y^{1:N}Z^{1:N}}, \widetilde{p}_{U_i^{1:N}V_i^{1:N}X_i^{1:N}Y_i^{1:N}Z_i^{1:N}}) & \leq \delta_N^{(P)}, \end{align*} where $ \delta_N^{(P)} \triangleq \sqrt{2\log 2} \sqrt{N \delta_N}(2 \sqrt{ 2 } + \sqrt{3}).$ \end{lem} \begin{proof} See Appendix \ref{App_lemdist_joint}. \end{proof} As noted in \cite{Goela13}, upper-bounding the divergence with a chain rule is easier than directly upper-bounding the variational distance as in \cite{Korada10,Honda13}. \subsection{Transmission rates} \label{sec:coding-rates} We now analyze the rates of the common, confidential, and private messages and of the randomization sequence used at the encoder, as well as the various sum rates and the rate of the additional information sent to Bob and Eve.
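These rate computations rely on the set-size limits $\lim_{N\to\infty} |\mathcal{H}_{X|Y}|/N = \lim_{N\to\infty} |\mathcal{V}_{X|Y}|/N = H(X|Y)$ from~\cite{Arikan10} and~\cite[Lemma 1]{Chou14rev}. As an illustration, for a uniform binary source observed through an erasure channel the conditional entropies $H(U^i|U^{1:i-1}Y^{1:N})$ polarize in closed form through the recursion $e \mapsto (2e - e^2, e^2)$, so the limits can be checked numerically; the toy erasure model and the parameter choices in the sketch below are assumptions made only for this illustration.
\begin{verbatim}
# Toy check of |H_{X|Y}|/N -> H(X|Y) and |V_{X|Y}|/N -> H(X|Y) for a uniform
# binary source X with side information Y = X through an erasure channel of
# erasure probability eps (illustrative model only).
import numpy as np

def polarized_entropies(eps: float, n: int) -> np.ndarray:
    """Exact H(U^i|U^{1:i-1} Y^{1:N}) for the erasure model, N = 2^n."""
    e = np.array([eps])
    for _ in range(n):
        e = np.concatenate([2 * e - e**2, e**2])  # minus / plus transforms
    return e

eps, n = 0.4, 16
N = 2**n
Hcond = polarized_entropies(eps, n)
delta_N = 2.0 ** (-(N ** 0.3))  # delta_N = 2^{-N^beta} with beta = 0.3

print("|H_{X|Y}|/N =", np.mean(Hcond > delta_N))      # -> H(X|Y) = eps
print("|V_{X|Y}|/N =", np.mean(Hcond > 1 - delta_N))  # -> H(X|Y) = eps
\end{verbatim}
Both empirical fractions approach $H(X|Y)=\varepsilon$ as $n$ grows, matching the limits invoked below.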
\noindent\textbf{Common message rate.} The overall rate $R_O$ of common information bits transmitted satisfies \begin{align*} R_O & = \frac{ (k-1)|\mathcal{I}_{UY}|+ |\mathcal{I}_{UY}\cap \mathcal{I}_{UZ}|} {kN} \\ & = \frac{|\mathcal{I}_{UY}|}{N} - \frac{ |\mathcal{I}_{UY}\backslash \mathcal{I}_{UZ}|} {kN} \\ & \geq \frac{|\mathcal{I}_{UY}|}{N} - \frac{ |\mathcal{I}_{UY}|} {kN} \\ & \xrightarrow{N \to \infty} I(U;Y) - \frac{ I(U;Y)} {k}\\ & \xrightarrow{k \to \infty} I(U;Y), \end{align*} where we have used \cite{Arikan10}. Since we also have $R_O \leq \frac{|\mathcal{I}_{UY}|}{N} \xrightarrow{N \to \infty} I(U;Y)$, we conclude $$ R_O \xrightarrow{N \to \infty, k \to \infty} I(U;Y). $$ \noindent\textbf{Confidential message rate.} First, observe that \begin{align*} |\Psi^{V|U}_1| & = |{\mathcal{V}}_{V|UY} \cup \left(({\mathcal{H}}_{V|UY} \backslash {\mathcal{V}}_{V|UY})\cap {\mathcal{V}}_{V|U}\right)| \\ & \leq | \mathcal{V}_{V|UY}| + |\mathcal{H}_{V|UY} \backslash \mathcal{V}_{V|UY}|\\ & = | \mathcal{V}_{V|UY}| + |\mathcal{H}_{V|UY} | - |\mathcal{V}_{V|UY}| \\ & = |\mathcal{H}_{V|UY} |, \end{align*} and $|\Psi^{V|U}_1| \geq |\mathcal{V}_{V|UY}|$. Hence, since $\lim_{N \to \infty} |\mathcal{V}_{V|UY} |/N =H(V|UY)$ by \cite[Lemma 1]{Chou14rev} and $\lim_{N \to \infty} |\mathcal{H}_{V|UY} |/N =H(V|UY)$ by \cite{Arikan10}, we have $$ \lim_{N \to \infty} \frac{|\Psi^{V|U}_1|}{N} =H(V|UY). $$ Then, the overall rate $R_S$ of secret information bits transmitted is \begin{align*} R_S & = \frac{ |\mathcal{V}_{V|UZ}|+ (k-1) |\mathcal{V}_{V|UZ} \backslash \mathcal{B}_{V|UY}|} {kN} \\ & = \frac{ |\mathcal{V}_{V|UZ}|+ (k-1) (|\mathcal{V}_{V|UZ} |- | \mathcal{B}_{V|UY}|)} {kN} \\ & = \frac{ |\mathcal{V}_{V|UZ} |- | \mathcal{B}_{V|UY}|} {N} + \frac{ |\mathcal{B}_{V|UY}|}{kN} \\ &= \frac{ |\mathcal{V}_{V|UZ} |- | \Psi^{V|U}_1|} {N} + \frac{ |\Psi^{V|U}_1|}{kN} \\ & \xrightarrow{N \to \infty} I(V;Y|U) - I(V;Z|U) + \frac{ H(V|UY)}{k}\\ & \xrightarrow{k \to \infty} I(V;Y|U) - I(V;Z|U). \end{align*} \noindent\textbf{Private message rate.} The overall rate $R_M$ of private information bits transmitted is \begin{align*} R_M &= \frac{ k |\mathcal{M}_{UVZ}|} {kN} \\ &= \frac{ |\mathcal{V}_{V|U} \backslash \mathcal{V}_{V|UZ}|} {N} \\ &= \frac{ |\mathcal{V}_{V|U} |-| \mathcal{V}_{V|UZ}|} {N} \\ & \xrightarrow{N \to \infty} I(V;Z|U) , \end{align*} where we have used \cite[Lemma 1]{Chou14rev}. \noindent\textbf{Randomization rate.} The uniform random bits used by the stochastic encoder include those of the randomization sequence for channel prefixing, as well as those required to identify the codebooks and to run the successive cancellation encoding. Using \cite{Arikan10} and the fact that $\mathcal{V}_{U} \backslash \mathcal{I}_{UY} = \mathcal{V}_{U} \cap \mathcal{H}_{U|Y} \subseteq \mathcal{H}_{U|Y}$, we find that the rate required to identify the codebook for the common message satisfies \begin{align*} \frac{ |\mathcal{V}_{U} \backslash \mathcal{I}_{UY}|} {kN} \leq \frac{ |\mathcal{H}_{U|Y} |} {kN} \xrightarrow{N \to \infty} \frac{H(U|Y)}{k}\xrightarrow{k \to \infty} 0. \end{align*} Similarly, the rate required to identify the codebook for the secret and private messages corresponds to the rate of $(\Psi^{V|U}_{k},\Phi^{V|U}_{k})$, which is transmitted to Bob to allow him to reconstruct $\widetilde{B}_{1:k}^{1:N}$: \begin{align*} & \frac{ |(\Psi^{V|U}_{k},\Phi^{V|U}_{k})|} {kN} \\ & = \frac{ |\widetilde{B}_{k}^{1:N}[\mathcal{H}_{V|UY}]|} {kN} \\ & \xrightarrow{N \to \infty} \frac{ H(V|UY)}{k} \\ & \xrightarrow{k \to \infty} 0, \end{align*} where we have used \cite{Arikan10}.
The randomization sequence rate used in channel prefixing is \begin{align*} & \frac{ |\mathcal{V}_{X|V}| + (k-1) |\mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}|} {kN} \\ & = \frac{ |\mathcal{V}_{X|V} \backslash \mathcal{V}_{X|VZ}|} {N} + \frac{ |\mathcal{V}_{X|VZ}|} {kN} \\ & = \frac{ |\mathcal{V}_{X|V}|- | \mathcal{V}_{X|VZ}|} {N} + \frac{ |\mathcal{V}_{X|VZ}|} {kN} \\ & \xrightarrow{N \to \infty} I(X;Z|V) + \frac{ H(X|VZ)} {k}, \\ & \xrightarrow{k \to \infty} I(X;Z|V), \end{align*} where we have used \cite[Lemma 1]{Chou14rev}. We now show that the rate of uniform bits required for successive cancellation encoding in (\ref{eq_sim_A_1}), (\ref{eq_sim_A_i}), (\ref{eq_sim_A_k}), (\ref{eq_sim_Bv_1}), (\ref{defsimBi}), (\ref{defsimT_1}), (\ref{defsimTi}) is negligible through a series of lemmas. \begin{lem} \label{lemrandA} For $i \in \llbracket 1 , k \rrbracket$, we have $$\lim_{N \to \infty} \frac{1}{N}\sum_{j \in \mathcal{V}_{U}^c} H(\widetilde{A}^{j}_i|\widetilde{A}^{1:j-1}_i) = 0.$$ \end{lem} \begin{proof} See Appendix \ref{App_lemrandA}. \end{proof} \begin{lem} \label{lemrandB} For $i \in \llbracket 1 , k \rrbracket$, we have $$\lim_{N \to \infty} \frac{1}{N}\sum_{j \in \mathcal{V}_{V|U}^c} H(\widetilde{B}^{j}_i|\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i) = 0.$$ \end{lem} \begin{proof} See Appendix \ref{App_lemrandB}. \end{proof} \begin{lem} \label{lemrand} For $i \in \llbracket 1 , k \rrbracket$, we have $$\lim_{N \to \infty} \frac{1}{N}\sum_{j \in \mathcal{V}_{X|V}^c} H(\widetilde{T}^{j}_i|\widetilde{T}^{1:j-1}_i \widetilde{V}^{1:N}_i) = 0.$$ \end{lem} The proof of Lemma~\ref{lemrand} is similar to that of Lemma~\ref{lemrandB} using Lemma~\ref{lemprefix} in place of Lemma~\ref{lem_dist_BV}. \smallskip Hence, the overall randomness rate $R_R$ used at the encoder is asymptotically $$R_R \xrightarrow{N \to \infty,k \to \infty} I(X;Z|V).$$ \noindent\textbf{Sum rates.} The sum of the private message rate $R_M$ and the randomness rate $R_R$ is asymptotically \begin{align*} R_M +R_R \xrightarrow{N \to \infty, k \to \infty} & I(V;Z|U) + I(X;Z|V) \\ & \stackrel{(a)}{=} H(Z|U) - H(Z|UV) + H(Z|V) - H(Z|XV)\\ & = H(Z|U) - H(Z|XV)\\ & \stackrel{(b)}{=} H(Z|U) - H(Z|XU)\\ & = I(X;Z|U), \end{align*} where $(a)$ and $(b)$ hold by $U - V - X - Z$. Moreover, the sum of the common message rate $R_O$, the private message rate $R_M$, and the confidential message rate $R_S$ is asymptotically \begin{align*} R_O + R_M +R_S \xrightarrow{N \to \infty, k \to \infty} & I(Y;U) + I(V;Z|U) + I(V;Y|U) - I(V;Z|U)\\ & = I(Y;U) + I(V;Y|U) . \end{align*} \noindent\textbf{Seed rate.} The rate of the secret sequence that must be shared between the legitimate users to initialize the coding scheme is \begin{align*} &\frac{ |\Psi^{V|U}_k| + k|\Phi^{V|U}_1| }{kN}\\ & = \frac{ |\Psi^{V|U}_k| }{kN} + \frac{ |\Phi^{V|U}_1| }{N}\\ & \leq \frac{ |{\mathcal{H}}_{V|UY}| }{kN} + \frac{ |{\mathcal{H}}_{V|UY} \backslash {\mathcal{V}}_{V|UY}| }{N} \\ & \leq \frac{ |{\mathcal{H}}_{V|UY}| }{kN} + \frac{ |{\mathcal{H}}_{V|UY}| - |{\mathcal{V}}_{V|UY}| }{N} \\ & \xrightarrow{N \to \infty} \frac{H(V|Y)}{k} \\ &\xrightarrow{k \to \infty} 0, \end{align*} where we have used \cite[Lemma 1]{Chou14rev} and \cite{Arikan10}. 
Moreover, the rate of public communication from Alice to both Bob and Eve is \begin{align*} &\frac{ |\Psi^{U}_1| + |\Phi^{U}_{1:k}| }{kN}\displaybreak[0]\\ & \leq \frac{ |\Psi^{U}_1| + k|{\mathcal{H}}_{U} \backslash {\mathcal{V}}_{U}| }{kN}\displaybreak[0]\\ & = \frac{ |\mathcal{V}_{U} \backslash \mathcal{I}_{UY}| + k(|{\mathcal{H}}_{U} | - | {\mathcal{V}}_{U}| )}{kN}\displaybreak[0]\\ & \leq \frac{ |\mathcal{H}_{U|Y}| + k(|{\mathcal{H}}_{U} | - | {\mathcal{V}}_{U}|) }{kN}\displaybreak[0]\\ & = \frac{ |\mathcal{H}_{U|Y}| }{kN} + \frac{ |{\mathcal{H}}_{U} | - | {\mathcal{V}}_{U}| }{N}\displaybreak[0]\\ & \xrightarrow{N \to \infty} \frac{H(U|Y)}{k} \displaybreak[0]\\ &\xrightarrow{ k \to \infty} 0. \end{align*} \subsection{Average probability of error} \label{sec:aver-prob-error} We first show that Eve and Bob can reconstruct the common messages $O^{1:N}_{1:k}$ with small probability of error. For $i \in \llbracket 1,k \rrbracket$, consider an optimal coupling~\cite{Aldous83,Korada10} between $\widetilde{p}_{U_i^{1:N}Y_i^{1:N}}$ and $p_{U^{1:N}Y^{1:N}}$ such that $\mathbb{P} [\mathcal{E}_{U_i,Y_i}] = \mathbb{V}(\widetilde{p}_{U_i^{1:N}Y_i^{1:N}} ,p_{U^{1:N}Y^{1:N}})$, where $\mathcal{E}_{U_i,Y_i} \triangleq \{ (\widetilde{U}_i^{1:N}, \widetilde{Y}_i^{1:N}) \neq ({U}^{1:N} , {Y}^{1:N})\}$. Define also for $i \in \llbracket 2 , k \rrbracket$, $\mathcal{E}_{i} \triangleq \{ \widehat{A}_{i-1}^{1:N} [\mathcal{I}_{UY} \backslash \mathcal{I}_{UZ}] \neq \widetilde{A}_{i-1}^{1:N}[\mathcal{I}_{UY} \backslash \mathcal{I}_{UZ}]\}$. We have \begin{align} &\mathbb{P}[ O_{i} \neq \widehat{O}_{i}] \nonumber \\ \nonumber & = \mathbb{P}[ \widehat{U}^{1:N}_{i} \neq \widetilde{U}^{1:N}_{i}]\\ \nonumber & = \mathbb{P}[ \widehat{U}^{1:N}_{i} \neq \widetilde{U}^{1:N}_{i} |\mathcal{E}_{U_i,Y_i}^c\cap \mathcal{E}_i^c] \mathbb{P}[ \mathcal{E}_{U_i,Y_i}^c\cap \mathcal{E}_i^c] + \mathbb{P}[ \widehat{U}^{1:N}_{i} \neq \widetilde{U}^{1:N}_{i} |\mathcal{E}_{U_i,Y_i} \cup \mathcal{E}_i] \mathbb{P}[ \mathcal{E}_{U_i,Y_i}\cup \mathcal{E}_i] , \\ \nonumber & \leq \mathbb{P}[ \widehat{U}^{1:N}_{i} \neq \widetilde{U}^{1:N}_{i} |\mathcal{E}_{U_i,Y_i}^c \cap \mathcal{E}_i^c] + \mathbb{P}[ \mathcal{E}_{U_i,Y_i} \cup \mathcal{E}_i] \\ \nonumber & \stackrel{(a)}{\leq} N \delta_N + \mathbb{P}[ \mathcal{E}_{U_i,Y_i}] + \mathbb{P}[ \mathcal{E}_i] \\ \nonumber & \stackrel{(b)}{\leq} N \delta_N + \delta_N^{(P)} + \mathbb{P}[ \mathcal{E}_i]\\ \nonumber & \leq N \delta_N + \delta_N^{(P)} + \mathbb{P}[ \widehat{U}^{1:N}_{i-1} \neq \widetilde{U}^{1:N}_{i-1}]\\ \nonumber & \stackrel{(c)}{\leq} (i-1)(N \delta_N + \delta_N^{(P)} )+ \mathbb{P}[ \widehat{U}^{1:N}_{1} \neq \widetilde{U}^{1:N}_{1}] \\ & \stackrel{(d)}{\leq} i(N \delta_N + \delta_N^{(P)} ), \label{eq_err_utilde} \end{align} where $(a)$ follows from the error probability of source coding with side information \cite{Arikan10} and the union bound, $(b)$ holds by the optimal coupling and Lemma~\ref{lemdist_joint}, $(c)$ holds by induction, $(d)$ holds similarly to the previous inequalities. We thus have by the union bound and (\ref{eq_err_utilde}) \begin{align*} \mathbb{P}[ O^{1:N}_{1:k} \neq \widehat{O}^{1:N}_{1:k}] & \leq \sum_{i=1}^k \mathbb{P}[ O_{i} \neq \widehat{O}_{i}] \\ & \leq \frac{ k(k+1)}{2} (N \delta_N + \delta_N^{(P)}). \end{align*} We similarly obtain for Eve \begin{align*} \mathbb{P}[ O^{1:N}_{1:k} \neq \widehat{\widehat{O}}^{1:N}_{1:k}] & \leq \frac{ k(k+1)}{2} (N \delta_N + \delta_N^{(P)}). \end{align*} Next, we show how Bob can recover the secret and private messages. 
Informally, the decoding process of the confidential and private messages $(M_{1:k},S_{1:k})$ for Bob is as follows. Reconstruction starts with Block $k$. Given $(\Psi^{V|U}_{k}, \Phi^{V|U}_k, Y_{k}^{1:N},\widehat{U}^{1:N}_{k}) $, Bob can reconstruct $\widetilde{V}_k^{1:N}$, from which $\Psi^{V|U}_{k-1}$ is deduced. Then, for $i \in \llbracket 1 , k-1\rrbracket$, given $(\Psi^{V|U}_{k-i}, \Phi^{V|U}_{k-i}, Y_{k-i}^{1:N},\widehat{U}^{1:N}_{k-i}) $, Bob can reconstruct $\widetilde{V}_{k-i}^{1:N}$, from which $\Psi^{V|U}_{k-i-1}$ is deduced. Finally, $(M_{1:k},S_{1:k})$ can be recovered from $\widetilde{V}_{1:k}^{1:N}$. Formally, the analysis is as follows. For $i \in \llbracket 1 , k \rrbracket$, consider an optimal coupling~\cite{Aldous83,Korada10} between $\widetilde{p}_{U_i^{1:N}V_i^{1:N}Y_i^{1:N}}$ and $p_{U^{1:N}V^{1:N}Y^{1:N}}$ such that $\mathbb{P} [\mathcal{E}_{U_i,V_i,Y_i}] = \mathbb{V}(\widetilde{p}_{U_i^{1:N}V_i^{1:N}Y_i^{1:N}} ,p_{U^{1:N}V^{1:N}Y^{1:N}})$, where $\mathcal{E}_{U_i,V_i,Y_i} \triangleq \{ (\widetilde{U}_i^{1:N}, \widetilde{V}_i^{1:N},{Y}_i^{1:N}) \neq ({U}^{1:N} , {V}^{1:N},{Y}^{1:N})\}$. Define also for $i \in \llbracket 1 , k-1 \rrbracket$, $\mathcal{E}_{\Psi^{V|U}_i} \triangleq \{ \widehat{\Psi}^{V|U}_i \neq \Psi^{V|U}_i\}$, $\mathcal{E}_{\widetilde{U}_i} \triangleq \{ \widehat{U}_i^{1:N} \neq \widetilde{U}_i^{1:N}\}$, and $\mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i} \triangleq \mathcal{E}_{\Psi^{V|U}_i} \cup \mathcal{E}_{\widetilde{U}_i}$. For $i \in \llbracket 1 , k-1 \rrbracket$, we have \begin{align*} & \mathbb{P} [(M_{i}, S_i) \neq (\widehat{M}_{i},\widehat{S}_{i})] \displaybreak[0]\displaybreak[0]\\ & \stackrel{(a)}{=} \mathbb{P} [\widetilde{V}_{i} \neq \widehat{V}_{i}] \displaybreak[0]\\ & = \mathbb{P} [\widetilde{V}_{i} \neq \widehat{V}_{i} | \mathcal{E}_{U_i,V_i,Y_i}^c \cap \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}^c ] \mathbb{P}[\mathcal{E}_{U_i,V_i,Y_i}^c \cap \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}^c] \\ & \phantom{mmmmmmmmm}+ \mathbb{P} [\widetilde{V}_{i} \neq \widehat{V}_{i} |\mathcal{E}_{U_i,V_i,Y_i} \cup \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i} ] \mathbb{P}[\mathcal{E}_{U_i,V_i,Y_i} \cup \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}] \displaybreak[0]\\ & \leq \mathbb{P} [\widetilde{V}_{i} \neq \widehat{V}_{i} |\mathcal{E}_{U_i,V_i,Y_i}^c \cap \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}^c ] + \mathbb{P}[\mathcal{E}_{U_i,V_i,Y_i} \cup \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}] \displaybreak[0]\\ & \leq \mathbb{P} [\widetilde{V}_{i} \neq \widehat{V}_{i} |\mathcal{E}_{U_i,V_i,Y_i}^c \cap \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}^c] + \mathbb{P}[\mathcal{E}_{U_i,V_i,Y_i} ] + \mathbb{P}[ \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}] \displaybreak[0]\\ & \leq \mathbb{P} [\widetilde{V}_{i} \neq \widehat{V}_{i} |\mathcal{E}_{U_i,V_i,Y_i}^c \cap \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}^c] + \mathbb{P}[\mathcal{E}_{U_i,V_i,Y_i} ] + \mathbb{P}[ \mathcal{E}_{\Psi^{V|U}_i}] + \mathbb{P}[ \mathcal{E}_{\widetilde{U}_i}] \displaybreak[0]\\ & \stackrel{(b)}{\leq} \mathbb{P} [\widetilde{V}_{i} \neq \widehat{V}_{i} |\mathcal{E}_{U_i,V_i,Y_i}^c \cap \mathcal{E}_{\Psi^{V|U}_i,\widetilde{U}_i}^c] + \mathbb{P}[\mathcal{E}_{U_i,V_i,Y_i} ] + \mathbb{P} [\widetilde{V}_{i+1} \neq \widehat{V}_{i+1}] + \mathbb{P}[ \widehat{U}^{1:N}_{i} \neq \widetilde{U}^{1:N}_{i}] \displaybreak[0]\\ & \stackrel{(c)}{\leq} N \delta_N + \mathbb{P}[\mathcal{E}_{U_i,V_i,Y_i} ] + \mathbb{P} [\widetilde{V}_{i+1} \neq \widehat{V}_{i+1}] + \mathbb{P}[ \widehat{U}^{1:N}_{i} \neq \widetilde{U}^{1:N}_{i}] 
\displaybreak[0]\\ & \stackrel{(d)}{\leq} N \delta_N + \delta_N^{(P)} + \mathbb{P} [\widetilde{V}_{i+1} \neq \widehat{V}_{i+1}] +\mathbb{P}[ \widehat{U}^{1:N}_{i} \neq \widetilde{U}^{1:N}_{i}] \displaybreak[0]\\ & \stackrel{(e)}{\leq} (i+1) \left( N \delta_N + \delta_N^{(P)} \right) + \mathbb{P} [\widetilde{V}_{i+1} \neq \widehat{V}_{i+1}] \displaybreak[0]\\ & \stackrel{(f)}{\leq} (i+1) (k -i ) \left( N \delta_N + \delta_N^{(P)} \right) + \mathbb{P} [\widetilde{V}_{k} \neq \widehat{V}_{k}] \displaybreak[0]\\ & \stackrel{(g)}{\leq} (i+1)(k -i +1 ) \left( N \delta_N + \delta_N^{(P)} \right) \end{align*} where $(a)$ holds because $\widetilde{V}_{i}$ contains $(M_{i}, S_i ,\Psi^{V|U}_{i-1})$ by construction, $(b)$ holds because $\widetilde{V}_{i+1}$ contains $\Psi^{V|U}_{i}$ by construction, $(c)$ follows from the error probability of lossless source coding with side information~\cite{Arikan10}, $(d)$ holds by the optimal coupling and Lemma~\ref{lemdist_joint}, $(e)$ holds by (\ref{eq_err_utilde}), $(f)$ holds by induction, $(g)$ is obtained similarly to the previous inequalities. Hence, \begin{align} & \mathbb{P} [(M_{1:k},S_{1:k}) \neq (\widehat{M}_{1:k},\widehat{S}_{1:k})] \nonumber \\ \nonumber & \leq \sum_{i=1}^k \mathbb{P} [(M_{i}, S_i) \neq (\widehat{M}_{i},\widehat{S}_{i})] \\ \nonumber & \leq \sum_{i=1}^k (i+1) (k -i +1 ) \left( N \delta_N + \delta_N^{(P)} \right)\\ \nonumber & = \left(\frac{(k+1)(k+2)(k+3)}{6} - (k+1) \right) \left( N \delta_N + \delta_N^{(P)} \right)\\ & = \frac{k(k+1)(k+5)}{6} \left( N \delta_N + \delta_N^{(P)} \right). \end{align} \subsection{Information leakage} The functional dependence graph for the coding scheme of Section \ref{Sec_CS} is given in Figure \ref{figFGD2}. \begin{figure} \centering \includegraphics[width=13cm]{fgd3.pdf} \caption{Functional dependence graph of the block encoding scheme. For Block $i$, $O_i$ is the common message, $M_i$ is the private message, $S_i$ is the confidential message. $\Psi^{V|U}_i$ is the side information retransmitted in the next block to allow Bob to reconstruct $M_i$ and $S_i$ given $\Phi^{V|U}_i$ and its observations $Y^{1:N}_{1:k}$. $\Psi^{U}_i$ is the randomness used to form $\widetilde{U}_i^{1:N}$; since $\Psi^{U}_i \subset \Psi^{U}_1$, it is reused from the previous block. ${R}_i$ and $\Psi^{X|V}_i$ represent the randomness necessary at the encoder to form $\widetilde{X}_i^{1:N}$, where $\Psi^{X|V}_i = \Psi^{X|V}_1$ is reused from the previous block. Finally, $\Phi^{U}_i$ is information, whose rate is negligible, sent to Bob and Eve to allow them to reconstruct the common messages. } \label{figFGD2} \end{figure} For the secrecy analysis, the following term must be upper bounded: $$ I(S_{1:k};\Psi^{U}_1\Phi^{U}_{1:k}Z_{1:k}^N). $$ Note that we have introduced $(\Psi^{U}_1,\Phi^{U}_{1:k})$, since these random variables have been made available to Eve. Recall that $\Phi^{U}_{1:k}$ is additional information transmitted to Bob and Eve to reconstruct the common messages $O_{1:k}$. Recall also that $\Psi^{U}_1 \supset \Psi^{U}_i$, $i \in \llbracket 2 , k \rrbracket$, as it is the randomness reused among all the blocks that allows the transmission of the common messages $O_{1:k}$. We start by proving that secrecy holds for a given block $i \in \llbracket 2 , k \rrbracket$ in the following lemma. 
\begin{lem} \label{lem1c} For $i \in \llbracket 2 , k \rrbracket$ and $N$ large enough, $$I(S_i \Psi^{V|U}_{i-1} ; Z_i^{1:N} \Phi^{U}_i \Psi^{U}_1) \leq \delta_N^{(*)},$$ where $\delta_N^{(*)} \triangleq \sqrt{2\log 2} \sqrt{N \delta_N}(1+ 6 \sqrt{ 2 } + 3 \sqrt{3}) ( N - \log_2 (\sqrt{2\log 2} \sqrt{N \delta_N}(1+ 6 \sqrt{ 2 } + 3 \sqrt{3})) )$. \end{lem} \begin{proof} See Appendix~\ref{App_lem1c}. \end{proof} Recall that for channel prefixing in the encoding process we reuse some randomness $\Psi^{X|V}_1$ among all the blocks so that $\Psi^{X|V}_1 = \Psi^{X|V}_i$, $i \in \llbracket 2 , k \rrbracket$. We show in the following lemma that $\Psi^{X|V}_1$ is almost independent of $(Z_{i}^{1:N}, \Psi^{V|U}_{i-1}, S_{i}, \Phi^{U}_i, \Psi^{U}_i)$. This fact will be useful in the secrecy analysis of the overall scheme. \begin{lem} \label{lem4} For $i \in \llbracket 2 , k \rrbracket$ and $N$ large enough, $$I( \Psi^{X|V}_1 ; Z_{i}^{1:N} \Psi^{V|U}_{i-1} S_{i} \Phi^{U}_i \Psi^{U}_i ) \leq \delta_N^{(*)},$$ where $\delta_N^{(*)}$ is defined as in Lemma \ref{lem1c}. \end{lem} \begin{proof} See Appendix~\ref{App_lem4}. \end{proof} Using Lemmas \ref{lem1c} and \ref{lem4}, we show in the following lemma a recurrence relation that will make the secrecy analysis over all blocks easier. \begin{lem} \label{lemdifc} Let $i \in \llbracket 1 , k-1 \rrbracket$. Define $\widetilde{L}_{i} \triangleq I(S_{1:k}; \Psi^{U}_1 \Phi^{U}_{1:i} Z_{1:i}^{1:N})$. We have $$ \widetilde{L}_{i+1} - \widetilde{L}_{i} \leq 3\delta_N^{(*)}. $$ \end{lem} \begin{proof} See Appendix~\ref{App_lemdifc}. \end{proof} We then have \begin{align*} \widetilde{L}_{1} & = I(S_{1:k}; \Psi^{U}_1 \Phi^{U}_{1} Z_{1}^{1:N}) \\ & = I(S_1 ; \Psi^{U}_1 \Phi^{U}_{1} Z_1^{1:N}) + I(S_{2:k}; \Psi^{U}_1 \Phi^{U}_{1} Z_{1}^{1:N} |S_1)\\ & \stackrel{(a)}{\leq} \delta_N^{(*)} +I(S_{2:k}; \Psi^{U}_1 \Phi^{U}_{1} Z_{1}^{1:N} |S_1)\\ & \leq \delta_N^{(*)} +I(S_{2:k}; \Psi^{U}_1 \Phi^{U}_{1} Z_{1}^{1:N}S_1 )\\ & \stackrel{(b)}{=} \delta_N^{(*)}, \end{align*} where $(a)$ follows from Lemma \ref{lem1c}, $(b)$ follows from the independence of $S_{2:k}$ from the random variables of Block 1. Hence, strong secrecy follows from Lemma \ref{lemdifc} by remarking that \begin{align*} I(S_{1:k};\Psi^{U}_1\Phi^{U}_{1:k}Z_{1:k}^N) & = \widetilde{L}_{1} + \sum_{i=1}^{k-1} (\widetilde{L}_{i+1} - \widetilde{L}_{i}) \\ & \leq \delta_N^{(*)} + (k-1) (3\delta_N^{(*)}) \\ & = (3k-2) \delta_N^{(*)}. \end{align*} \section{Conclusion} \label{sec:conclusion} Our proposed polar coding scheme for the broadcast channel with confidential messages and constrained randomization provides an explicit low-complexity scheme achieving the capacity region of~\cite{Watanabe12}. Although the presence of auxiliary random variables and the need to re-align polarization sets through chaining introduce rather involved notation, the coding scheme is conceptually close to a binning proof of the capacity region, in which polarization is used in place of random binning. We believe that a systematic use of this connection will effectively allow one to translate any results proved with output statistics of random binning \cite{Yassaee2012} into a polar coding scheme. It is arguable whether the resulting schemes are truly practical, as the block length $N$ and the number of blocks $k$ are likely to be fairly large. In addition, work remains to be done to circumvent the need for sharing random seeds between the transmitter and receivers. 
\appendices \section{Proof of Lemma~\ref{lem_dist_A}} \label{App_lem_dist_A} Let $i \in \llbracket 2,k-1\rrbracket$. We have \begin{align} & \mathbb{D}(p_{U^{1:N}} || \widetilde{p}_{U_i^{1:N}}) \nonumber \displaybreak[0]\\ & \stackrel{(a)}{=} \mathbb{D}(p_{A^{1:N}} || \widetilde{p}_{A_i^{1:N}}) \nonumber \displaybreak[0]\\ & \stackrel{(b)}{=} \sum_{j=1}^N \mathbb{D}(p_{A^{j}|A^{1:j-1}} || \widetilde{p}_{A_i^j|A_i^{1:j-1}}) \nonumber\displaybreak[0]\\ & \stackrel{(c)}{=} \sum_{j\in \mathcal{V}_{U}} \mathbb{D}(p_{A^{j}|A^{1:j-1}} || \widetilde{p}_{A_i^j|A_i^{1:j-1}}) \nonumber \displaybreak[0]\\ & \stackrel{(d)}{=} \sum_{j \in \mathcal{V}_{U}} ( 1 -H(A^{j}|A^{1:j-1}) ) \nonumber\displaybreak[0]\\ \nonumber & \stackrel{(e)}{\leq} |\mathcal{V}_{U}| \delta_N \displaybreak[0]\\ & \leq N \delta_N, \end{align} where $(a)$ holds by invertibility of $G_n$, $(b)$ holds by the chain rule, $(c)$ holds by (\ref{eq_sim_A_i}), $(d)$ holds by (\ref{eq_sim_A_i}) and uniformity of $O_i$ and $O_{i-1,2}$, $(e)$ holds by definition of $\mathcal{V}_{U}$. Similarly for $i \in \{ 1, k \}$, using (\ref{eq_sim_A_1}) and (\ref{eq_sim_A_k}) we also have \begin{align} \mathbb{D}(p_{U^{1:N}} || \widetilde{p}_{U_i^{1:N}}) \leq N \delta_N. \end{align} \section{Proof of Lemma~\ref{lem_dist_BV}} \label{App_lem_dist_BV} Let $i \in \llbracket 2,k \rrbracket$. We have \begin{align} & \mathbb{D}(p_{B^{1:N}|U^{1:N}} || \widetilde{p}_{B_i^{1:N}|U_i^{1:N}}) \nonumber \\ \nonumber & \stackrel{(a)}{=} \sum_{j=1}^N \mathbb{D}(p_{B^{j}|B^{1:j-1}U^{1:N}} || \widetilde{p}_{B_i^j|B_i^{1:j-1}U_i^{1:N}}) \\ \nonumber & \stackrel{(b)}{=} \sum_{j\in \mathcal{V}_{V|U}} \mathbb{D}(p_{B^{j}|B^{1:j-1}U^{1:N}} || \widetilde{p}_{B_i^j|B_i^{1:j-1}U_i^{1:N}}) \\ \nonumber & \stackrel{(c)}{=} \sum_{j\in \mathcal{V}_{V|U}} ( 1 -H(B^{j}|B^{1:j-1}U^{1:N}) ) \\ \nonumber & \stackrel{(d)}{\leq} |\mathcal{V}_{V|U}| \delta_N\\ & \leq N \delta_N, \label{eqsupbv1} \end{align} where $(a)$ holds by the chain rule, $(b)$ holds by~(\ref{defsimBi}), $(c)$ holds by~(\ref{defsimBi}) and uniformity of $\Psi^{V|U}_{i-1}$, $S_i$, and $M_i$, $(d)$ holds by definition of $\mathcal{V}_{V|U}$. Then, \begin{align*} & \mathbb{D}(p_{V^{1:N}U^{1:N}} || \widetilde{p}_{V_i^{1:N}U_i^{1:N}}) \\ & \stackrel{(a)}{=} \mathbb{D}(p_{B^{1:N}U^{1:N}} || \widetilde{p}_{B_i^{1:N}U_i^{1:N}}) \\ & \stackrel{(b)}{=} \mathbb{D}(p_{B^{1:N}|U^{1:N}} || \widetilde{p}_{B_i^{1:N}|U_i^{1:N}}) + \mathbb{D}(p_{U^{1:N}}|| \widetilde{p}_{U_i^{1:N}}) \\ & \stackrel{(c)}{\leq} 2 N \delta_N, \end{align*} where $(a)$ holds by invertibility of $G_n$, $(b)$ holds by the chain rule, $(c)$ holds by (\ref{eqsupbv1}) and Lemma~\ref{lem_dist_A}. Similarly, using (\ref{eq_sim_Bv_1}) and Lemma~\ref{lem_dist_A}, we have \begin{align*} \mathbb{D}(p_{V^{1:N}U^{1:N}}|| \widetilde{p}_{V_1^{1:N}U_1^{1:N}}) {\leq} 2 N \delta_N. \end{align*} \section{Proof of Lemma~\ref{lemprefix}} \label{App_lemprefix} Let $i \in \llbracket 2,k \rrbracket$. 
We have \begin{align} & \mathbb{D}(p_{T^{1:N}|V^{1:N}} || \widetilde{p}_{T_i^{1:N}|V_i^{1:N}}) \nonumber \\ \nonumber & \stackrel{(a)}{=} \sum_{j=1}^N \mathbb{D}(p_{T^{j}|T^{1:j-1}V^{1:N}}|| \widetilde{p}_{T_i^j|T_i^{1:j-1}V_i^{1:N}}) \\ \nonumber & \stackrel{(b)}{=} \sum_{j\in \mathcal{V}_{X|V}} \mathbb{D}(p_{T^{j}|T^{1:j-1}V^{1:N}} || \widetilde{p}_{T_i^j|T_i^{1:j-1}V_i^{1:N}}) \\ \nonumber & \stackrel{(c)}{=} \sum_{j\in \mathcal{V}_{X|V}} ( 1 -H(T^{j}|T^{1:j-1}V^{1:N}) ) \\ \nonumber & \stackrel{(d)}{\leq} |\mathcal{V}_{X|V}| \delta_N\\ & \leq N \delta_N, \label{eqsupd1XV} \end{align} where $(a)$ holds by the chain rule, $(b)$ holds by~(\ref{defsimTi}), $(c)$ holds by~(\ref{defsimTi}) and uniformity of the bits in $\widetilde{T}_i^{1:N}[ \mathcal{V}_{X|V}]$, $(d)$ holds by definition of $\mathcal{V}_{X|V}$. Then, \begin{align*} & \mathbb{D}(p_{X^{1:N}V^{1:N}} || \widetilde{p}_{X_i^{1:N}V_i^{1:N}}) \\ & \stackrel{(a)}{=} \mathbb{D}(p_{T^{1:N}V^{1:N}} || \widetilde{p}_{T_i^{1:N}V_i^{1:N}}) \\ & \stackrel{(b)}{=} \mathbb{D}(p_{T^{1:N}|V^{1:N}} || \widetilde{p}_{T_i^{1:N}|V_i^{1:N}}) + \mathbb{D}(p_{V^{1:N}} || \widetilde{p}_{V_i^{1:N}}) \\ & \stackrel{(c)}{\leq} 3 N \delta_N, \end{align*} where $(a)$ holds by invertibility of $G_n$, $(b)$ holds by the chain rule, $(c)$ holds by (\ref{eqsupd1XV}) and Lemma~\ref{lem_dist_BV}. Similarly, using (\ref{defsimT_1}) and Lemma~\ref{lem_dist_BV}, we have \begin{align*} \mathbb{D}(p_{X^{1:N}V^{1:N}} || \widetilde{p}_{X_1^{1:N}V_1^{1:N}}) {\leq} 3 N \delta_N. \end{align*} \section{Proof of Lemma~\ref{lemdist_joint}} \label{App_lemdist_joint} We have \begin{align} & \mathbb{V}(p_{U^{1:N}V^{1:N}X^{1:N}Y^{1:N}Z^{1:N}}, \widetilde{p}_{U_i^{1:N}V_i^{1:N}X_i^{1:N}Y_i^{1:N}Z_i^{1:N}}) \nonumber \displaybreak[0]\\ \nonumber & = \mathbb{V}(p_{Y^{1:N}Z^{1:N}|U^{1:N}V^{1:N}X^{1:N}}p_{U^{1:N}V^{1:N}X^{1:N}}, \widetilde{p}_{Y_i^{1:N}Z_i^{1:N}|U_i^{1:N}V_i^{1:N}X_i^{1:N}}\widetilde{p}_{U_i^{1:N}V_i^{1:N}X_i^{1:N}} ) \displaybreak[0]\\ \nonumber & \stackrel{(a)}{=} \mathbb{V}(p_{Y^{1:N}Z^{1:N}|X^{1:N}}p_{U^{1:N}V^{1:N}X^{1:N}},\widetilde{p}_{Y_i^{1:N}Z_i^{1:N}|X_i^{1:N}} \widetilde{p}_{U_i^{1:N}V_i^{1:N}X_i^{1:N}} ) \displaybreak[0]\\ \nonumber & \stackrel{(b)}{=} \mathbb{V}(p_{U^{1:N}V^{1:N}X^{1:N}}, \widetilde{p}_{U_i^{1:N}V_i^{1:N}X_i^{1:N}} ) \displaybreak[0]\\ \nonumber & = \mathbb{V}(p_{X^{1:N}|U^{1:N}V^{1:N}} p_{U^{1:N}V^{1:N}}, \widetilde{p}_{X_i^{1:N}|U_i^{1:N}V_i^{1:N}} \widetilde{p}_{U_i^{1:N}V_i^{1:N}} ) \displaybreak[0]\\ \nonumber & \stackrel{(c)}{=} \mathbb{V}(p_{X^{1:N}|V^{1:N}} p_{U^{1:N}V^{1:N}}, \widetilde{p}_{X_i^{1:N}|V_i^{1:N}} \widetilde{p}_{U_i^{1:N}V_i^{1:N}} ) \displaybreak[0]\\ \nonumber & \stackrel{(d)}{\leq} \mathbb{V}(p_{X^{1:N}|V^{1:N}} p_{U^{1:N}V^{1:N}}, \widetilde{p}_{X_i^{1:N}|V_i^{1:N}} p_{U^{1:N}V^{1:N}} ) + \mathbb{V}(\widetilde{p}_{X_i^{1:N}|V_i^{1:N}} p_{U^{1:N}V^{1:N}}, \widetilde{p}_{X_i^{1:N}|V_i^{1:N}} \widetilde{p}_{U_i^{1:N}V_i^{1:N}} ) \displaybreak[0]\\ \nonumber & = \mathbb{V}(p_{X^{1:N}|V^{1:N}} p_{U^{1:N}V^{1:N}}, \widetilde{p}_{X_i^{1:N}|V_i^{1:N}} p_{U^{1:N}V^{1:N}} ) + \mathbb{V}( p_{U^{1:N}V^{1:N}}, \widetilde{p}_{U_i^{1:N}V_i^{1:N}} ) \displaybreak[0]\\ \nonumber & \stackrel{(e)}{\leq} \mathbb{V}(p_{X^{1:N}|V^{1:N}} p_{U^{1:N}V^{1:N}}, \widetilde{p}_{X_i^{1:N}|V_i^{1:N}} p_{U^{1:N}V^{1:N}} ) + \delta_N^{(UV)} \displaybreak[0]\\ \nonumber & = \mathbb{V}(p_{X^{1:N}|V^{1:N}} p_{V^{1:N}}, \widetilde{p}_{X_i^{1:N}|V_i^{1:N}} p_{V^{1:N}} ) + \delta_N^{(UV)} \displaybreak[0]\\ \nonumber & \stackrel{(f)}{\leq} 
\mathbb{V}(p_{X^{1:N}|V^{1:N}} p_{V^{1:N}}, \widetilde{p}_{X_i^{1:N}V_i^{1:N}} ) + \mathbb{V}(\widetilde{p}_{X_i^{1:N}V_i^{1:N}}, \widetilde{p}_{X_i^{1:N}|V_i^{1:N}} p_{V^{1:N}} )+ \delta_N^{(UV)} \displaybreak[0]\\ \nonumber & = \mathbb{V}(p_{X^{1:N}V^{1:N}} , \widetilde{p}_{X_i^{1:N}V_i^{1:N}} ) + \mathbb{V}(\widetilde{p}_{V_i^{1:N}}, p_{V^{1:N}} )+ \delta_N^{(UV)} \displaybreak[0]\\ \nonumber & \leq \mathbb{V}(p_{X^{1:N}V^{1:N}} , \widetilde{p}_{X_i^{1:N}V_i^{1:N}} ) + \mathbb{V}( p_{U^{1:N}V^{1:N}}, \widetilde{p}_{U_i^{1:N}V_i^{1:N}} )+ \delta_N^{(UV)} \displaybreak[0]\\ \nonumber & \stackrel{(g)}{\leq} 2 \delta_N^{(UV)} + \delta_N^{(XV)}, \end{align} where $(a)$ and $(c)$ follow from the Markov condition $U \to V \to X \to (YZ)$ and $\widetilde{U}_i^{1:N} \to \widetilde{V}_i^{1:N} \to \widetilde{X}_i^{1:N} \to (Y_i^{1:N}Z_i^{1:N})$, $(b)$ follows from $p_{Y^{1:N}Z^{1:N}|X^{1:N}} =\widetilde{p}_{Y_i^{1:N}Z_i^{1:N}|X_i^{1:N}}$ and \cite[Lemma 17]{Cuff09}, $(d)$ holds by the triangle inequality, $(e)$ holds by Lemma \ref{lem_dist_BV}, $(f)$ holds by the triangle inequality, $(g)$ holds by Lemmas \ref{lem_dist_BV} and \ref{lemprefix}. \section{Proof of Lemma~\ref{lemrandA}} \label{App_lemrandA} We have for $i \in \llbracket 1, k \rrbracket$, for $j \in \mathcal{V}_{U}^c$, \begin{align*} &|H(\widetilde{A}^{j}_i|\widetilde{A}^{1:j-1}_i) - H({A}^{j}|{A}^{1:j-1}) | \\ & \leq |H(\widetilde{A}^{1:j}_i) - H({A}^{1:j}) | + |H(\widetilde{A}^{1:j-1}_i) - H({A}^{1:j-1}) | \\ & \stackrel{(a)}{\leq} \mathbb{V}( p_{{A}^{1:j}}, \widetilde{p}_{{A}_i^{1:j}} ) \log \frac{2^{j}}{\mathbb{V}( p_{{A}^{1:j}}, \widetilde{p}_{{A}_i^{1:j}} )} + |H(\widetilde{A}^{1:j-1}_i) - H({A}^{1:j-1}) | \\ & \stackrel{(b)}{\leq} \delta_N^{(U)} \left( N - \log_2 \delta_N^{(U)} \right) + |H(\widetilde{A}^{1:j-1}_i) - H({A}^{1:j-1}) | \\ & \leq 2 \delta_N^{(U)} \left( N - \log_2 \delta_N^{(U)} \right)\\ & \triangleq \delta_N^{(A)}, \end{align*} where $(a)$ holds by \cite{bookCsizar}, $(b)$ holds by Lemma \ref{lem_dist_A} and because $x \mapsto x \log x$ is decreasing for $x>0$ small enough. Hence, we obtain \begin{align*} & \sum_{j \in \mathcal{V}_{U}^c} H(\widetilde{A}^{j}_i|\widetilde{A}^{1:j-1}_i)\\ & = \sum_{j \in \mathcal{H}_{U}^c} H(\widetilde{A}^{j}_i|\widetilde{A}^{1:j-1}_i) + \sum_{j \in \mathcal{H}_{U} \backslash \mathcal{V}_{U}} H(\widetilde{A}^{j}_i|\widetilde{A}^{1:j-1}_i)\\ & \leq |\mathcal{H}_{U} \backslash \mathcal{V}_{U}| + \sum_{j \in \mathcal{H}_{U}^c} H(\widetilde{A}^{j}_i|\widetilde{A}^{1:j-1}_i)\\ & = |\mathcal{H}_{U} | -| \mathcal{V}_{U}| + \sum_{j \in \mathcal{H}_{U}^c} H(\widetilde{A}^{j}_i|\widetilde{A}^{1:j-1}_i)\\ & \leq |\mathcal{H}_{U} | -| \mathcal{V}_{U}| + \sum_{j \in \mathcal{H}_{U}^c} (H({A}^{j}|{A}^{1:j-1}) + \delta_N^{(A)})\\ & \leq |\mathcal{H}_{U} | -| \mathcal{V}_{U}| + |\mathcal{H}_{U}^c| (\delta_N + \delta_N^{(A)})\\ & \leq |\mathcal{H}_{U} | -| \mathcal{V}_{U}| + N (\delta_N + \delta_N^{(A)}), \end{align*} and we obtain the result by \cite[Lemma 1]{Chou14rev} and \cite{Arikan10}. 
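For the reader's convenience, we note that the continuity bound of \cite{bookCsizar} invoked in step $(a)$ above, and again in the next proofs, is used in the form
\begin{align*}
|H(p) - H(q)| \leq \mathbb{V}(p,q) \log_2 \frac{|\mathcal{X}|}{\mathbb{V}(p,q)},
\end{align*}
for distributions $p$ and $q$ on a finite alphabet $\mathcal{X}$, valid once $\mathbb{V}(p,q)$ is small enough; together with the monotonicity of $x \mapsto x \log_2 x$ near $0$, this is what yields the quantities $\delta_N^{(A)}$ above and $\delta_N^{(B)}$ below.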
\section{Proof of Lemma~\ref{lemrandB}} \label{App_lemrandB} We have for $i \in \llbracket 1, k \rrbracket$, for $j \in \mathcal{V}_{V|U}^c$, \begin{align*} &|H(\widetilde{B}^{j}_i|\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i) - H({B}^{j}|{B}^{1:j-1} {U}^{1:N}) | \\ & \leq |H(\widetilde{B}^{1:j}_i \widetilde{U}^{1:N}_i) - H({B}^{1:j} {U}^{1:N}) | + |H(\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i) - H({B}^{1:j-1} {U}^{1:N}) | \\ & \stackrel{(a)}{\leq} \mathbb{V}( p_{{B}^{1:j} {U}^{1:N}}, \widetilde{p}_{{B}_i^{1:j} {U}_i^{1:N}} ) \log \frac{2^{j+N}}{\mathbb{V}( p_{{B}^{1:j} {U}^{1:N}}, \widetilde{p}_{{B}_i^{1:j} {U}_i^{1:N}} )} + |H(\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i) - H({B}^{1:j-1} {U}^{1:N}) | \\ & \stackrel{(b)}{\leq} \delta_N^{(UV)} \left( 2N - \log_2 \delta_N^{(UV)} \right) + |H(\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i) - H({B}^{1:j-1} {U}^{1:N}) | \\ & \leq 2 \delta_N^{(UV)} \left( 2N - \log_2 \delta_N^{(UV)} \right)\\ & \triangleq \delta_N^{(B)}, \end{align*} where $(a)$ holds by \cite{bookCsizar}, $(b)$ holds by Lemma \ref{lem_dist_BV} and because $x \mapsto x \log x$ is decreasing for $x>0$ small enough. Then, \begin{align*} & \sum_{j \in \mathcal{V}_{V|U}^c} H(\widetilde{B}^{j}_i|\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i)\\ & = \sum_{j \in \mathcal{H}_{V|U}^c} H(\widetilde{B}^{j}_i|\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i) + \sum_{j \in \mathcal{H}_{V|U} \backslash \mathcal{V}_{V|U}} H(\widetilde{B}^{j}_i|\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i)\\ & \leq |\mathcal{H}_{V|U} \backslash \mathcal{V}_{V|U}| + \sum_{j \in \mathcal{H}_{V|U}^c} H(\widetilde{B}^{j}_i|\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i)\\ & = |\mathcal{H}_{V|U} |-| \mathcal{V}_{V|U}| + \sum_{j \in \mathcal{H}_{V|U}^c} H(\widetilde{B}^{j}_i|\widetilde{B}^{1:j-1}_i \widetilde{U}^{1:N}_i)\\ & \leq |\mathcal{H}_{V|U} |-| \mathcal{V}_{V|U}| + \sum_{j \in \mathcal{H}_{V|U}^c} (H({B}^{j}|{B}^{1:j-1} {U}^{1:N}) + \delta_N^{(B)})\\ & \leq |\mathcal{H}_{V|U} |-| \mathcal{V}_{V|U}| + |\mathcal{H}_{V|U}^c| (\delta_N + \delta_N^{(B)})\\ & \leq |\mathcal{H}_{V|U} |-| \mathcal{V}_{V|U}| + N (\delta_N + \delta_N^{(B)}), \end{align*} and we obtain the result by \cite[Lemma 1]{Chou14rev} and \cite{Arikan10}. 
\section{Proof of Lemma~\ref{lem1c}} \label{App_lem1c} We have \begin{align} & \mathbb{V}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}] U^{1:N} Z^{1:N}},\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}]} \widetilde{p}_{ U_i^{1:N} Z_i^{1:N}}) \nonumber\\ \nonumber & \leq \mathbb{V}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}] U^{1:N} Z^{1:N}},{p}_{B^{1:N}[\mathcal{V}_{V|UZ}]} p_{ U^{1:N} Z^{1:N}}) + \mathbb{V}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}]} p_{ U^{1:N} Z^{1:N}},\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}]} \widetilde{p}_{ U_i^{1:N} Z_i^{1:N}}) \\ \nonumber & \stackrel{(a)}{\leq} \mathbb{V}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}] U^{1:N} Z^{1:N}},{p}_{B^{1:N}[\mathcal{V}_{V|UZ}]} p_{ U^{1:N} Z^{1:N}}) + \mathbb{V}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}]} ,\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}]} ) + \mathbb{V}(p_{ U^{1:N} Z^{1:N}},\widetilde{p}_{ U_i^{1:N} Z_i^{1:N}}) \\ \nonumber & \stackrel{(b)}{\leq} \mathbb{V}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}] U^{1:N} Z^{1:N}},{p}_{B^{1:N}[\mathcal{V}_{V|UZ}]} p_{ U^{1:N} Z^{1:N}}) + 2 \delta_N^{(P)} \\ \nonumber & \stackrel{(c)}{\leq} \sqrt{2 \log2} \sqrt{ \mathbb{D}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}] U^{1:N} Z^{1:N}} || {p}_{B^{1:N}[\mathcal{V}_{V|UZ}]} p_{U^{1:N} Z^{1:N}})} + 2 \delta_N^{(P)} \\ \nonumber & = \sqrt{2 \log2} \sqrt{ I( B^{1:N}[\mathcal{V}_{V|UZ}] ;U^{1:N} Z^{1:N} )} + 2 \delta_N^{(P)} \\ & \stackrel{(d)}{\leq} \sqrt{2\log 2} \sqrt{N \delta_N} + 2\delta_N^{(P)}, \label{eq_sec_int2} \end{align} where $(a)$ follows from the triangle inequality, $(b)$ holds by Lemma \ref{lemdist_joint}, $(c)$ holds by Pinsker's inequality, $(d)$ holds because using the fact that conditioning reduces entropy we have \begin{align*} & I( B^{1:N}[\mathcal{V}_{V|UZ}] ; U^{1:N} Z^{1:N} )\\ & = H( B^{1:N}[\mathcal{V}_{V|UZ}] ) - H({B}^{1:N}[ \mathcal{V}_{V|UZ} ]| U^{1:N} Z^{1:N})\\ & \leq |\mathcal{V}_{V|UZ}|- \sum_{j \in \mathcal{V}_{V|UZ} } H(B^{j} | B^{1:j-1} U^{1:N} Z^{1:N})\\ & \leq |\mathcal{V}_{V|UZ}| + |\mathcal{V}_{V|UZ}| (\delta_N -1)\\ & \leq N \delta_N. \end{align*} We then obtain \begin{align} &\mathbb{V}(\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}] U_i^{1:N} Z_i^{1:N}},\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}]} \widetilde{p}_{U_i^{1:N} Z_i^{1:N}}) \nonumber \\ \nonumber & \stackrel{(a)}{\leq} \mathbb{V}(\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}] U_i^{1:N} Z_i^{1:N}},{p}_{B^{1:N}[\mathcal{V}_{V|UZ}] U^{1:N} Z^{1:N}}) + \mathbb{V}({p}_{B^{1:N}[\mathcal{V}_{V|UZ}] U^{1:N} Z^{1:N}},\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}]} \widetilde{p}_{ U_i^{1:N} Z_i^{1:N}}) \\ & \stackrel{(b)}{\leq} \sqrt{2\log 2} \sqrt{N \delta_N} + 3\delta_N^{(P)}, \label{eq_sec_int3} \end{align} where $(a)$ holds by the triangle inequality, $(b)$ holds by Lemma \ref{lemdist_joint}, and (\ref{eq_sec_int2}). 
Then, for $N$ large enough by \cite{bookCsizar}, \begin{align*} & I(S_i \Psi^{V|U}_{i-1} ; Z_i^{1:N} \Phi^{U}_i \Psi^{U}_i) \\ & \leq I (\widetilde{B}_i^{1:N}[ \mathcal{V}_{V|UZ}] ; Z_i^{1:N} \widetilde{U}_i^{1:N})\\ & \leq \mathbb{V}(\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}] U_i^{1:N} Z_i^{1:N}},\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}]} \widetilde{p}_{U_i^{1:N} Z_i^{1:N}}) \\ & \phantom{mmmmm} \times \log_2 \frac{2^{|\mathcal{V}_{V|UZ}|}}{\mathbb{V}(\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}] U_i^{1:N} Z_i^{1:N}},\widetilde{p}_{B_i^{1:N}[\mathcal{V}_{V|UZ}]} \widetilde{p}_{ U_i^{1:N} Z_i^{1:N}})} \\ & \leq \sqrt{2\log 2} \sqrt{N \delta_N}(1+ 6 \sqrt{ 2 } + 3 \sqrt{3}) ( N - \log_2 (\sqrt{2\log 2} \sqrt{N \delta_N}(1+ 6 \sqrt{ 2 } + 3 \sqrt{3})) ), \end{align*} where we have used (\ref{eq_sec_int3}) and that $x \mapsto x \log x$ is decreasing for $x>0$ small enough. \section{Proof of Lemma~\ref{lem4}} \label{App_lem4} By the triangle inequality we can write \begin{align} & \mathbb{V}({p}_{T^{1:N}[\mathcal{V}_{X|VZ}]U^{1:N} V^{1:N} Z^{1:N}},\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}]} \widetilde{p}_{ U_i^{1:N} V_i^{1:N} Z_i^{1:N}}) \nonumber\\ \nonumber & \leq \mathbb{V}({p}_{T^{1:N}[\mathcal{V}_{X|VZ}] U^{1:N} V^{1:N} Z^{1:N}} ,{p}_{T^{1:N}[\mathcal{V}_{X|VZ}]} {p}_{ U^{1:N} V^{1:N} Z^{1:N}}) \\ \nonumber & \phantom{mmmmmmm}+ \mathbb{V}({p}_{T^{1:N}[\mathcal{V}_{X|VZ}]} {p}_{ U^{1:N} V^{1:N} Z^{1:N}} ,\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}]} \widetilde{p}_{U_i^{1:N} V_i^{1:N} Z_i^{1:N}}) \\ \nonumber & \stackrel{(a)}{\leq} \mathbb{V}({p}_{T^{1:N}[\mathcal{V}_{X|VZ}] U^{1:N} V^{1:N} Z^{1:N}} ,{p}_{T^{1:N}[\mathcal{V}_{X|VZ}]} {p}_{U^{1:N} V^{1:N} Z^{1:N}}) + 2\delta_N^{(P)} \\ \nonumber & \stackrel{(b)}{\leq} \sqrt{2 \log2} \sqrt{ \mathbb{D}({p}_{T^{1:N}[\mathcal{V}_{X|VZ}] U^{1:N} V^{1:N} Z^{1:N}} || {p}_{T^{1:N}[\mathcal{V}_{X|VZ}]} {p}_{ U^{1:N} V^{1:N} Z^{1:N}})} + 2 \delta_N^{(P)} \\ \nonumber & = \sqrt{2 \log2} \sqrt{ I (T^{1:N}[ \mathcal{V}_{X|VZ}]; Z^{1:N} U^{1:N} V^{1:N}) } + 2\delta_N^{(P)} \\ & \stackrel{(c)}{\leq} \sqrt{2\log 2} \sqrt{N \delta_N}+ 2\delta_N^{(P)}, \label{eq_sec_int2b} \end{align} where $(a)$ holds by the triangle inequality and Lemma \ref{lemdist_joint}, $(b)$ holds by Pinsker's inequality, $(c)$ holds because using the fact that conditioning reduces entropy and $U - V - X$ we have \begin{align*} & I (T^{1:N}[ \mathcal{V}_{X|VZ}]; Z^{1:N} U^{1:N} V^{1:N})\\ & \leq |\mathcal{V}_{X|VZ}|- \sum_{j \in \mathcal{V}_{X|VZ} } H(T^{j} | T^{1:j-1} Z^{1:N} U^{1:N} V^{1:N})\\ & = |\mathcal{V}_{X|VZ}|- \sum_{j \in \mathcal{V}_{X|VZ} } H(T^{j} | T^{1:j-1} Z^{1:N} V^{1:N})\\ & \leq |\mathcal{V}_{X|VZ}| + |\mathcal{V}_{X|VZ}| (\delta_N -1)\\ & \leq N\delta_N. \end{align*} Hence, \begin{align} &\mathbb{V}(\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}] U_i^{1:N} V_i^{1:N}Z_i^{1:N}},\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}]} \widetilde{p}_{ U_i^{1:N} V_i^{1:N} Z_i^{1:N}}) \nonumber \\ \nonumber & \leq \mathbb{V}(\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}] U_i^{1:N} V_i^{1:N}Z_i^{1:N}},{p}_{T^{1:N}[\mathcal{V}_{X|VZ}] U^{1:N} V^{1:N} Z^{1:N}}) \\ \nonumber & \phantom{mmmm} + \mathbb{V}({p}_{T^{1:N}[\mathcal{V}_{X|VZ}] U^{1:N} V^{1:N} Z^{1:N}},\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}]} \widetilde{p}_{ U_i^{1:N} V_i^{1:N} Z_i^{1:N}}) \\ & \leq \sqrt{2\log 2} \sqrt{N \delta_N}+ 3\delta_N^{(P)}, \label{eq_sec_int3b} \end{align} where the first inequality holds by the triangle inequality, and the second one by Lemma \ref{lemdist_joint} and (\ref{eq_sec_int2b}). 
Then, for $N$ large enough by \cite{bookCsizar}, \begin{align*} & I( \Psi^{X|V}_i ; Z_{i}^{1:N} \Psi^{V|U}_{i-1} S_{i} \Phi^{U}_i \Psi^{U}_i) \\ & = I(\widetilde{T}_i^{1:N}[ \mathcal{V}_{X|VZ}] ; Z_{i}^{1:N} \widetilde{B}_{i}^{1:N}[ \mathcal{H}_{V|UZ}] \Phi^{U}_i \Psi^{U}_i )\\ & \leq I(\widetilde{T}_i^{1:N}[ \mathcal{V}_{X|VZ}] ; Z_{i}^{1:N} \widetilde{B}_{i}^{1:N} \widetilde{U}_{i}^{1:N})\\ & \stackrel{(a)}{=} I(\widetilde{T}_i^{1:N}[ \mathcal{V}_{X|VZ}] ; Z_{i}^{1:N} \widetilde{V}_{i}^{1:N} \widetilde{U}_{i}^{1:N})\\ & \leq \mathbb{V}(\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}] U_i^{1:N} V_i^{1:N}Z_i^{1:N}},\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}]} \widetilde{p}_{ U_i^{1:N} V_i^{1:N} Z_i^{1:N}}) \\ & \phantom{mmmmm}\times \log_2 \frac{2^{|\mathcal{V}_{X|VZ}|}}{\mathbb{V}(\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}] U_i^{1:N} V_i^{1:N}Z_i^{1:N}},\widetilde{p}_{T_i^{1:N}[\mathcal{V}_{X|VZ}]} \widetilde{p}_{ U_i^{1:N} V_i^{1:N} Z_i^{1:N}})} \\ & \stackrel{(b)}{\leq} \sqrt{2\log 2} \sqrt{N \delta_N}(1+ 6 \sqrt{ 2 } + 3 \sqrt{3}) ( N - \log_2 (\sqrt{2\log 2} \sqrt{N \delta_N}(1+ 6 \sqrt{ 2 } + 3 \sqrt{3})) ), \end{align*} where $(a)$ holds by invertibility of $G_n$, $(b)$ holds by (\ref{eq_sec_int3b}) and because $x \mapsto x \log x$ is decreasing for $x>0$ small enough. \section{Proof of Lemma~\ref{lemdifc}} \label{App_lemdifc} Let $i \in \llbracket 1 , k-1 \rrbracket$. We have \begin{align*} & \widetilde{L}_{i+1} - \widetilde{L}_{i} \\ & = I(S_{1:k}; \Psi^{U}_1 \Phi^{U}_{1:i+1} Z_{1:i+1}^{1:N}) - I(S_{1:k}; \Psi^{U}_1 \Phi^{U}_{1:i} Z_{1:i}^{1:N})\\ & = I(S_{1:k}; \Phi^{U}_{i+1} Z_{i+1}^{1:N} | \Psi^{U}_1 \Phi^{U}_{1:i} Z_{1:i}^{1:N}) \\ & = I(S_{1:i+1}; \Phi^{U}_{i+1} Z_{i+1}^{1:N} |\Psi^{U}_1 \Phi^{U}_{1:i} Z_{1:i}^{1:N}) +I(S_{i+2:k}; \Phi^{U}_{i+1} Z_{i+1}^{1:N} | \Psi^{U}_1 \Phi^{U}_{1:i} Z_{1:i}^{1:N} S_{1:i+1}) \\ & \stackrel{(a)}{\leq} I(S_{1:i+1} \Phi^{U}_{1:i} Z_{1:i}^{1:N} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} |\Psi^{U}_1 ) +I(S_{i+2:k}; \Phi^{U}_{1:i+1} Z_{1:i+1}^{1:N} S_{1:i+1} \Psi^{U}_1 ) \\ & \stackrel{(b)}{=} I(S_{1:i+1} \Phi^{U}_{1:i} Z_{1:i}^{1:N} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} |\Psi^{U}_1 ) \\ & = I(S_{i+1} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} |\Psi^{U}_1 ) + I(S_{1:i} \Phi^{U}_{1:i} Z_{1:i}^{1:N} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} |\Psi^{U}_1 S_{i+1} ) \\ & \leq I(S_{i+1} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} \Psi^{U}_1 ) + I(S_{1:i} \Phi^{U}_{1:i} Z_{1:i}^{1:N} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} |\Psi^{U}_1 S_{i+1} ) \\ & \stackrel{(c)}{\leq} \delta_N^{(*)} + I(S_{1:i} \Phi^{U}_{1:i} Z_{1:i}^{1:N} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} |\Psi^{U}_1 S_{i+1} ) \\ & \leq \delta_N^{(*)} + I(S_{1:i} \Phi^{U}_{1:i} Z_{1:i}^{1:N} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} S_{i+1}|\Psi^{U}_1 ) \displaybreak[0] \\ & \stackrel{(d)}{\leq} \delta_N^{(*)} + I(S_{1:i} \Phi^{U}_{1:i} Z_{1:i}^{1:N} \Psi^{V|U}_i \Psi^{X|V}_i ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} S_{i+1} |\Psi^{U}_1 ) \\ & = \delta_N^{(*)} + I( \Psi^{V|U}_i\Psi^{X|V}_i; \Phi^{U}_{i+1} Z_{i+1}^{1:N} S_{i+1} |\Psi^{U}_1 ) + I(S_{1:i} \Phi^{U}_{1:i} Z_{1:i}^{1:N}; \Phi^{U}_{i+1} Z_{i+1}^{1:N} S_{i+1} |\Psi^{V|U}_i\Psi^{X|V}_i\Psi^{U}_1) \\ & \stackrel{(e)}{=} \delta_N^{(*)} + I( \Psi^{V|U}_i\Psi^{X|V}_i; \Phi^{U}_{i+1} Z_{i+1}^{1:N} S_{i+1} |\Psi^{U}_1 ) \displaybreak[0] \\ & \leq \delta_N^{(*)} + I( \Psi^{V|U}_i\Psi^{X|V}_i \Psi^{U}_1; S_{i+1} ) + I( \Psi^{V|U}_i\Psi^{X|V}_i ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} | \Psi^{U}_1 S_{i+1} ) \displaybreak[0] \\ & \stackrel{(f)}{=} \delta_N^{(*)} + I( \Psi^{V|U}_i\Psi^{X|V}_i ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} | \Psi^{U}_1 S_{i+1} ) 
\displaybreak[0] \\ & = \delta_N^{(*)} + I( \Psi^{V|U}_i ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} | \Psi^{U}_1 S_{i+1} ) + I( \Psi^{X|V}_i ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} | \Psi^{V|U}_i \Psi^{U}_1 S_{i+1} ) \\ & \leq \delta_N^{(*)} + I( \Psi^{V|U}_i S_{i+1} ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} \Psi^{U}_1 ) + I( \Psi^{X|V}_i ; \Phi^{U}_{i+1} Z_{i+1}^{1:N} \Psi^{V|U}_i \Psi^{U}_1 S_{i+1} ) \\ & \stackrel{(g)}{\leq} 3\delta_N^{(*)}, \end{align*} where $(a)$ holds by the chain rule and positivity of mutual information, $(b)$ holds by the independence of $S_{i+2:k}$ from all the random variables of the previous blocks, $(c)$ holds by Lemma \ref{lem1c}, in $(d)$ we introduce the random variables $\Psi^{V|U}_i$ and $\Psi^{X|V}_i$ to be able to break the dependencies between the random variables of block $(i+1)$ and the random variables of the previous blocks, $(e)$ holds because $S_{1:i}\Phi^{U}_{1:i}Z_{1:i}^{1:N} \rightarrow \Psi^{V|U}_i \Psi^{X|V}_i \Psi^{U}_1 \rightarrow \Phi^{U}_{i+1} Z_{i+1}^{1:N} S_{i+1}$, $(f)$ holds because $(\Psi^{V|U}_i,\Psi^{X|V}_i,\Psi^{U}_1)$ is independent of $S_{i+1}$, $(g)$ holds by Lemmas~\ref{lem1c}, \ref{lem4}, and because $\Psi^{X|V}_i$ is constant, equal to $\Psi^{X|V}_1$. \bibliographystyle{IEEEtran}
\section{Introduction.} Surface layering at metallic liquid-vapor interfaces was suggested in the early eighties by Monte Carlo simulations of alkali metal liquid surfaces \cite{develyn:prl47:44}. Subsequently, more elaborate Monte Carlo techniques (see \cite{rice:ms29:93} and references therein), experimental measurements \cite{magnussenetal:prl74:44,reganetal:prl75:98, reganetal:prb55:74,tostmannetal:prb59:83, tostmannetal:prb61:84,dimasietal:prl86:38, shpyrkoetal:prb67:05,shpyrkoetal:prb70:06} and recent ab initio molecular dynamics (MD) simulations \cite{fabriciusetal:prb60:83,walker:jpcm16:75, gonzalez:prl92:01,gonzalez:prl94:01,gonzalez:jcp123:01}, have established that the ionic density profile of liquid metals across the interface shows oscillations that decay into the bulk liquid after, on average, three to four layers. The use of ab initio MD simulations in these studies is important for two reasons. First, being ab initio, the valence electrons and the ions are treated on the same footing, with the ions reacting consistently to the large spatial variations undergone by the electronic density when moving from the liquid to the vapor. Second, being MD simulations, a study of the motion of the atoms can be carried out directly and changes across the interface can be analyzed. This type of study has not yet been undertaken, as far as we know, mainly because most of the simulations of liquid metallic surfaces were carried out using the Monte Carlo method, which gives no information about the dynamic properties of the system studied. The main aim of this paper is to fill this gap, reporting a study of the atomic motion in the liquid-vapor interfaces of Li, Na, K, Rb, Cs, Mg, Ba, Al, Tl, Si, and the Na$_3$K$_7$ alloy, using slabs with 2000 atoms. Layering also appears for metals at the solid-liquid interface \cite{jesson:jcp113:35}, for liquids in contact with a hard wall \cite{huismanetal:nat390:79,yuetal:prl82:26}, or in confined geometries \cite{teng:prl90:04}. Moreover, for ultrathin films of large molecules in contact with a Si wall \cite{schuster:europolymerj40:93} and for confined dusty plasma liquids \cite{teng:prl90:04} it has been possible to follow experimentally the molecular motion, whereas at the solid-liquid interface ab initio MD simulations made it possible to study the atomic diffusion at the interface \cite{jesson:jcp113:35}. In all cases reduced diffusion with respect to the bulk was found. These oscillating profiles are in contrast with those of the liquid-vapor interfaces of one-component dielectrics, like water or Lennard-Jones (LJ) systems, or those of liquid-liquid interfaces of immiscible mixtures, which show monotonic profiles with no layering. Taking the archetypical example of the one-component LJ system, we have found in the literature a large number of computer simulation studies of the coexisting densities, the interfacial width and the surface tension, as a function of the temperature, the particular LJ model used (truncated, truncated and shifted, full potential) and the lateral area simulated (see, for instance, \cite{gloor:jcp123:03} and references therein). However, to our knowledge, an analysis of the atomic motion at the interface has not been performed yet. Surprisingly, the situation is somewhat different for more complex systems, like water, or mixtures, where some (scarce) studies of atomic diffusion in the interfacial region have indeed been performed \cite{meyer:jcp89:67,buhn:flpheq224:21,benjamin:jcp97:32,liu:jpcb108:95}. 
In the case of mixtures, some anisotropy in diffusion was detected, either due to an increase in the diffusion coefficient parallel to the interface, $D_T$, with respect to the bulk value \cite{meyer:jcp89:67,buhn:flpheq224:21}, or to a decrease of the diffusion coefficient normal to the interface, $D_N$, \cite{benjamin:jcp97:32}. Following a thorough analysis of the effect of the inhomogeneity of the interface on the values (and even the definition) of $D_N$, molecular dynamics simulations of the liquid-vapor interface of water \cite{liu:jpcb108:95} revealed an increase in $D_T$ (3.5-fold) and also in $D_N$ (2-fold), which was attributed to the reduction of the number of hydrogen bonds in the interface. Moreover, it was argued that the same qualitative behavior would occur for other simple liquids \cite{liu:jpcb108:95}. Although surface layering is common to the liquid-vapor interface of metals and the systems mentioned above, there are also important differences. Even though the atoms in the outer layer of the liquid-vapor interface rarely leave the liquid surface (especially at temperatures near their triple points), they are not geometrically confined. Moreover, in liquids confined, or on a wall, or at the interface with their solid phase, a strong interaction between the substrate and the first layers of the liquid has an important influence on the structure and dynamics of the liquid, leading to strong oscillations in the profile and to reduced diffusion. It is also worth analyzing whether the metallic liquid-vapor interface, in particular the outer layer, behaves like a quasi two-dimensional system. There are some liquid alloys (Ga with small amounts of Tl, Pb or Bi) where the minority component, which has a high melting temperature and segregates to the surface, displays this kind of behavior (see, e. g., references \cite{yang:prb67:03,issanin:cpl394:20,issanin:jcp121:05}). For one component metals or other type of alloys, a possible measure of the two-dimensional character of the outer layer could be some distinctive feature of the density distribution function (DDF) of the time of residence of the atoms in that layer, for instance its mean value. \section{Simulation details.} Because of the periodic boundary conditions used in most simulation methods, a slab geometry is usually adopted in studies of the liquid-vapor interface, with two free surfaces perpendicular to one of the axes, taken here as the $z$ axis. These two surfaces should be well separated to minimize interaction between them. For the metallic systems we have performed ab initio molecular dynamics simulations, whereas for the LJ system standard classical molecular dynamics have been used. Ab initio simulations based on the Kohn-Sham formulation of Density Functional Theory (DFT) \cite{kohn:pr140:33} pose huge computational demands, and the only metallic liquid surfaces studied so far, Si \cite{fabriciusetal:prb60:83} and Na \cite{walker:jpcm16:75}, used small samples (96 and 160 particles respectively) where the two surfaces are rather close (16 and 20 \AA\ respectively). Orbital free ab initio molecular dynamics (OFAIMD) simulations reduce somewhat these computational demands, by returning to the original Hohenberg-Kohn formulation of DFT \cite{hohenberg:pr136:64}, and adopting an explicit but approximate density functional for the electron kinetic energy so that the whole energy is a functional of the electron density. 
OFAIMD simulations of liquid metal surfaces have recently been performed \cite{gonzalez:prl92:01,gonzalez:prl94:01,gonzalez:jcp123:01} for Li, Na, Mg, Al, Si and Na$_{3}$K$_{7}$ using 2000 particles and for Li$_{4}$Na$_{6}$ using 3000 particles, which led to simulation boxes big enough to reliably represent a macroscopic interface. Details about the formalism and the electron-ion interactions can be found elsewhere \cite{gonzalez:jpcm13:01,gonzalez:prb65:01}. Even though the OFAIMD method has been applied successfully to bulk metals and alloys, it might be argued that this is not a sufficient validation of the method, as surfaces are different from bulk systems. However, it must be stressed that the OFAIMD studies of Na and Si produced results very similar to those obtained by the (in principle) more accurate Kohn-Sham {\em ab initio} simulations: the wavelength of the oscillations in the profiles, which is recovered exactly, the number of nearest neighbors of a Si particle across the interface, and the surface tension of liquid Na, are all well reproduced by the OFAIMD approach. Further confidence in the ability of the method to tackle metallic surfaces can be obtained from studies for finite systems, where the surface plays an essential role. For instance, a long-standing and previously unexplained anomalous variation of the melting temperatures of Na clusters with size \cite{Haberlandetal} has been reproduced and rationalized for the first time in terms of the surface geometry and stability \cite{Aguado} using the same OFAIMD method as in this paper. Even further confidence in the capabilities of the method, concerning specifically semi-infinite surfaces, can be gained from preliminary results \cite{AlMg} obtained for the temperature-dependent surface relaxation of Al($110$) and Mg($10\bar{1}0$), which reproduce qualitatively both experimental data and previous Kohn-Sham calculations. The LJ system has been simulated at a reduced temperature $T^*=k_BT/\epsilon=0.73$, which is near its triple point, in order to compare with the metallic systems in a similar thermodynamic state. We have made simulations with 1960 particles, similar to the metals, and also with 15680 particles in order to reassess the results for the smaller system. The lateral side for the small system is $11.75\sigma$, which is already large enough to suppress the unrealistic oscillatory behavior of the interfacial properties due to periodic boundary conditions \cite{Oreaetal}. For the large system the lateral side is doubled ($23.5\sigma$) so the expected effects on the interfacial properties are only those of the enhanced capillary waves which should produce a wider interface than in the small system. For all systems, the number of $NVE$ equilibrium configurations used for averaging ranged between 20000 and 30000. \section{Layer definition.} Figure \ref{layers} shows the ionic density profile obtained for liquid Si and the partial and total ionic density profiles for the liquid alloy Na$_3$K$_7$, while that of the LJ system is depicted in figure \ref{LJ}. The layers where the ionic motion was studied, which for metals were located between consecutive minima in the total ionic density profile, are also indicated. The outermost layer (numbered 1) extends from the last minimum to the inflection point of the decaying profile. For layers 1, 2 and 3 the results of the two layers on opposite sides of the slab were averaged. 
Further structural details of the interfaces of these systems will be given elsewhere \cite{prbinpress}, but we note here that the relative amplitudes of the outermost oscillation for the alkalis and alkaline-earths are rather similar (maxima from 1.00 to 1.13, minima from 0.80 to 0.88) while they increase significantly for the systems of valence 3 and 4 (maxima 1.29, 1.47 and 1.56, minima 0.73, 0.69 and 0.54 for Al, Si and Tl respectively). The width of the different layers is very small, of the order of one atomic diameter. Therefore, it is not appropriate to speak of diffusion along the direction normal to the interface within a layer, as there is no room for a particle to develop that type of motion within the layer. Figure \ref{movim} shows the time evolution of the $z$ coordinate of one particular Al atom; an oscillatory motion within a layer can be seen, followed by jumps into adjacent layers. The trajectory of the particle, projected onto the $xz$ and $yz$ planes, is also shown in the figure. To provide a meaningful comparison among the results for different systems, with diverse masses and potentials, and consequently different characteristic times, a reference time, $\tau$, has been defined for each system which is to be used as a time unit (see table \ref{tablilla}). Specifically, $\tau$ has been taken as the first value of $t$ for which the mean square displacement (MSD) in the bulk liquid has an inflection point, which is an indication of the atomic motion changing from free-particle to diffusive-like behavior. \begin{figure} \begin{center} \mbox{\psfig{file=layersnew.ps,angle=-90,width=85mm}} \end{center} \caption{Ionic density profiles normal to the liquid-vapor interfaces of Si and Na$_3$K$_7$. The different regions where the atomic motion has been studied are also shown.} \label{layers} \end{figure} \begin{figure}[h] \begin{center} \mbox{\psfig{file=LJ.ps,angle=-90,width=85mm,clip}} \end{center} \caption{Ionic density profile normal to the liquid-vapor interfaces of the LJ system at $T^*=0.73$, and the ``layers'' defined for this interface. Only half the slab is shown.} \label{LJ} \end{figure} \begin{figure} \begin{center} \mbox{\psfig{file=mov_1part.ps,angle=-90,width=85mm}} \end{center} \caption{Time evolution of the $z$ coordinate of an Al atom and its projected trajectory onto the $xz$ (full line) and the $yz$ (dotted line) planes.} \label{movim} \end{figure} The ionic density profile for the large LJ system is shown in figure \ref{LJ}. For this type of system, the definition of ``layers'' in order to analyze atomic motion is basically arbitrary. We have chosen to define them in a similar way as we have done for metals in order to make the comparison as fair as possible. First, we have fitted the simulation results to an error function profile, $$\rho(z)=\frac12(\rho_{\ell}+\rho_v)+\frac12(\rho_{\ell}-\rho_v) \mbox{erf}(\sqrt{\pi} (z_0-z)/w) $$ (where $\rho_{\ell}$ and $\rho_v$ are the coexisting densities of the liquid and the vapor, $z_0$ is the position of the interface, which coincides with the inflection point of the profile, and $w$ its width), which appears to be more adequate than the usual hyperbolic tangent one \cite{Sidesetal}. The widths for the small and large LJ systems are $w=2.25\sigma$ and $w=2.47\sigma$ respectively, the latter being larger as expected from the increased number of capillary waves. 
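As an illustration, the fit just described can be sketched in a few lines of Python (a minimal, self-contained example on synthetic data; the names, values, and initial guesses are hypothetical and this is not the actual analysis code used in this work):
\begin{verbatim}
import numpy as np
from scipy.special import erf
from scipy.optimize import curve_fit

def erf_profile(z, rho_l, rho_v, z0, w):
    # error-function profile fitted to the simulated density rho(z)
    return 0.5*(rho_l + rho_v) \
        + 0.5*(rho_l - rho_v)*erf(np.sqrt(np.pi)*(z0 - z)/w)

# synthetic noisy profile standing in for the binned simulation data
z_bins = np.linspace(0.0, 40.0, 200)
rho_data = erf_profile(z_bins, 0.8, 0.005, 20.0, 2.4)
rho_data += np.random.default_rng(0).normal(0.0, 0.005, z_bins.size)

# initial guesses: liquid density, vapor density, interface position, width
popt, _ = curve_fit(erf_profile, z_bins, rho_data, p0=(1.0, 0.0, 18.0, 2.0))
rho_l, rho_v, z0, w = popt
\end{verbatim}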
In the following we will present the results obtained for the large system, which, apart from this interfacial width, are practically coincident with those of the small system. Layer number 1 is then defined as a slice of width $w/2$ from the inflection point towards the liquid. Further slices of width $w/2$ towards the liquid are then taken as layers 2 and 3. Note that, defined this way, the layers of the LJ system are also very thin ($1.235\sigma$), so we are in a similar situation as in metals, where we consider that diffusion is only possible within the layers parallel to the interface. The reference time $\tau$ for the LJ system is finally defined in the same way as for metals, and is also shown in table \ref{tablilla}. \section{Results and discussion.} In order to analyze the diffusive motion within the layers, the MSD along the $x$ and $y$ directions has been calculated for those particles that remain inside the layer for a large enough time ($t_m$), a criterion somewhat different from, although in the same spirit as, those used in previous works \cite{benjamin:jcp97:32,liu:jpcb108:95}. The selection of $t_m$ is somewhat delicate, as $t_m$ must be large enough so that diffusive motion has already set in, but the number of particles that remain in a layer for at least $t_m$ decreases as $t_m$ increases, leading to poorer statistics. A good compromise was found to be $t_m=t_{20}$, defined so that the probability of residence in the layer for times larger than $t_{20}$ is 20 \%. All the data shown below were obtained with this criterion, but checks showed that other reasonable choices for $t_m$ led to statistically equivalent results. In order to compute $t_{20}$, and also to characterize the motion along the $z$-direction, the residence time of particles in each of the layers was studied. \begin{figure} \begin{center} \mbox{\psfig{file=distr_rt.ps,angle=-90,width=85mm}} \end{center} \caption{Distribution function of the residence time, $p(t)$, in layer 1 (full line) and layer 3 (dotted line) for liquid Ba. The inset shows the integrated probability distribution function, $P(t)$, together with the 20 \% level used to define $t_{20}$. The dashed line denotes layer 2.} \label{probab} \end{figure} \subsection{Motion perpendicular to the interface.} For all the systems considered, including the metals and the LJ system, the DDF of the residence time, $p(t)$, has been computed for the three outer layers (the case of Ba is shown in figure \ref{probab}), and some common features can be identified. The maximum of $p(t)$ occurs at very small $t$-values ($\approx \tau$), suggesting that most of the particles attempting to enter the layer are bounced back. Also, the DDF exhibits a long time tail, giving rise to mean residence times (MRT), $t_{\rm av}$, that increase when moving from the bulk liquid towards the outer layers (see table \ref{tablilla}). In the LJ system this increase is rather small, amounting to approximately 10 \%. For the metals, however, the MRT at the outermost layer is strongly enhanced, with increases between 75 \% (for Na) and 246 \% (for Tl), while the values taken at layer 2 show a more modest increase of around 10 \%. This large discrepancy between the behavior of the LJ system and that of metals precludes any simple geometrical interpretation of the results and underlines that the change of interactions at the liquid-vapor interface of metals does have an important influence on some dynamic properties of the surface. 
Moreover, these results indicate that the outermost layer in metals behaves in a more two-dimensional fashion than the inner ones. It is interesting to note that the MRT is around 20$\tau$ for the alkalis and alkaline-earths, whereas it increases in Al ($\approx 40\tau$) and Tl ($\approx 60\tau$), but not in Si. In order to rationalize these differences among metals, we consider how the atoms move from one layer to another. The minima in the density profiles suggest the existence of an interlayer potential barrier, which would be higher the lower the minimum of the density profile. Therefore, this barrier would increase significantly from the alkalis and alkaline-earths to Al, to Si, and finally to Tl. The probability of overcoming this barrier would be related both to its height and to the frequency with which the atoms attempt to cross it. This frequency has been estimated as proportional to the Einstein frequency along the $z$ direction, $\Omega_z$, which is shown in table \ref{tablilla} together with the corresponding frequency in the direction parallel to the interface, $\Omega_T$, for completeness. These have been obtained from a short-time (up to $t=\tau$) expansion of the corresponding MSD \cite{Balubook}. We find, in terms of $\tau$, a rather universal value for all the systems except for Si, whose more open structure leads to higher Einstein frequencies. Therefore, the increase in the MRT of Al and Tl is related to the increased barrier height, whereas in Si the increased barrier height is counterbalanced by an increased Einstein frequency, leading to MRT similar to those of the alkalis and alkaline-earths. The absence of well-defined layers in the LJ profile suggests that the motion perpendicular to the interface is much easier than in metals, as no barriers are present, and therefore the position of our arbitrarily defined ``layers" has very little influence on the MRT. \begin{figure} \begin{center} \mbox{\psfig{file=r2t.ps,angle=-90,width=85mm}} \end{center} \caption{Mean square displacements for layers 1 and 2 and the slab center for liquid K.} \label{r2t} \end{figure} Similar trends are also obtained for the liquid Na$_3$K$_7$ alloy, although surface segregation leads to an outer layer of almost pure K, so that Na-related quantities are largely irrelevant there. Indeed, the outermost Na layer comprises regions 1 and 2, and therefore the numbers shown in table \ref{tablilla} for Na in the alloy span columns 1 and 2 together. The integral $P(t)=\int_t^{\infty} p(u)\,du$ gives the probability that, having entered the layer, a particle remains there for longer than $t$, and is used to obtain $t_{20}$ (see the inset of figure \ref{probab}). Values for $t_{20}$ shown in table \ref{tablilla} correlate rather well with the MRT.
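To illustrate the $t_{20}$ criterion, a minimal sketch (with synthetic residence intervals standing in for the simulation output; not the actual analysis code) is:
\begin{verbatim}
# Minimal sketch: estimate t_20 and the MRT from a sample of residence
# intervals; t_20 is the time such that P(residence > t_20) = 20%.
import numpy as np

rng = np.random.default_rng(0)
residence = rng.exponential(scale=15.0, size=5000)  # placeholder data, units of tau

t20 = np.quantile(residence, 0.80)  # 80th percentile <=> 20% survival
t_av = residence.mean()             # mean residence time (MRT)
print(f"t_av = {t_av:.1f} tau,  t20 = {t20:.1f} tau")
\end{verbatim}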
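For reference, the Einstein frequencies listed in table \ref{tablilla} follow from the standard short-time expansion of the MSD per Cartesian component (see, e.g., \cite{Balubook}), $$ \langle \delta z^2(t)\rangle = \frac{k_BT}{M}\, t^2 \left [ 1-\frac{\Omega_z^2\, t^2}{12}+\dots \right ], $$ with $M$ the atomic mass; $\Omega_T$ is defined analogously from the MSD components parallel to the interface.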
\begin{widetext} \begin{table} \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|c|c|c|} \hline \multicolumn{2}{|c}{~} & \multicolumn{3}{|c}{$t_{\rm av}/\tau$} & \multicolumn{3}{|c}{$t_{20}/\tau$} & \multicolumn{3}{|c}{$D_T/D_{\rm center}$} & \multicolumn{3}{|c|}{$\Omega_z(\Omega_T)\tau$} \\ System & $\tau$ &Layer 1 & Layer 2& Layer 3 & Layer 1& Layer 2 &Layer 3 & Layer 1& Layer 2 &Layer 3 & Layer 1 & Layer 2 & Center \\ \hline LJ & 0.167 & 11.1 & 10.6 & 10.5 & 18.7 & 17.7 & 17.2 & 1.94 & 1.13 & 0.96 & ---(---) & ---(---) & --- \\ \hline Li & 0.038 & 24.2 & 14.2 & 13.2 & 32.4 & 19.7 & 17.4 & 2.01 & 1.19 & 1.09 & 1.27(1.18) & 1.58(1.51) & 1.56 \\ \hline Na & 0.105 & 17.6 & 11.0 & 10.1 & 26.3 & 16.6 & 14.6 & 2.16 & 1.18 & 1.12 & 1.37(1.29) & 1.65(1.59) & 1.60\\ \hline K & 0.170 & 19.5 & 11.6 & 10.8 & 26.8 & 16.8 & 15.0 & 2.19 & 1.21 & 1.08 & 1.35(1.28) & 1.64(1.57) & 1.59 \\ \hline Rb & 0.284 & 19.5 & 11.3 & 10.6 & 27.7 & 16.9 & 15.6 & 2.10 & 1.22 & 1.08 & 1.36(1.29) & 1.67(1.59) & 1.61\\ \hline Cs & 0.352 & 20.3 & 12.0 & 10.6 & 32.2 & 17.0 & 15.3 & 1.81 & 1.13 & 1.07 & 1.41(1.35) & 1.73(1.65) & 1.67\\ \hline Mg & 0.056 & 22.5 & 12.0 & 11.1 & 33.7 & 17.7 & 15.2 & 2.27 & 1.29 & 1.09 & 1.33(1.33) & 1.65(1.58) & 1.61\\ \hline Ba & 0.158 & 24.6 & 13.5 & 12.8 & 25.1 & 16.5 & 14.5 & 2.02 & 1.22 & 1.08 & 1.30(1.28) & 1.63(1.56) & 1.55\\ \hline Al & 0.045 & 37.3 & 17.6 & 14.4 & 68.0 & 25.6 & 19.3 & 2.04 & 1.40 & 1.33 & 1.32(1.46) & 1.66(1.62) & 1.65\\ \hline Tl & 0.187 & 64.7 & 26.1 & 18.7 & 108.2 & 39.4 & 27.0 & 1.24 & 1.00 & 1.00 & 1.32(1.46) & 1.65(1.60) & 1.62\\ \hline Si & 0.058 & 21.4 & 9.5 & 7.8 & 37.4 & 16.3 & 13.3 & 1.24 & 1.00 & 1.00 & 1.68(1.87) & 2.05(1.94) & 1.97\\ \hline Na@Na$_3$K$_7$ & 0.126 & \multicolumn{2}{c|}{18.7} & 10.5 & \multicolumn{2}{c|}{17.4} & 14.7 & \multicolumn{2}{c|}{1.28} & 1.05 & \multicolumn{2}{c|}{1.61(1.57)} & 1.61\\ \hline K@Na$_3$K$_7$ & 0.176 & 18.9 & 10.2 & 9.8 & 29.4 & 15.2 & 14.4 & 2.24 & 1.25 & 1.06 & 1.39(1.28) & 1.71(1.64) & 1.67\\ \hline \end{tabular} \caption{Reference time, $\tau$, mean residence time, $t_{\rm av}$, twenty percent time (see text), $t_{20}$, ratio between $D_T$ and the diffusion coefficient in the center of the slab, $D_{\rm center}$, and Einstein frequencies for the systems considered and the different regions. The units of $\tau$ are standard reduced units for the LJ system and picoseconds for all the other systems.} \label{tablilla} \end{table} \end{widetext} \subsection{Motion parallel to the interface.} The $t_{20}$ times have been used to analyse the atomic motion within the layers. The MSD has been evaluated up to $0.8 \times t_{20}$, in order to allow an adequate averaging over the time origins. Figure \ref{r2t} depicts the MSD for layers 1 and 2 and for the slab center in liquid K. There is a clear increase in the slope as the interface is approached. This is common to all the systems, including the LJ one, and is quantified in table \ref{tablilla}, which shows the ratio between the diffusion coefficient parallel to the interface, $D_T$, in the different layers and the bulk diffusion coefficient in the slab center (where diffusion is isotropic); $D_T$ is extracted from the long-time slope of the in-layer MSD through the two-dimensional Einstein relation $\langle \Delta x^2+\Delta y^2 \rangle \simeq 4 D_T t$. We attribute this $\approx$ 100 \% increase in the diffusion at the outermost layer to the reduced coordination of the atoms in the interface. In all the pure metals, except Si, the number of nearest neighbors is $\approx$ 12 at the center of the slab and decreases to $\approx$ 8 at the outer layer. In the LJ system the coordination number again decreases from $\approx$ 12 to $\approx$ 9.
For Si the coordination number is $\approx$ 6 at the center and $\approx$ $4.5$ at the outer layer. This smaller decrease is reflected in a smaller enhancement of the diffusion coefficient at the interface, of only 24 \%. A similar explanation also holds for the Na$_3$K$_7$ alloy. The total number of neighbors around a K (Na) atom is roughly constant at $\approx$ 12.5 (10.0) up to region 1, where it sharply decreases. Consequently, $D_T$ for K increases rapidly in the outermost layer, whereas the value for Na in its outermost layer (regions 1 and 2) does not change significantly, because very few Na atoms in region 1 are affected by this decrease of coordination. The case of Tl deserves special attention, because the number of neighbors decreases from 12 to 8 but $D_T$ increases by only 24 \% in the outermost layer. We attribute this modest increase in $D_T$ in this strongly layered system to a large influence of the second layer on the atoms of the outermost one, an effect similar to that exerted by a wall on a liquid, which leads to strong layering and reduced diffusion. \section{Conclusions.} In summary, we have analyzed, through ab initio simulations, the atomic motion in the liquid-vapor interfaces of several metals and compared it with that of a LJ system in a similar thermodynamic state. Although the layered structure is similar to that of other systems, such as liquids on a solid or in confined geometries, the dynamic behavior within the layers is much more similar to that at the liquid-vapor interface of dielectrics, like the LJ system or water: diffusion in the direction parallel to the interface is enhanced, which is attributed to the reduced coordination of the atoms, favouring transverse motion. In the perpendicular direction, the layers are too thin to regard the motion as atomic diffusion, and instead the MRT associated with each layer has been determined. The dynamic behavior of metallic systems along this direction differs markedly from that of the LJ system. The value of the MRT for metals is clearly larger at the outermost layer, contrary to the case of the LJ system, and increases significantly for the polyvalent metals with close-packed structures. Finally, the reference time $\tau$ is found to be an excellent time unit, since it reveals universal values in several dynamic properties of different systems. We acknowledge the financial support of the DGICYT (MAT2005-03415) and the EU FEDER program.
\section{Introduction} Semiclassical methods represent one of the main tools to investigate non-perturbative phenomena in Quantum Field Theory (QFT). Vacuum decay \cite{Kobzarev:1974cp,Coleman:1977py}, instantons \cite{tHooft:1976snw}, topological defects and multiparticle production \cite{Rubakov:1995hq,Son:1995wz} are just a partial list of the different declinations of this methodology, to which an interesting addition recently appeared \cite{Hellerman:2015nra}. That is in the context of conformal field theory (CFT), where it was shown that operators carrying a large conserved quantum number $Q$ admit a universal description in terms of a finite density superfluid state, with $1/Q$ controlling the semiclassical expansion. In practice the superfluid state is described by an effective field theory (EFT) for the hydrodynamic excitations. In particular, that implies that there exists a non-trivial correspondence between large charge operators and the hydrodynamic excitations in a superfluid. While the original application focussed on the operator spectrum, in \cite{Monin:2016jmo} it was later shown how the methodology straightforwardly extends to the computation of correlators. That motivated exploring large charge operators using instead the conformal bootstrap \cite{Jafferis:2017zna}. Perfect agreement was found, thus remarkably showing that the superfluid phase dynamics is somewhat encapsulated in the bootstrap constraints at large $Q$. The above results define robust and universal features of generic CFTs with conserved global symmetries. For specific CFTs that admit a definition within perturbation theory, through the large $N$- or $\varepsilon$-expansions, the semiclassical approach is even more powerful \cite{Orlando:2019hte,Alvarez-Gaume:2019biu,Badel:2019oxl,Badel:2019khk}. In \cite{Badel:2019oxl,Badel:2019khk}, focussing on the $U(1)$ symmetric Wilson-Fisher fixed points in $4-\varepsilon$ and $3-\varepsilon$ dimensions, this was elucidated by considering the properties of the simplest charge-$n$ operator, $\phi^n$. In particular, given the coupling $\lambda$, it was found that it is the combination $\lambda n$ that controls the convergence of the standard Feynman diagram approach: only for $\lambda n\ll1$ is perturbation theory applicable. On the other hand, the large charge semiclassical approach applies as long as $n\gg 1$, for any $\lambda n$. Remarkably, there then exists a non-trivial overlap for the application of the two methods, which was exploited in \cite{Badel:2019oxl} both to validate the semiclassical computation and to boost the available finite-order Feynman loop computations. Amusingly, the parameter $\lambda n$ shares some features with the 't Hooft coupling in AdS/CFT \cite{Maldacena:1997re}. In particular, $\lambda n\gg 1$ corresponds to the regime where all the modes besides the hydrodynamic ones are gapped and can be integrated out, very much as in AdS/CFT the large 't Hooft coupling allows one to integrate out the string modes to obtain the supergravity limit. Another interesting aspect of Wilson-Fisher models is that, at least for $\lambda n\ll 1$, the operator spectrum can be explicitly constructed both in terms of fields and derivatives and in terms of hydrodynamic modes around the semiclassical saddle. This naturally invites one to see how the hydrodynamic Fock space structure emerges in the ordinary construction based on the elementary fields and their derivatives.
Otherwise stated, the semiclassical approach delivers the operator spectrum, but it does so somewhat formally, without saying concretely what these operators look like. It is the main goal of this paper to investigate this issue, as we now explain in more detail. \subsection{$\varepsilon$-expansion at large charge\label{sec:epsilonIntro}} In this paper we will mostly consider weakly coupled CFTs, focusing on either the $U(1)$ invariant $\phi^4$ theory in $d= 4-\varepsilon$ \begin{equation} \mathcal L_4 = \partial \bar \phi \partial \phi + \frac{\lambda}{4} (\bar \phi \phi)^2 \label{eq:phi4} \end{equation} at the Wilson-Fisher fixed point \cite{Kleinert:2001ax} \begin{equation} \label{eq:phi4FixedPoint} \frac{\lambda_*}{(4\pi)^2} = \frac{\varepsilon}{5} + \frac{3}{25}\varepsilon^2 + \mathcal O (\varepsilon^3), \end{equation} or its $\phi^6$ cousin in $d=3-\varepsilon$ dimensions \begin{equation} \mathcal L_6 = \partial \bar \phi \partial \phi + \frac{\lambda^2}{36} (\bar \phi \phi)^3 \label{eq:phi6} \end{equation} at the Wilson-Fisher fixed point \cite{Pisarski:1982vz} \begin{equation} \label{eq:phi6FixedPoint} \frac{\lambda_*^2}{ (4\pi)^2 } = \frac{3}{7} \varepsilon + \mathcal O(\varepsilon^2) . \end{equation} As made evident by equations \REf{eq:phi4} -- \REf{eq:phi6FixedPoint}, both theories are in the perturbative regime provided $\varepsilon \ll 1$. As long as the coupling $\lambda$ is the only relevant parameter, observables can be reliably computed -- putting aside the usual asymptotic nature of perturbative series -- through Feynman diagrams. Things are however different when other parameters enter the game. In particular, the correlators of operators with sufficiently large charge $n$, more precisely satisfying $\lambda n \gtrsim 1$, are not computable via standard perturbation theory. We believe the same is true for operators with large spin. This situation is rather generic when large quantum numbers are present. In practice, standard perturbation theory breaks down because large combinatoric factors render the effective expansion parameter large (see~\cite{Voloshin:1994yp,Rubakov:1995hq,Libanov:1997nt} for a review in the context of multi-particle scattering). Nevertheless, it is generally believed that, as long as the coupling is small, observables involving large quantum numbers can still be computed by an alternative perturbative expansion, performed around a non-trivial saddle. Identifying this saddle presents a difficult problem in general. However, for the case at hand, CFT correlators (or any observable for that matter) involving large charge operators can be computed in a double scaling limit \begin{equation} n \gg 1, ~~ \lambda \ll 1, ~~ \lambda n =\textrm{fixed}, \label{eq:doubleScaling} \end{equation} by finding the saddle explicitly and expanding around it. Computations are simplified due to the enhanced symmetry of the problem. In particular, the operator-state correspondence allows one to map the theory onto the cylinder, where the saddle point corresponds to a homogeneous superfluid state with spontaneously broken $U(1)$ symmetry, whose properties can be systematically computed. For instance, the scaling dimension of $\phi ^n$, which is the lightest operator in the sector of charge $n$, is given by the superfluid ground state energy.
In the saddle point approximation it is written as a power series in $\lambda$ with coefficients that are themselves functions of $\lambda n$ \begin{equation} \Delta_{\phi^n} = \frac{1}{\lambda} \Delta_{-1} (\lambda n) + \Delta_{0} (\lambda n)+ \lambda \Delta_{1} (\lambda n) + \dots \label{eq:phinDimension} \end{equation} A more detailed discussion of this result can be found in~\cite{Badel:2019oxl,Badel:2019khk}, where the first two coefficients in \REf{eq:phinDimension} are also explicitly computed. Notice that, taking $\lambda n$ as a fixed parameter, the expansion in $\lambda$ is equivalent to an expansion in $1/n$. \footnote{ Notice that here and in what follows, when we write $\lambda$ we indeed mean $\lambda_*$, as away from the fixed point the notion of scaling dimension is ill-defined beyond the lowest order.} Similarly, one finds that (see Section~\ref{sec:saddle} for a recap) the excitations of the superfluid are given by phonons of spin $\ell$ and energies \begin{equation} \omega^2_{A,B} (\ell)= J_\ell+\Omega^2 \mp \sqrt{4 J_\ell \mu^2 +\Omega^4}, \label{eq:phiSpectrum} \end{equation} where \begin{equation} \label{eq:defJl} J_\ell = \ell (\ell+d-2), \end{equation} is the $SO(d)$ Casimir, and moreover \begin{equation} \label{eq:Omega_phi} \Omega^2 = \left \{ \begin{array}{ll} 3 \mu_4^2 - m^2 , &\text{ for } (\bar \phi \phi)^2, \\ 2 \mu_6^2 - m^2 , &\text{ for } (\bar \phi \phi)^3, \end{array} \right. \end{equation} with $m = \frac{d}{2}-1$ and \begin{align} \text{ for } (\bar \phi \phi)^2: \mu_4(\lambda n,d) & = \frac{(d-2)}{2}\frac{\left( 3^{1/3}+ \left[ \frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3} - \sqrt{ \left(\frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3}\right)^2-3 }\right]^{2/3} \right) } {3^{2/3} \left[ \frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3} - \sqrt{ \left(\frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3}\right)^2-3 }\right]^{1/3} } , \\ \text{ for } (\bar \phi \phi)^3: \mu_6(\lambda n,d) & = \frac{(d-2)}{2} \frac{\sqrt{1+\sqrt{1+ \frac{\lambda^2 n^2 \Gamma(d/2)^2}{3 \pi^d (d-2)^4} }}}{\sqrt{2}} . \end{align} The Fock space of phonon excitations corresponds to the space of operators with charge $n$, whose spectrum of scaling dimensions at next to leading order (NLO) is then given by \footnote{As $\Delta_{\phi^n}$ is $O(n)$ and the $\omega_{A,B}(\ell)$ are $O(1)$, the tree level frequencies are sufficient to compute the dimension $\Delta\left (\{k^A\},\{k^B\} \right )$ at NLO. On the other hand, in order to compute the splittings at NLO, one would need to perform a full 1-loop computation.} \begin{equation} \Delta\left (\{k^A\},\{k^B\} \right )=\Delta_{\phi^n} +\sum_{\ell = 1}^\infty k^A_{\ell}\omega_A(\ell)+\sum_{\ell'=0}^\infty k^B_{\ell'}\omega_B(\ell'), \label{eq:primaryspectrum} \end{equation} with $k^A_\ell$ and $k^B_\ell$ non-negative integers. The above result applies for states with a finite number of phonons and finite spin as $n\to \infty$. For large enough total spin, one expects a non-homogeneous configuration to dominate the path integral (see for instance \cite{vortices1}). We call $A$- and $B$-type the phonons with energy $\omega_A$ and $\omega_B$ respectively. Notice that primary operators correspond to states with $k_1^A=0$, and that descendants are obtained by adding spin-1 $A$-type phonons. Compatibly with that, and with the accuracy of \eqref{eq:primaryspectrum}, one indeed has \begin{equation} \label{eq:omega-1} \omega_A(1) = 1+O(\varepsilon)\,. 
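\end{equation}

As a concrete numerical illustration of \REf{eq:phiSpectrum} -- \REf{eq:Omega_phi} for the $(\bar \phi \phi)^2$ theory, the following minimal Python sketch (our own illustration; names and structure are not taken from any published code) evaluates $\mu_4$, $\Omega^2$ and the phonon frequencies:
\begin{verbatim}
# Sketch: evaluate mu_4, Omega^2 and omega_{A,B}(l) for the quartic theory.
# Complex arithmetic handles the cube roots when lambda*n is small; the
# final result is real.
import cmath, math

def mu4(lam_n, d=4.0):
    x = 9 * lam_n * math.gamma(d / 2) / (2 * math.pi ** (d / 2) * (d - 2) ** 3)
    r = (x - cmath.sqrt(x * x - 3)) ** (1.0 / 3.0)
    return ((d - 2) / 2 * (3 ** (1 / 3) + r ** 2) / (3 ** (2 / 3) * r)).real

def omega_AB(ell, lam_n, d=4.0):
    m = d / 2 - 1
    mu = mu4(lam_n, d)
    Om2 = 3 * mu ** 2 - m ** 2
    J = ell * (ell + d - 2)
    s = math.sqrt(4 * J * mu ** 2 + Om2 ** 2)
    # clip tiny negative rounding errors (omega_A(0) = 0 is the Goldstone mode)
    return math.sqrt(max(J + Om2 - s, 0.0)), math.sqrt(J + Om2 + s)

print(omega_AB(1, 0.0))  # free limit at d=4: (1.0, 3.0)
print(omega_AB(0, 2.0))  # (0.0, ...): gapless phonon for any lambda*n
\end{verbatim}
In the free limit at $d=4$ this reproduces $\omega_A(\ell)=\ell$ and $\omega_B(\ell)=\ell+2$, consistent in particular with \eqref{eq:omega-1}.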
\subsection{Motivation and goals \label{sec:motivation}} The approach outlined above provides the spectrum of the operators, but it does not say anything about their explicit form in terms of elementary fields and derivatives. Establishing this form is one of the goals of this paper. Notice though that the explicit form of composite operators depends on the renormalization procedure and that, moreover, for large enough $\lambda n$ we do not possess such a procedure. We will thus content ourselves with the construction of the operators in the free field theory limit $\lambda\to 0$ and with their correspondence to superfluid excitations. As we shall see, the tree level result is already structurally informative. Indeed, the properties of the operator spectrum vary continuously with $\lambda$ (in truth with $\varepsilon$): by varying $\lambda$ we obtain operator {\it families} $\mathcal O^{(n,\ell,\alpha)}_\lambda(x)$, with $\alpha$ a discrete label characterizing the phonon composition (the $k^A$ and $k^B$ mentioned in the previous section). As qualitatively depicted in Fig.~\ref{fig:norms}, the dimensions $\Delta$, and OPE coefficients, of the $\mathcal O^{(n,\ell,\alpha)}_\lambda(x)$ are continuous functions of $\lambda$. Our tree level construction will thus correspond to the starting point at $\lambda=0$ of each trajectory. Such endpoints, however, fully characterize the families non-perturbatively, even if indirectly. \begin{figure}[h] \centering \includegraphics[width=7cm]{norms.png} \caption{\label{fig:norms} Scaling dimension of families of operators as a function of $\lambda$. Each family can be labeled with the corresponding ``seed'' operator in free theory (at $\lambda = 0$).} \end{figure} This paper is organized as follows. In section \ref{sec:vac_fluctuations}, we discuss the classification of operators with charge $n$ in 3D free field theory quantized around $\phi=0$. We explain how to identify primary operators, and provide a systematic construction for a sub-class of them. Section \ref{sec:superfluid_fluctuations} starts with a recap of the semiclassical expansion around the superfluid field configuration, followed by the construction of the mapping between superfluid Fock states and operators. We also discuss the identification of primary operators in this picture. In section \ref{sec:large_spin}, we discuss the breakdown of the homogeneous superfluid description as the spin gets large. In this paper, besides analyzing the operator spectrum, we apply the saddle point expansion to the computation of correlators between large charge operators and neutral operators of the form $ \mathcal O =(\bar \phi \phi)^k$, with $k$ finite. In particular, in Section~\ref{sec:3ptfunction}, we compute the 3-point functions $\langle \bar \phi^n \mathcal O \phi^n \rangle$ at next-to-leading order (NLO) in the $1/n$ expansion. In Section~\ref{sec:4ptfunction} we compute at NLO the 4-point functions $\langle \bar \phi^n \mathcal O \mathcal O\phi^n \rangle$ and study at the same order the $\mathcal O \times \phi^n$ operator product expansion (OPE). \section{Operators corresponding to vacuum fluctuations in free theory\label{sec:vac_fluctuations}} Our first goal is to classify the families of large charge operators by focussing on their representatives in the free limit, as sketched in figure \ref{fig:norms}.
As explained in section \ref{sec:motivation}, the first step is the classification of the operators of free field theory in terms of conformal multiplets. This amounts to identifying the conformal primaries. In CFT every local operator corresponds to a state and vice versa (operator-state correspondence). In particular, primary states, i.e. those annihilated by the special conformal generators, correspond to primary operators. The goal of this section is to set up the methodology for identifying these states. To make things explicit we will fully construct a subclass of the operators. Working in radial quantization we will now, in turn, construct the Fock space of vacuum fluctuations, derive the state-operator correspondence and write the conformal group generators. We will then write down in closed form a subset of primary states, also showing by a combinatoric argument that it forms a complete basis of the subspace of primary operators with a number of derivatives not exceeding the charge. \subsection{Fock space of vacuum fluctuations} Let us consider a free complex scalar field in $d=3$ Euclidean dimensions \begin{equation} \mathcal L = \partial \bar \phi \partial \phi. \label{eq:FreeLagrangian} \end{equation} As usual in CFT, it is beneficial to put the theory \REf{eq:FreeLagrangian} on the cylinder $\mathbb R \times \mathbb S^2$ by redefining the coordinates \begin{equation} \label{eq:PlaneCylinderCoords} x^\mu = r n^\mu, ~~ r = e^\tau, ~~ \vec n = (\sin \theta \cos\varphi, \sin \theta \sin \varphi, \cos\theta) \end{equation} and the field \begin{equation} \label{eq:PlaneCylinderField} \hat \phi (\tau,\theta,\varphi) = e^{\tau/2} \phi (x). \end{equation} As a result we have the following action on the cylinder \begin{equation} S = \int d\tau d\Omega_2 \left [ g^{\mu \nu} \partial_\mu \hat { \bar \phi} \partial_\nu \hat \phi + \frac{1}{4}\hat {\bar\phi} \hat \phi \right ], ~~ g_{\mu \nu} =\mathrm{diag}(1,1,\sin^2\theta) . \end{equation} Time translations on the cylinder are generated by the corresponding Hamiltonian $H$ in the following way \begin{equation} \label{HeisenbergCylinder} \hat \phi(\tau,\theta,\phi) = e^{H \tau} \hat \phi(0,\theta,\phi) e^{-H\tau}, \end{equation} and are related to dilatations on the plane, which are generated by $D$ \begin{equation} \label{HeisenbergPlane} e^{D \lambda} \phi(x) e^{- D \lambda} = e^{\lambda/2 }\phi(e^\lambda x) . \end{equation} This implies \begin{equation} H = D, \end{equation} so that operator dimensions are in one-to-one correspondence with energy levels on the cylinder. Hermitian conjugation in radial quantization of the parent Euclidean field theory implies $\hat {\bar \phi}(0, \theta,\varphi)=\hat \phi(0, \theta,\varphi)^\dagger $, which at arbitrary $\tau$ on the cylinder and arbitrary $x$ on the plane gives respectively \begin{equation} \hat {\bar \phi}(\tau, \theta,\varphi)=\hat \phi(-\tau, \theta,\varphi)^\dagger \qquad{\mathrm {and}}\qquad \bar\phi(x)=|x|^{-1}\phi(x^{-1})^\dagger\,.
\label{HermiteanConjugation} \end{equation} Quantization proceeds by expanding the fields in spherical harmonics $Y_{\ell m}$ \begin{equation} \hat \phi(\tau, \theta,\varphi)=\sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \frac{1}{\sqrt{2\omega_\ell}} \left [ a^\dagger_{\ell m} e^{\omega_\ell \tau} Y^*_{\ell m}(\theta,\varphi) + b_{\ell m} e^{-\omega_\ell \tau} Y_{\ell m}(\theta,\varphi) \right ], \label{eq:aaCylinderFieldComplex} \end{equation} and\footnote{ Notice $\hat {\bar \phi}(\tau, \theta,\varphi)=\hat \phi(-\tau, \theta,\varphi)^\dagger $ in accordance with (\ref{HermiteanConjugation}).} \begin{equation} \label{eq:aaCylinderFieldComplexConj} \hat {\bar \phi}(\tau, \theta,\varphi)=\sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \frac{1}{\sqrt{2\omega_\ell}} \left [ b^\dagger_{\ell m} e^{\omega_\ell \tau} Y^*_{\ell m}(\theta,\varphi) + a_{\ell m} e^{-\omega_\ell \tau} Y_{\ell m}(\theta,\varphi)\right ], \end{equation} with energies \begin{equation} \omega_\ell = \ell+\frac{1}{2}. \end{equation} The corresponding canonically conjugated momenta are given by\footnote{There appears an ``$i$'' in front of the time derivatives because we work in Euclidean time.} \begin{equation} p_{\hat\phi}(\tau, \theta,\varphi)= i \partial_\tau \hat{\bar\phi} =i\sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell\sqrt{\frac{\omega_\ell}{2}} \left [ b^\dagger_{\ell m} e^{\omega_\ell \tau} Y^*_{\ell m}(\theta,\varphi) - a_{\ell m} e^{-\omega_\ell \tau} Y_{\ell m}(\theta,\varphi)\right ], \label{eq:aaCylinderMomentumComplex} \end{equation} and \begin{equation} p_{\hat{\bar\phi}}(\tau, \theta,\varphi)=i\partial_\tau \hat\phi =i\sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\frac{\omega_\ell}{2}}\left [ a^\dagger_{\ell m} e^{\omega_\ell \tau} Y^*_{\ell m}(\theta,\varphi) - b_{\ell m} e^{-\omega_\ell \tau} Y_{\ell m}(\theta,\varphi) \right ]. \label{eq:aaCylinderMomentumComplexConj} \end{equation} Creation and annihilation operators, satisfying the usual commutation relations \begin{equation} [ a_{\ell m},a^\dagger_{\ell'm'}] = [ b_{\ell m},b^\dagger_{\ell'm'}] = \delta_{\ell \ell'}\delta_{mm'}, \end{equation} allow us to build the Hilbert space. Defining the vacuum state $| 0 \rangle$ as \begin{equation} a_{\ell m} | 0 \rangle =b_{\ell m} | 0 \rangle =0, \qquad \forall\, \ell, m \end{equation} states featuring a string of creation operators acting on the vacuum \begin{equation} \prod_{i= 1}^{n_a} a^\dagger_{\ell_i m_i} \prod_{j=1}^{n_b} b^\dagger_{\ell'_j m'_j} | 0 \rangle \label{eq:FockSpaceStates} \end{equation} provide a basis of the Hilbert space, and give it the standard Fock space structure. The $U(1)$ charge of these states is determined by the charge operator \begin{equation} \label{eq:Qaabb} Q = \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \left( a_{\ell m}^\dagger a_{\ell m} - b_{\ell m}^\dagger b_{\ell m} \right) . \end{equation} \subsection{Operator-state correspondence \label{sec:oppStateCorr}} Combining (\ref{eq:aaCylinderFieldComplex}) with (\ref{eq:aaCylinderMomentumComplexConj}) and using the orthonormality of the $Y_{\ell m}$ (\ref{eq:orthonormalityHarmonics}) we get \begin{equation} a_{\ell m}^\dagger = \frac{e^{-\omega_\ell \tau}}{\sqrt{2 \omega _\ell}} \int d\Omega_2 \, Y_{\ell m} \left ( \partial_\tau \hat\phi(\tau) +\omega_\ell \hat \phi (\tau) \right ), \end{equation} which is valid at any finite $\tau$. 
Remembering the change of coordinates (\ref{eq:PlaneCylinderCoords}) and the relation between fields on the plane and on the cylinder \REf{eq:PlaneCylinderField}, this expression can be rewritten as \begin{equation} a_{\ell m}^\dagger = \frac{r^{-\ell }}{\sqrt{2 \omega_\ell}} \int d\Omega_2 \, Y_{\ell m} \big ( x^\mu \partial_\mu \phi (x) + (\ell +1) \phi(x) \big) , \end{equation} where the integral is over the unit sphere and $x^\mu = r n^\mu$. Acting on the vacuum and Taylor expanding around the origin\footnote{As can be seen in (\ref{eq:aaCylinderFieldComplex}), the field is singular at the origin, $r\to 0$ or $\tau\to -\infty$, due to the negative-frequency $b_{\ell m}$ modes. However, in $\phi(x) | 0 \rangle$ the singular terms drop and Taylor expansion is legitimate.} we get \begin{equation} \phi(x)|0\rangle = \sum_{\ell'=0}^\infty \frac{1}{\ell'!} x^{\{\mu_{1}}\cdots x^{\mu_{\ell'}\}} \partial_{\mu_1}\cdots \partial_{\mu_{\ell'}} \phi(0)|0\rangle\,, \end{equation} where by $\{\dots\}$ we indicate the traceless symmetric combination, which arises because of the equation of motion $\partial^2 \phi(x) = 0$. Noting that \begin{equation} \int d\Omega_2 \, Y_{\ell m} x^{\{\mu_{1}}\cdots x^{\mu_{\ell'}\}} = 0,~~ \ell'\neq \ell, \end{equation} the expansion results in \begin{equation} a_{\ell m}^\dagger | 0 \rangle = \frac{\sqrt{2\ell+1}}{\ell!} \int\! d\Omega_2 \, Y_{\ell m} n^{\mu_1} \cdots n^{\mu_\ell} \partial_{\mu_1}\cdots \partial_{\mu_\ell} \phi(0) | 0 \rangle. \label{eq:aVsDerivatives} \end{equation} The integral can be easily done, yielding \begin{equation} \label{eq:stateOperatorMapA} a_{\ell m}^\dagger |0\rangle = \mathcal Y_{\ell m}^{\mu_1\dots\mu_\ell} \partial_{\mu_1} \cdots \partial_{\mu_\ell} \phi(0)|0\rangle, \end{equation} with (see Appendix \ref{sec:appYlmCoeff}) \begin{equation} \label{eq:defineYlmCoeff} \mathcal Y_{\ell m}^{\mu_1\dots\mu_\ell} = \frac{\sqrt{2\ell+1}}{\ell!} \int d\Omega_2 \, Y_{\ell m} n^{\mu_1} \cdots n^{\mu_\ell} . \end{equation} Repeating the same steps starting with (\ref{eq:aaCylinderFieldComplexConj}) and (\ref{eq:aaCylinderMomentumComplex}), we get similarly \begin{equation} \label{eq:stateOperatorMapB} b_{\ell m}^\dagger |0\rangle = \mathcal Y_{\ell m}^{\mu_1\dots\mu_\ell} \partial_{\mu_1} \cdots \partial_{\mu_\ell} \bar\phi(0)|0\rangle . \end{equation} In particular, we obtain \begin{equation} \mathcal Y_{\ell\ell}^{\mu_1 \dots \mu_\ell} = \frac{(-1)^\ell 2^{\frac{\ell}{2}+1}\sqrt{\pi}}{\sqrt{(2\ell)!}} \delta^{\mu_1}_- \cdots \delta^{\mu_\ell}_- , \end{equation} \begin{equation} a_{\ell\ell}^\dagger |0\rangle = \frac{(-1)^\ell 2^{\frac{\ell}{2}+1}\sqrt{\pi}}{\sqrt{(2\ell)!}} \big( \partial_- \big)^{\ell} \phi(0) |0\rangle , \label{eq:a_llFields} \end{equation} where the following change of variables was performed \begin{equation} \label{eq:x+-0} x_\pm=\frac{x_1\pm i x_2}{\sqrt{2}}, ~~ x_0=x_3. \end{equation} This generalizes to multi-particle Fock states (\ref{eq:FockSpaceStates}). For example\footnote{In field products acting on the vacuum the singular terms at the origin are eliminated by normal-ordering. In the rest of the paper, normal ordering will always be understood and we will drop the ``$:$'' symbol.}, \begin{equation} a_{\ell_1 m_1}^\dagger b_{\ell_2 m_2}^\dagger |0\rangle = \mathcal Y_{\ell_1 m_1}^{\mu_1 \dots \mu_{\ell_1}} \mathcal Y_{\ell_2 m_2}^{\nu_1 \dots \nu_{\ell_2}} : \partial_{\mu_1} \!\cdots \partial_{\mu_{\ell_1}}\! \phi(0)\, \partial_{\nu_1}\!\cdots \partial_{\nu_{\ell_2}}\! \bar\phi(0) : |0\rangle .
\end{equation} Hermitian conjugation of (\ref{eq:aVsDerivatives}), together with (\ref{HermiteanConjugation}), implies \begin{equation} \langle 0 | a_{\ell m} = \frac{\sqrt{2\ell+1}}{\ell!} \int \! d\Omega_2 \, Y_{\ell m}^* n^{\mu_1} \cdots n^{\mu_\ell} \lim_{x\to\infty} \langle 0 | \partial_{\mu_1}^{(1/x)}\cdots\partial_{\mu_\ell}^{(1/x)} \big( |x| \bar\phi(x) \big) , \end{equation} where we defined \begin{equation} \partial_\mu^{(1/x)} = \left( x^2 \delta_{\mu\nu} - 2 x^\mu x^\nu \right) \partial_\nu^{(x)} . \end{equation} Notice that this time the field is evaluated at infinity, because hermitian conjugation in radial quantization involves a space inversion. \subsection{Conformal generators} In order to proceed with the classification and construction of the operators we first need the explicit expressions of the conformal group generators in terms of the ladder operators. We provide them in this section. In $d=3$, defining \begin{equation} J_i = \frac{1}{2} \varepsilon_{ijk}J_{jk}, ~~ J_\pm = J_1\pm iJ_2 \end{equation} and \begin{equation} P_\pm = \frac{1}{\sqrt{2}} \left ( P_1\pm i P_2 \right ), ~~ K_\pm = \frac{1}{\sqrt{2}} \left ( K_1\pm i K_2 \right ), ~~ P_0\equiv P_3, ~~ K_0=K_3, \end{equation} such that \begin{equation} P_\pm^\dagger = K_\mp, ~~ P_0^\dagger = K_0, \end{equation} the commutation relations take the form (with $X_\bullet = P_\bullet, K_\bullet$) \begin{eqnarray} \label{eq:CRin3dSpinBasis} &&[J_3,J_\pm] = \pm J_\pm, ~~ [J_+,J_-] = 2 J_3, \nonumber \\ &&[J_3, X_\pm]=\pm X_\pm, ~~ [J_3, X_0]=0, \nonumber \\ &&[J_+, X_+]=0, ~~ [J_+, X_0]=-\sqrt{2}X_+, ~~ [J_+, X_-]=\sqrt{2}X_0, \\ &&[J_-, X_+]=-\sqrt{2}X_0, ~~ [J_-, X_0]=\sqrt{2}X_-, ~~ [J_-, X_-]=0, \nonumber \\ &&[D, K_i]=-K_i, ~~ [D, P_i]=P_i, \nonumber \\ &&[K_-, P_+]=2 \left ( D+ J_3 \right ), ~~ [K_+, P_-]=2 \left ( D- J_3 \right ), ~~ [K_0, P_0] = 2D, \nonumber \\ &&[K_0, P_+]=-\sqrt{2} J_+, ~~ [K_+, P_0]=\sqrt{2} J_+, ~~ [K_-, P_0]=-\sqrt{2} J_-, ~~ [K_0, P_-]=\sqrt{2} J_-.
\nonumber \end{eqnarray} The generators, as computed from the Noether currents of the theory, read \begin{eqnarray} \label{eq:Daabb} D & = & \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \omega_\ell \left( a^\dagger_{\ell m}a_{\ell m}+b^\dagger_{\ell m}b_{\ell m}\right), \\ \label{eq:J3aabb} J_3 & = & \sum_{\ell =0}^\infty \sum_{m=-\ell}^\ell m \left( a^\dagger_{\ell m}a_{\ell m}+b^\dagger_{\ell m}b_{\ell m}\right ), \\ \label{eq:P0aabb} P_0 & = & \sum_{\ell =0}^\infty \sum_{m=-\ell}^\ell \sqrt{(\ell+1)^2-m^2} \left( a^\dagger_{\ell+1,m}a_{\ell m}+b^\dagger_{\ell+1,m}b_{\ell m}\right ), \\ \label{eq:K0aabb} K_0 & = & \sum_{\ell =0}^\infty \sum_{m=-\ell}^\ell \sqrt{(\ell+1)^2-m^2} \left( a^\dagger_{\ell m}a_{\ell+1,m}+b^\dagger_{\ell m}b_{\ell+1,m}\right ), \end{eqnarray} \begin{eqnarray} \label{eq:J+aabb} J_+ & = & \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\ell(\ell+1)-m(m+1)}\left( a^\dagger_{\ell,m+1}a_{\ell m}+b^\dagger_{\ell,m+1}b_{\ell m}\right ), \\ & = & \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\ell(\ell+1)-m(m-1)} \left( a^\dagger_{\ell m}a_{\ell,m-1}+ b^\dagger_{\ell m}b_{\ell,m-1}\right ), \nonumber \\ \label{eq:J-aabb} J_- & = & \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\ell(\ell+1)-m(m+1)} \left ( a^\dagger_{\ell m}a_{\ell,m+1}+b^\dagger_{\ell m}b_{\ell,m+1}\right ) \\ & = & \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\ell(\ell+1)-m(m-1)} \left ( a^\dagger_{\ell,m-1}a_{\ell m}+b^\dagger_{\ell,m-1}b_{\ell m} \right ), \nonumber \end{eqnarray} \begin{eqnarray} \label{eq:P+aabb} P_+ & = & -\sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\frac{(\ell+m+1)(\ell+m+2)}{2}} \left ( a^\dagger_{\ell+1,m+1}a_{\ell m}+b^\dagger_{\ell+1,m+1}b_{\ell m}\right ), \\ \label{eq:K-aabb} K_- & = & -\sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\frac{(\ell+m+1)(\ell+m+2)}{2}} \left ( a^\dagger_{\ell m}a_{\ell+1,m+1}+b^\dagger_{\ell m}b_{\ell+1,m+1}\right ), \\ \label{eq:P-aabb} P_- & = & \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\frac{(\ell-m+1)(\ell-m+2)}{2}} \left ( a^\dagger_{\ell+1,m-1}a_{\ell m} + b^\dagger_{\ell+1,m-1}b_{\ell m} \right ), \\ \label{eq:K+aabb} K_+ & = & \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \sqrt{\frac{(\ell-m+1)(\ell-m+2)}{2}} \left ( a^\dagger_{\ell m}a_{\ell+1,m-1}+ b^\dagger_{\ell m}b_{\ell+1,m-1} \right ). \end{eqnarray} \subsection{Primary states and operators} Besides the quantum numbers associated with the conformal group, states can be characterized by their charge and by their parity. Charge is quickly dealt with. Any state of the form (\ref{eq:FockSpaceStates}) is an eigenstate of the charge operator $Q$. Consider now parity. At fixed charge $n$ and spin $\ell$, states divide into two {\it polarity} classes: polar states with parity $(-1)^\ell$ and axial states with parity $(-1)^{\ell+1}$. The two classes can schematically be written as \begin{eqnarray} {\mathrm {polar}}\qquad P=(-1)^\ell\,\,\,\,\,\,\quad &\Rightarrow&\quad \partial^{\ell+2k} \phi^{n_a}\bar\phi^{n_b} \delta^k \qquad\quad \,\, k\geq 0\label{polar}\\ {\mathrm {axial}}\,\qquad P=(-1)^{\ell+1} \quad &\Rightarrow &\quad \partial^{\ell+2k+1} \phi^{n_a}\bar\phi^{n_b} \epsilon \delta^k \qquad k\geq 0 \label{axial} \end{eqnarray} where $\partial$, $\delta$ and $\epsilon$ represent respectively a spacetime derivative $\partial_i$, the Kronecker delta $\delta_{ij}$ and the Levi-Civita tensor $\epsilon_{ijk}$. The $\delta$'s and the $\epsilon$ are all contracted with a pair of derivatives, while the remaining $\ell$ indices are symmetrized and trace-subtracted. 
As the $U(1)$ charge $Q$ commutes with the conformal group, the conformal multiplets have definite charge. On the other hand, by considering that $\partial \to -\partial $ under parity and the standard rule for adding angular momenta, one is easily convinced that the descendants of an operator with given polarity (polar or axial) can have either polarity. One can nonetheless label a conformal multiplet by the polarity of its primary state. In this paper, we will provide a systematic construction of all primaries whose number of derivatives is bounded by the charge $n$ (see \cite{deMelloKoch:2018klm,deMelloKoch:2017dgi,Henning:2019mcv} for a different but less explicit procedure to construct primaries of given spin and charge). We first describe this construction, and then prove via a combinatoric argument that our procedure indeed generates all such primaries. This will enable us to concretely illustrate the emergence of the superfluid Fock space structure within the operator spectrum at large charge. \subsubsection{Construction of primaries\label{sec:vacuum_primaries_construction}} States of the form \begin{equation} a^\dagger_{\ell_1,m_1} \dots a^\dagger_{\ell_{n_a},m_{n_a}} b^\dagger_{j_1,m_1} \dots b^\dagger_{j_{n_b},m_{n_b}} | 0 \rangle, \end{equation} can be decomposed into irreducible representations of $SO(3)$ \begin{equation} \ell_1\otimes \dots \otimes \ell_{n_a} \otimes j_1 \otimes \dots \otimes j_{n_b} = \left ( \ell_1+ \dots+\ell_{n_a} + j_1 + \dots + j_{n_b}\right ) \oplus \dots \label{eq:spinDecomposition} \end{equation} Let us first consider the states with the highest total spin $\ell=\ell_1+\dots + j_{n_b}$ in the tensor product \REf{eq:spinDecomposition}, indicating them by \begin{equation} | n; \ell,m \rangle \end{equation} where $n=n_a-n_b$ and $m$ are respectively the $Q$ and $J_3$ eigenvalues. The highest weight state from which the whole multiplet is constructed by repeatedly acting with $J_-$ is \begin{equation} \label{eq:highestWeightState} |n ; \ell, \ell \rangle = a^\dagger_{\ell_1,\ell_1} \dots a^\dagger_{\ell_{n_a},\ell_{n_a}} b^\dagger_{j_1,j_1} \dots b^\dagger_{j_{n_b},j_{n_b}} | 0 \rangle. \end{equation} By eqs.~\REf{eq:stateOperatorMapA}, \REf{eq:stateOperatorMapB} and by the discussion at the beginning of this section, the corresponding operators are polar and have the schematic form $\phi^{n_a}\bar \phi^{n_b}\partial^\ell$. In the basis (\ref{eq:x+-0}), the operator corresponding to (\ref{eq:highestWeightState}) involves only $\partial_-$ derivatives, as is made clear by (\ref{eq:a_llFields}). We can now search for combinations of states of the form \REf{eq:highestWeightState} that correspond to primaries. Let us first consider states involving creation operators of only one sort, say $a^\dagger$. A first obvious example is the state of charge $n$ with lowest dimension, which is given by \begin{equation} \label{eq:chargenspin0} | n \rangle = \frac{1}{\sqrt{n!}} (a_{00}^\dagger)^n |0\rangle = \frac{(4\pi)^{n/2}}{\sqrt{n!}} \phi^n(0) |0\rangle \end{equation} This state has spin $0$ and is a primary as it is annihilated by the $K_i$'s. To find a spin-$\ell$ primary we start with the ansatz \begin{equation} | n; \ell,\ell \rangle^{(0)}_A= (a_{00}^\dagger)^{n-1} a_{\ell\ell}^\dagger | 0 \rangle. \end{equation} Acting on it with $K_-$ we get\footnote{ As can be derived from \REf{eq:K0aabb}, \REf{eq:K+aabb}, $[K_0,a_{\ell\ell}^\dagger] = [K_+,a_{\ell\ell}^\dagger] = 0$ for all $\ell$, so the state is annihilated by both $K_0$ and $K_+$.
Moreover, \REf{eq:K-aabb} yields \begin{equation} [K_-,a_{\ell\ell}^\dagger] = -\sqrt{\frac{(2\ell)(2\ell-1)}{2} } a_{\ell-1,\ell-1}^\dagger . \end{equation} } \begin{equation} K_- | n; \ell,\ell \rangle^{(0)}_A = -\sqrt{\frac{2\ell(2\ell-1)}{2}} (a_{00}^\dagger)^{n-1} a_{\ell-1,\ell-1}^\dagger | 0 \rangle, ~~ \ell\neq 1. \end{equation} In order to cancel this contribution we modify the vector \begin{equation} | n; \ell, \ell \rangle^{(1)}_A= (a_{00}^\dagger)^{n-1} a_{\ell\ell}^\dagger | 0 \rangle-\sqrt{\frac{2\ell(2\ell-1)}{2}} (a_{00}^\dagger)^{n-2} a_{\ell-1,\ell-1}^\dagger a_{1,1}^\dagger | 0 \rangle. \label{2ndstep} \end{equation} Acting with $K_-$ on the new state we find \begin{equation} K_- | n; \ell,\ell \rangle^{(1)}_A = \sqrt{\frac{2\ell(2\ell-1)}{2}} \sqrt{\frac{(2\ell-2)(2\ell-3)}{2}}(a_{00}^\dagger)^{n-2} a_{\ell-2,\ell-2}^\dagger a_{1,1}^\dagger | 0 \rangle\, . \end{equation} Again, to cancel this contribution we add an extra term to (\ref{2ndstep}), and we continue further until we finally arrive at an exact primary \begin{equation} | n; \ell,\ell \rangle_A= \alpha_0\sum_{k=0}^{\ell}\gamma_{k,\ell} (a_{00}^\dagger)^{n-k-1} ( a_{1,1}^\dagger )^{k} a_{\ell-k, \ell-k}^\dagger | 0 \rangle, \label{eq:one-phononStateA} \end{equation} with \begin{equation} \gamma_{k,\ell}= \frac{(-1)^k}{k!}\sqrt{\frac{(2\ell)!}{2^k(2\ell-2k)!}}, \end{equation} and with the overall coefficient $\alpha_0$ fixed by the normalization condition $\parallel | n; \ell,\ell \rangle_A\parallel^2=1$ \begin{equation} \label{eq:PrimaryNormalization} \alpha_0^2\left[ \sum_{k=0}^{\ell-2}\gamma^2_{k,\ell} (n-k-1)! k! + \left( \gamma_{\ell-1,\ell}+\gamma_{\ell,\ell} \right )^2 (n-\ell)! \ell! \right ] =1. \end{equation} As can be verified, this construction works if $1< \ell \leq n$. Explicit constructions of these states and of the corresponding operators for $\ell=0,1,2,3$ can be found in Appendix~\ref{app:spin23}. By using \REf{eq:Daabb} one can also check that the energy of this state, or equivalently the dimension of the corresponding operator, is given by \begin{equation} \Delta_A(n,\ell) = \frac{n}{2}+\ell, \end{equation} as expected in free theory. Similarly, we can consider states that involve one creation operator $b^\dagger$. Repeating the construction, it is straightforward to obtain, for $\ell\leq n+1$, a primary \begin{equation} | n; \ell,\ell \rangle_B= \beta_0\sum_{k=0}^{\ell}\gamma_{k,\ell} (a_{00}^\dagger)^{n-k+1} ( a_{1,1}^\dagger )^{k} b_{\ell-k, \ell-k}^\dagger | 0 \rangle, \label{eq:one-phononStateB} \end{equation} with \begin{equation} \beta_0^2\sum_{k=0}^{\ell}\gamma^2_{k,\ell} (n-k+1)! k! =1. \end{equation} These special cases can be combined to generate more primaries. Indeed, one can define spin-$\ell$ multiplets of operators $\left\{\mathcal A_{\ell,m}^\dagger \right\}$, $\left\{\mathcal B_{\ell,m}^\dagger\right\}$ with $m=-\ell,\dots,\ell$ whose highest weight elements are \begin{eqnarray}\label{eq:define_C_atomic} \mathcal A_{\ell,\ell}^\dagger &=& \sum_{k=0}^{\ell}\gamma_{k,\ell} (a_{00}^\dagger)^{\ell-k-1} ( a_{1,1}^\dagger )^{k} a_{\ell-k, \ell-k}^\dagger, \quad\quad \ell \geq 2 \\ \mathcal B_{\ell,\ell}^\dagger &=& \sum_{k=0}^{\ell}\gamma_{k,\ell} (a_{00}^\dagger)^{\ell-k+1} ( a_{1,1}^\dagger )^{k} b_{\ell-k, \ell-k}^\dagger, \quad\quad \ell \geq 0 . \end{eqnarray} $\mathcal A_{\ell,m}^\dagger$ and $\mathcal B_{\ell,m}^\dagger $ are polar primaries, because they commute with all $K_i$, and they have charge $\ell$.
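Note that the step-by-step cancellation above is equivalent to the recursion $(k+1)\,\gamma_{k+1,\ell}=-\sqrt{(2\ell-2k)(2\ell-2k-1)/2}\;\gamma_{k,\ell}$, which follows from the commutator quoted in the footnote. A minimal numerical check of the closed form (an illustration of ours, not part of the derivation):
\begin{verbatim}
# Check that gamma_{k,l} = (-1)^k/k! * sqrt((2l)!/(2^k (2l-2k)!)) solves the
# recursion (k+1) gamma_{k+1,l} = -sqrt((2l-2k)(2l-2k-1)/2) gamma_{k,l},
# i.e. the term-by-term cancellation required for K_- to annihilate the state.
from math import factorial, sqrt, isclose

def gamma(k, l):
    return (-1) ** k / factorial(k) * sqrt(factorial(2 * l) / (2 ** k * factorial(2 * l - 2 * k)))

for l in range(2, 9):
    for k in range(l):
        lhs = (k + 1) * gamma(k + 1, l)
        rhs = -sqrt((2 * l - 2 * k) * (2 * l - 2 * k - 1) / 2) * gamma(k, l)
        assert isclose(lhs, rhs)
print("recursion verified for 2 <= l <= 8")
\end{verbatim}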
Notice that $\mathcal A_{0,0}$ is not defined and that $\mathcal A_{1,m}=0$, while $\mathcal B_{0,0}^\dagger=a_{00}^\dagger b_{00}^\dagger$. The primary states we constructed are then given by \begin{eqnarray} |n; \ell,\ell\rangle_A &=& \alpha_0 (a_{00}^\dagger)^{n-\ell} \mathcal A_{\ell,\ell}^\dagger |0\rangle , \\ |n; \ell,\ell\rangle_B &=& \beta_0 (a_{00}^\dagger)^{n-\ell} \mathcal B_{\ell,\ell}^\dagger |0\rangle . \end{eqnarray} Since the $\mathcal A_{\ell,m}^\dagger$'s, $\mathcal B_{\ell,m}^\dagger$'s, as well as $a_{00}^\dagger$, are all primaries, any product of them is a primary as well. This lets us generate primaries of various spins and charges by acting on the vacuum with these operators \begin{equation} \label{eq:CDproduct} \left(a_{00}^\dagger\right)^{n-\sum_\alpha \ell_\alpha - \sum_\beta \tilde{\ell}_\beta}\prod_{\alpha} \mathcal A_{\ell_\alpha,m_\alpha}^\dagger \prod_\beta \mathcal B_{\tilde \ell_\beta,\tilde m_\beta}^\dagger | 0 \rangle , \end{equation} where the number of derivatives of the corresponding operator is $P\equiv \sum_\alpha \ell_\alpha + \sum_\beta \tilde{\ell}_\beta$. Notice that this state is an eigenstate of $J^2$ only for maximal spin states ($m_\alpha = \ell_\alpha, \tilde m_\beta = \tilde \ell_\beta$ or $m_\alpha = -\ell_\alpha, \tilde m_\beta =-\tilde \ell_\beta$). Otherwise, one must take linear combinations of these terms to build spin multiplets. By inspecting their definition, one can be convinced that $\mathcal A_{\ell,\ell}^\dagger$ and $\mathcal B_{\ell,\ell}^\dagger$ (and hence the corresponding spin multiplets) cannot be written as products of $\mathcal A^\dagger$'s and $\mathcal B^\dagger$'s with lower spin -- they are, in a sense, ``irreducible''. Thus the above representation of a primary is unique. Indeed, as it turns out, the above representation generates all the primaries with number of derivatives bounded by $n$. In the next section we will offer a combinatoric proof of this. Before doing so, and to ease the counting, it is convenient to note that the dimensionality of the space generated by (\ref{eq:CDproduct}) is the same as that of the space generated by (\ref{eq:FockSpaceStates}), barring the spin-1 ladder operators $a^\dagger_{1,m}$. This can be seen by picking only the $k=0$ terms in the series (\ref{eq:define_C_atomic}) for the $\mathcal A$'s and $\mathcal B$'s. This remark will be used in the next section to prove that (\ref{eq:CDproduct}) provides a complete basis for primaries. \subsubsection{Combinatorics: counting primaries} As a warmup, we will first consider different subclasses of operators for which we can provide explicit expressions for the number of primaries. After having done that, we will prove that the set (\ref{eq:CDproduct}) is indeed a complete basis for the primaries. \paragraph{No $\bar\phi$, spin $\ell\leq n$, number of derivatives equal to $\ell$}\mbox{}\\ \noindent{Consider} the polar operators with $k=0$ and no $\bar\phi$ fields in eq.~(\ref{polar}). They correspond to symmetric traceless tensors with schematic form $\partial^\ell \phi^n$. Using coordinates (\ref{eq:x+-0}), the highest weight elements of the corresponding $SO(3)$ multiplets have the schematic form $\partial_-^\ell \phi^n$. The counting is now straightforward: there are as many operators as there are inequivalent ways of distributing $\ell$ derivatives $\partial_{-}$ among $n$ fields $\phi$. That is given by the number of partitions of $\ell$ into at most $n$ integers, which we denote by $p(\ell,n)$.
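Partition counts of this kind are easy to generate; the following short sketch (ours, for illustration) computes $p(\ell,n)$ by the standard recursion, together with the primary count and the restricted-part reformulation derived in the paragraphs below:
\begin{verbatim}
# Sketch: p(l, n) = number of partitions of l into at most n parts, via
# p(l, n) = p(l, n-1) + p(l-n, n); Prim(l, n) = p(l, n) - p(l-1, n).
from functools import lru_cache

@lru_cache(maxsize=None)
def p(l, n):
    if l == 0:
        return 1
    if l < 0 or n == 0:
        return 0
    return p(l, n - 1) + p(l - n, n)

def prim(l, n):
    return p(l, n) - p(l - 1, n)

@lru_cache(maxsize=None)
def p_restricted(l, n):
    # partitions of l with every part in {2, ..., n}
    if l == 0:
        return 1
    if l < 0 or n < 2:
        return 0
    return p_restricted(l, n - 1) + p_restricted(l - n, n)

print(p(5, 5), p(5, 3), prim(5, 5))  # 7 5 2, as in the text
assert all(prim(l, n) == p_restricted(l, n)
           for l in range(1, 16) for n in range(1, 16))
\end{verbatim}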
In the case $\ell\leq n$, the partition cannot contain more than $n$ elements, and so $p(\ell,n)$ reduces to the number $p(\ell)$ of unconstrained partitions of $\ell$. For example, for $\ell=5$ we get the following partitions. \begin{equation} 5: ~~(5),~(4,1),~(3,2),~(3,1,1),~(2,2,1),~(2,1,1,1),~(1,1,1,1,1).\label{l=5} \end{equation} Thus, there are \begin{equation} p(5,n) = 7 \end{equation} operators with spin $\ell=5$ and charge $n \geq 5$, while for charge $n=3$ there are only \begin{equation} p(5,3)=5 \end{equation} operators in total, counted by the first five partitions in (\ref{l=5}). We can now count primary operators. Obviously, at spin $\ell$, primaries will be in one-to-one correspondence with operators that cannot be obtained by acting with a derivative on operators of spin $\ell-1$. Therefore, the number of primaries is given by \begin{equation} \label{eq:primaryCount} \mathrm{Prim}(\ell,n)\equiv p(\ell,n)-p(\ell-1,n). \end{equation} For $\ell\leq n$ this number has a simple interpretation. Namely, it corresponds to the number of partitions of $\ell$, except those that can be obtained from partitions of $\ell-1$ by adding $1$, in other words partitions of $\ell$ containing $1$ should be eliminated \footnote{{That is because when acting with a derivative on an operator involving fewer than $n$ derivatives, among many terms, there will always arise one involving a single derivative on $\phi$.}}. As an example, for $\ell=5$ and $\ell =4$ we have respectively \begin{equation} \begin{array}{cccccccccc} 5: && (5) & (4,1) & (3,2) & (3,1,1) & (2,2,1) & (2,1,1,1) & (1,1,1,1,1) \\ 4: && & (4) & &(3,1) & (2,2) & (2,1,1) & (1,1,1,1). \end{array} \end{equation} Clearly, the $\ell=5$ primaries are counted by the partitions without $1$, so that for $n\geq 5$ \begin{equation} \mathrm{Prim}(5,n) = 2\,. \end{equation} In the previous subsection we found that any string $(a_{00}^\dagger)^{n-\ell}\Pi_\alpha \mathcal A_{\ell_\alpha\ell_\alpha}^\dagger$ forms a primary with total spin $\ell = \sum_\alpha \ell_\alpha \leq n$. Since there is no $\mathcal A_{1,1}$, it is clear that these primary states correspond to partitions of $\ell$ without 1's. Our counting argument then shows these are all the primaries of our class (polar with $k=0$ and no $\bar \phi$'s). \paragraph{No $\bar\phi$, arbitrary spin $\ell$, number of derivatives equal to $\ell$}\mbox{}\\ \noindent{For} arbitrary $\ell$, the number of primaries (\ref{eq:primaryCount}) is given by the number of partitions of $\ell$ with each part bigger than 1 and not larger than $n$, i.e. by the number of solutions of the equation \begin{equation} \sum_i \ell_i = \ell, ~~ 1< \ell_i \leq n. \label{eq:SpinRestrictions} \end{equation} That can be proven as follows. Every partition $t$ can be associated with a Young tableau. For instance, the partition $t=(4,3,2)$ of $9$ corresponds to \begin{equation} t=\ytableaushort{~~~~,~~~,~~} \end{equation} The conjugate Young tableau $t^*$ is defined by interchanging rows and columns, meaning that for the example above $t^*=(3,3,2,1)$ \begin{equation} t^*=\ytableaushort{~~~,~~~,~~,~} \end{equation} This map obviously establishes an equality between the number $p(\ell,n)$ of partitions of $\ell$ into at most $n$ parts -- i.e. the number of Young tableaux with at most $n$ rows -- and the number of partitions $p^*(\ell,n)$ with parts bounded by $n$ -- i.e. the number of Young tableaux with at most $n$ columns.
Therefore, the number of primaries can also be written as \begin{equation} \label{eq:count_higher_spin_primaries} \mathrm{Prim}(\ell,n)=p(\ell,n)-p(\ell-1,n) = p^*(\ell,n)-p^*(\ell-1,n). \end{equation} As before, we observe that every tableau counted by $p^*(\ell-1,n)$ can be promoted to a tableau counted by $p^*(\ell,n)$ by adding a row with just one box, \begin{equation} \ytableaushort{~~~,~~~,~~} \to \ytableaushort{~~~,~~~,~~,~}*[*(gray)]{0,0,0,1} \end{equation} Therefore, as claimed, the number of primaries is given by the number $p^*(\ell, n, 2)$ of Young tableaux with each row length $\ell_i$ bounded by $2\leq\ell_i\leq n$ (see Appendix \ref{app:counting_primaries} for examples). Clearly, this is equal to the number of products of operators $\mathcal A_{\ell_\alpha,\ell_\alpha}^\dagger$ defined in (\ref{eq:define_C_atomic}) such that $2\leq \ell_\alpha \leq n$ and $\sum_\alpha {\ell_\alpha} = \ell$. Notice that, while the counting is still valid, the construction does not work for $\ell > n$, since the $\mathcal A_{\ell,\ell}^\dagger$ operators have charge equal to spin, and thus cannot be used to generate operators with spin higher than the charge. \paragraph{$\phi$ and $\bar\phi$, arbitrary spin $\ell$, number of derivatives equal to $\ell$}\mbox{}\\ Consider now polar operators with $k=0$ but involving $\bar\phi$ fields. In this case the highest weight elements have the schematic form $\partial_-^\ell \phi^{n_a} \bar\phi^{n_b}$. To count the number of such operators, one can first distribute the derivatives as $\partial_-^{\ell-\ell'}\phi^{n_a} \times \partial_-^{\ell'}\bar\phi^{n_b}$, and compute the total number of operators as \begin{equation} \sum_{\ell'=0}^\ell p(\ell-\ell',n_a) p(\ell',n_b) . \end{equation} This implies that the number of primaries is given by \begin{align}\label{eq:counting_highest_with_b} \mathrm{Prim}(\ell,n_a,n_b) & = \sum_{\ell'=0}^\ell p(\ell-\ell',n_a) p(\ell',n_b) - \sum_{\ell'=0}^{\ell-1} p(\ell-1-\ell',n_a) p(\ell',n_b) \nonumber \\ & = \sum_{\ell'=0}^{\ell} p^*(\ell-\ell',n_a,2) p^*(\ell',n_b) , \end{align} where we have used the equalities deduced above from Young tableaux. This number is easy to interpret as the number of products of the form \begin{equation} \label{eq:CDproductMax} (a_{00}^\dagger)^{n-\ell} \prod_{\alpha} \mathcal A_{\ell_\alpha,\ell_\alpha}^\dagger \prod_\beta \mathcal B_{\tilde \ell_\beta,\tilde \ell_\beta}^\dagger \end{equation} with $2\leq \ell_\alpha \leq n$, $0\leq \tilde \ell_\beta \leq n$ and $\sum_\alpha \ell_\alpha + \sum_\beta \tilde \ell_\beta = \ell$. Thus, these products of operators are all the highest-weight polar primaries with $k=0$ and $\ell \leq n$. Again, the counting (\ref{eq:counting_highest_with_b}) is valid for $\ell > n$, but the explicit construction does not apply in this regime. \paragraph{All operators with number of derivatives bounded by $n$}\mbox{}\\ We finally consider operators made of both $\phi$ and $\bar\phi$ fields and a number of derivatives $P\leq n$, with possibly contracted indices. We will not provide an explicit formula for the number of primaries in the general case, but will show that primaries are in one-to-one correspondence with operators of the form (\ref{eq:CDproduct}). The following argument is valid in any dimension.
A basis of the linear space of charge-$n$ operators is obtained by considering the set of monomials of the schematic form $\partial_{\mu_1}\dots \partial_{\mu_P}\phi^{n_a}\bar\phi^{n_b}$ with $n_a-n_b=n$ and with the $P$ derivatives distributed on the fields in all possible ways (removing the operators which are made redundant by the equations of motion $\partial^2 \phi = \partial^2\bar\phi=0$). We focus on a finite-dimensional subspace $H_{n_a,n_b,P}$ of fixed $n_a$, $n_b$ and $P$. The counting argument that we will provide works for each of those subspaces individually, and thus extends to the full space of operators. For each subspace, we construct a different basis, in which part of the elements are manifestly descendant states. The remaining elements of the basis span a subspace of the same dimensionality as the subspace of explicitly known primary operators. This means that we have successfully identified a complete basis of primaries and descendants. The construction is the following. Monomials in the basis can be organized by factoring out all powers of $\phi$ carrying either 0 or 1 derivative \begin{equation} \label{eq:orderedBasis} \mathbb B_{n_a,n_b,P} = \left\{ \phi^q (\partial_{\mu_1} \phi) (\partial_{\mu_2}\phi) \cdots (\partial_{\mu_p} \phi)\, O^{(P-p)}_{n_a-p-q,n_b}\ , p \leq P \right\} , \end{equation} with $O^{(p)}_{n, m}$ any monomial involving $n$ $\phi$'s, $m$ $\bar\phi$'s and $p$ derivatives, such that each $\phi$ carries at least two derivatives. Notice that all the $(\partial_{\mu_i}\phi)$ factors commute with each other, hence without loss of generality we can assume they are ordered $\mu_1 \leq \mu_2 \leq \dots \leq \mu_p$. For any $p\leq P$ we will also consider the sub-basis of operators where $p$ $\phi$'s have a single derivative \begin{equation} \label{eq:orderedBasisFixedp} \mathbb B_{n_a,n_b,P}^p = \left\{ \phi^q (\partial_{\mu_1} \phi) (\partial_{\mu_2}\phi) \cdots (\partial_{\mu_p} \phi)\, O^{(P-p)}_{n_a-p-q,n_b}\ \right\} . \end{equation} Now, for $p\geq 1$, we can rewrite the elements of $\mathbb B_{n_a,n_b,P}^p$ as (for simplicity we write $O$ instead of $O^{(P-p)}_{n_a-p-q,n_b}$) \begin{equation} \begin{split} \phi^q (\partial_{\mu_1} \phi) (\partial_{\mu_2}\phi) & \cdots (\partial_{\mu_p} \phi)\, O \\ = \frac{1}{q+1} \Bigg( & \partial_{\mu_1} \Big( \phi^{q+1} (\partial_{\mu_2}\phi) \cdots (\partial_{\mu_p} \phi)\, O \Big) \\ & - \sum_{k=2}^p \phi^{q+1} (\partial_{\mu_2} \phi) \cdots (\partial_{\mu_{k-1}}\phi) (\partial_{\mu_{k+1}}\phi) \cdots (\partial_{\mu_p}\phi) \partial_{\mu_1}\partial_{\mu_k} \phi\, O \\ & - \phi^{q+1} (\partial_{\mu_2} \phi) \cdots ( \partial_{\mu_p}\phi) \partial_{\mu_1}O \Bigg) . \end{split} \end{equation} The term in the first line of the right-hand side is obviously a descendant operator, while the two other lines contain monomials belonging to $\mathbb B_{n_a,n_b,P}^{p-2}$ and $\mathbb B_{n_a,n_b,P}^{p-1}$. This process can be repeated, rewriting the operators of the two last lines in the same way, as linear combinations of descendants and members of the lower sub-bases. The process can be iterated until the right-hand side is written as a linear combination of descendants and monomials in $\mathbb B_{n_a,n_b,P}^0$. The latter involve no single-derivative $\phi$ fields and cannot be further rewritten. Our result implies that the subspace generated by $\mathbb B_{n_a,n_b,P}^p$ has the same primary content as the subspace generated by $\mathbb B_{n_a,n_b,P}^0$.
Since this holds for any $p$, the full space generated by $\mathbb B_{n_a,n_b,P}$ has the same primary content as the subspace generated by $\mathbb B_{n_a,n_b,P}^0$. We therefore conclude that the dimension of the subspace of primaries within $H_{n_a,n_b,P}$ is at most the number of elements in $\mathbb B_{n_a,n_b,P}^0$. Our proof can now be completed by comparing the elements in $\mathbb B_{n_a,n_b,P}^0$ to the linearly independent primary states provided in (\ref{eq:CDproduct}). The latter, as we already remarked, are in a one-to-one correspondence with the set (\ref{eq:FockSpaceStates}), barring states involving $a^\dagger_{\ell=1,m}$. By the operator-state correspondence the traceless symmetric derivatives $\partial_{\mu_1}\dots \partial_{\mu_r} \phi$ and $\partial_{\mu_1}\dots \partial_{\mu_s} \bar \phi$ match respectively $a^\dagger_{r,m_r}$ and $b^\dagger_{s,m_s}$. It is then manifest that the elements in $\mathbb B_{n_a,n_b,P}^0$ and in (\ref{eq:FockSpaceStates}) are in a one-to-one correspondence. In particular the exclusion of $\partial_i \phi$ factors in $\mathbb B_{n_a,n_b,P}^0$ crucially matches the exclusion of $a_{1,m}^\dagger$ in (\ref{eq:FockSpaceStates}), which in turn is mandated to match the building blocks (\ref{eq:CDproduct}). As the cardinality of the basis $\mathbb B_{n_a,n_b,P}^0$ sets an upper bound on the dimension of the subspace of primaries, it must be that the states (\ref{eq:CDproduct}) with the same $n_a, n_b$ and $P$ are a complete basis for the corresponding space of primaries. \section{Fock space of superfluid fluctuations\label{sec:superfluid_fluctuations}} We now turn to the semiclassical description, which applies, as we will see, for sufficiently large charge, regardless of the coupling. The goal of this section is to associate primary operators to fluctuations around the non-trivial saddle. \subsection{Fluctuations around a non-trivial saddle \label{sec:saddle}} A detailed presentation of the large charge semiclassical method can be found in~\cite{Badel:2019oxl,Badel:2019khk}. Here we will outline the main ideas, providing formulae for further reference. We present the method in the context of an interacting Wilson-Fisher fixed point, but it applies also in free theory: all formulae can be safely evaluated at $\lambda=0$ and $\lambda n = 0$. \subsubsection{Saddle point} The method outlined in this section is suitable for computing correlators of the form \begin{equation} \langle \bar \phi^n(x_f) \mathcal O_N(x_N) \dots \mathcal O_1(x_1) \phi ^n(x_i) \rangle. \label{eq:N+2Correlators} \end{equation} First, it proves useful to map the theory to the cylinder $\mathbb R^d \to \mathbb R \times \mathbb S^{d-1}$ (generalizing (\ref{eq:PlaneCylinderCoords}) to $d$~dimensions). For a generic primary operator, (\ref{eq:PlaneCylinderField}) generalizes to \begin{equation} \hat {\mathcal O} (\tau,\vec n) = e^{\Delta_{\mathcal O} \tau}\mathcal O(x), \label{eq:PlaneCylinderOperators} \end{equation} with $\Delta_{\mathcal O}$ the scaling dimension of $\mathcal O$. The theory on the cylinder is equivalent to that on the plane only at the Wilson-Fisher fixed point, where the theory is conformal. However, to compute corrections we have to work off-criticality, and set the coupling to its critical value only at the end of computations. The advantage of this mapping is that time translation, which is a symmetry on the cylinder even off-criticality, corresponds to dilation on the plane, which is not a symmetry off-criticality.
The additional symmetry of the non-critical theory on the cylinder makes it easier to find a saddle point. Explicit solutions for the saddle on the plane are known only in $d=3$ and $d=4$ for \REf{eq:phi6} and \REf{eq:phi4} respectively~\cite{Cuomo:2021ygt}. In terms of the variables on the cylinder, the correlator (\ref{eq:N+2Correlators}) has the form \begin{equation} \langle \hat{\bar\phi}^n(\tau_f) \hat{\mathcal O}_N(\tau_N) \dots \hat{\mathcal O}_1(\tau_1) \hat{\phi}^n(\tau_i)\rangle e^{-\Delta_{\phi^n}\tau_f} e^{-\Delta_N\tau_N} \dots e^{-\Delta_1\tau_1} e^{-\Delta_{\phi^n}\tau_i}\,, \end{equation} where for simplicity we did not indicate the dependence of the operators on the angular coordinates. The operator-state correspondence (see section \ref{sec:oppStateCorr}) yields \begin{equation} |n\rangle = \frac{(4 \pi)^{n/2}}{\sqrt{n!}} \lim_{\tau_i\to-\infty} e^{-\Delta_{\phi^n}\tau_i} \hat\phi^n(\tau_i) |0\rangle\,, \end{equation} and its conjugate \begin{equation} \langle n| = \frac{(4 \pi)^{n/2}}{\sqrt{n!}} \lim_{\tau_f \to \infty} \langle 0 | e^{ \Delta_{\phi^n}\tau_f} \hat{\bar\phi}^n(\tau_f)\,. \end{equation} Eq.~(\ref{eq:N+2Correlators}) is thus related to cylinder correlators according to \begin{equation} \lim_{x_f \to \infty} \frac{(4\pi)^n}{n!} x_f^{2\Delta_{\phi^n}} \langle \bar \phi^n (x_f) \mathcal O_N (x_N)\dots \mathcal O_1(x_1) \phi ^n(0) \rangle = \langle n | \hat {\mathcal O}_N (\tau_N)\dots \hat {\mathcal O}_1(\tau_1) | n \rangle \prod_{j=1}^N e^{-\Delta_j \tau_j}. \label{eq:CorrelatorsPlaneCylinder} \end{equation} Since $|n\rangle$, corresponding to the operator $\phi^n$, is the lowest dimension state of charge $n$, for any charge $n$ state $|\psi_n\rangle$ with non-zero overlap with $|n\rangle$, we have \begin{align} \lim_{\tau_i\to-\infty} e^{H \tau_i} | \psi_n \rangle & = \lim_{\tau_i\to-\infty} e^{\Delta_{\phi^n}\tau_i} |n\rangle \langle n |\psi_n \rangle \\ \lim_{\tau_f\to\infty} \langle \psi_n | e^{-H \tau_f} & = \lim_{\tau_f\to\infty} e^{-\Delta_{\phi^n}\tau_f} \langle \psi_n |n \rangle \langle n |, \end{align} where $H$ is the Hamiltonian on the cylinder. Therefore we can also write \begin{equation} \label{eq:CorrelatorCylinderPsi} \langle n | \hat {\mathcal O}_N (\tau_N)\dots \hat {\mathcal O}_1(\tau_1)| n \rangle = \lim_{\substack{\tau_f \to \infty \\ \tau_i \to - \infty}} \frac{\langle \psi_n | e^{-H \tau_f} \hat {\mathcal O}_N (\tau_N)\dots \hat {\mathcal O}_1(\tau_1) e^{H \tau_i} | \psi_n \rangle}{\langle \psi_n | e^{-H (\tau_f - \tau_i)} | \psi_n \rangle}. \end{equation} The right-hand side can be represented by a path integral. For that purpose, it is useful to introduce polar coordinates for the fields \begin{equation} \hat \phi = \frac{\rho}{\sqrt{2}} e^{i \chi},~~ \hat{\bar \phi} = \frac{\rho}{\sqrt{2}} e^{-i \chi}\,, \label{eq:polarCoordinatesFields} \end{equation} and single out their zero modes on the sphere \begin{eqnarray} \chi &=& \chi_0+\chi_\perp, ~~ \int \chi (\vec n) d \Omega_{d-1} = \chi_0 \Omega_{d-1}, ~~ \int \chi_\perp (\vec n) d \Omega_{d-1} = 0\,,\\ \rho&=&\rho_0+\rho_\perp,~~\int \rho(\vec n)d \Omega_{d-1} = \rho_0 \Omega_{d-1}, ~~\int \rho_\perp (\vec n) d \Omega_{d-1} = 0\,, \end{eqnarray} with $\Omega_{d-1} = \frac{2\pi^{d/2}}{\Gamma(d/2)}$ the volume of $\mathbb S^{d-1}$.
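As a trivial numerical sanity check of this volume formula (ours, for illustration only):
\begin{verbatim}
import math

def sphere_volume(d):
    # Omega_{d-1} = 2 pi^{d/2} / Gamma(d/2), the volume of S^{d-1}
    return 2 * math.pi ** (d / 2) / math.gamma(d / 2)

assert abs(sphere_volume(2) - 2 * math.pi) < 1e-12       # S^1
assert abs(sphere_volume(3) - 4 * math.pi) < 1e-12       # S^2
assert abs(sphere_volume(4) - 2 * math.pi ** 2) < 1e-12  # S^3, i.e. Omega_3
\end{verbatim}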
A convenient choice for the state~$| \psi_n \rangle$ is then \begin{equation} \label{eq:decomposeHomogeneous} \langle \rho, \chi | \psi_n \rangle = \delta(\rho_0-f) \delta(\rho_\perp)\delta(\chi_\perp)e^{i n \chi _0}, \end{equation} with $f$ a constant whose value will be suitably chosen below. As a result eq.~(\ref{eq:CorrelatorCylinderPsi}) can be recast as \begin{equation} \label{eq:cylinderPI} \langle n | \hat {\mathcal O}_N \dots \hat {\mathcal O}_1| n \rangle \underset{_{\substack{\tau_f \to \infty \\ \tau_i \to - \infty}} }{=} \mathcal Z^{-1} \int d\chi_i d\chi_f e^{- \frac{in(\chi_f-\chi_i)}{\Omega_{d-1}}} \int_{\substack{\rho(\tau_i)=f \\ \chi(\tau_i)=\chi_i}} ^{\substack{\rho(\tau_f)=f \\ \chi(\tau_f)=\chi_f}} \mathcal D \rho \mathcal D \chi \, \hat{\mathcal O}_N \dots \hat{\mathcal O}_1 \, e^{-S[\rho,\chi]}, \end{equation} with \begin{equation} \label{eq:cylinderPIPartitionFunction} \mathcal Z = \int d\chi_i d\chi_f e^{- \frac{in(\chi_f-\chi_i)}{\Omega_{d-1}}} \int_{\substack{\rho(\tau_i)=f \\ \chi(\tau_i)=\chi_i}} ^{\substack{\rho(\tau_f)=f \\ \chi(\tau_f)=\chi_f}} \mathcal D \rho \mathcal D \chi \, e^{-S[\rho,\chi]} \,, \end{equation} and where the action is given by \begin{equation} \label{eq:polarLagrangian} S[\rho, \chi] = \int d\tau d\Omega_{d-1} \left [ \frac{1}{2} (\partial \rho)^2 +\frac{1}{2} \rho^2 (\partial \chi)^2 + \frac{1}{2}m^2 \rho^2 +V_{int}(\rho) \right] \end{equation} with $m=\frac{d}{2}-1$ and \begin{equation} \label{eq:Vint} V_{int}(\rho) = \begin{cases} \frac{\lambda}{16} \rho^4 & \text{ for } (\bar \phi \phi)^2 , \\ \frac{\lambda^2}{288} \rho^6 & \text{ for } (\bar \phi \phi)^3 . \end{cases} \end{equation} The saddle point is fixed by two conditions, corresponding to the variation of the action with respect to the fields in the bulk and on the boundary. The latter, in view of eq.~(\ref{eq:decomposeHomogeneous}), reduces to variation with respect to the zero modes of $\chi$, $\chi_i$ and $\chi_f$. From the bulk we have \begin{eqnarray} \label{eq:chiEOM} \partial_\mu \left ( \sqrt{g} g^{\mu \nu}\rho^2\partial_{\nu} \chi\right ) &=& 0,\\ -\partial^2\rho+\rho\left[(\partial\chi)^2+m^2\right]+\partial_\rho V_{int}(\rho)&=&0 \label{eq:rhoEOM} \end{eqnarray} with $g_{\mu \nu}$ the metric on the cylinder. The first equation, corresponding to variation with respect to $\chi$, coincides with $U(1)$ current conservation. The variation at the boundaries gives instead \begin{equation} (\rho^2 \dot \chi) (\tau_i) =(\rho^2 \dot \chi) (\tau_f) = - \frac{in}{\Omega_{d-1}}, \label{eq:boundaryEOM} \end{equation} which fixes the charge to be $n$ and spatially homogeneous at the boundaries. Equations \REf{eq:chiEOM}, \REf{eq:rhoEOM}, \REf{eq:boundaryEOM} along with the constraint \REf{eq:decomposeHomogeneous} have the simple solution \begin{equation} \rho_S (\tau) = f, ~~ \chi_S (\tau)= -i \mu (\tau-\tau_i)+\chi_i, \label{eq:SaddleSolution} \end{equation} with $\mu$ and $f$ satisfying \begin{eqnarray} \label{eq:muRelationf} \mu^2-m^2 &=& \frac{1}{f} \frac{\partial V_{int}(f)}{\partial f}\,, \\ f^2 \mu &=& \frac{n}{\Omega_{d-1}}. \label{eq:ChargeRelationfnmu} \end{eqnarray} A few comments are in order. The last two equations determine the ``suitable'' value of $f$ we alluded to below its definition in \REf{eq:decomposeHomogeneous}. It is only for this specific choice of $f$ in \REf{eq:decomposeHomogeneous} that the saddle point equations have a solution with a simple linear time dependence.
Other choices would give solutions with a more complicated behaviour near the boundaries, but for $\tau_f-\tau_i\to \infty$ the result for \REf{eq:cylinderPI} would be the same. Notice that, as the solution is invariant under the combination $H-\mu Q$ of time translations and charge rotations, $\mu$ should be interpreted as the chemical potential. Finally, notice that, while $\chi_f-\chi_i= -i\mu (\tau_f-\tau_i)$ is fixed by \REf{eq:SaddleSolution}, the zero mode $\chi_i$ is not: integrating over it guarantees that correlators respect charge conservation. Eqs.~\REf{eq:muRelationf} and \REf{eq:ChargeRelationfnmu}, for the two choices in \REf{eq:Vint}, imply \begin{align} \text{ for } (\bar \phi \phi)^2: \mu_4(\lambda n,d) & = \frac{(d-2)}{2}\frac{\left( 3^{1/3}+ \left[ \frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3} - \sqrt{ \left(\frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3}\right)^2-3 }\right]^{2/3} \right) } {3^{2/3} \left[ \frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3} - \sqrt{ \left(\frac{9 \lambda n \Gamma(d/2)}{2 \pi^{d/2} (d-2)^3}\right)^2-3 }\right]^{1/3} } \label{eq:mu_d_phi4} , \\ \text{ for } (\bar \phi \phi)^3: \mu_6(\lambda n,d) & = \frac{(d-2)}{2} \frac{\sqrt{1+\sqrt{1+ \frac{\lambda^2 n^2 \Gamma(d/2)^2}{3 \pi^d (d-2)^4} }}}{\sqrt{2}} .\label{eq:mu_d_phi6} \end{align} Expanding around the saddle we can systematically compute any observable as a power series in $\lambda$ with coefficients that are themselves functions of $\lambda n$. For instance, given \begin{equation} \lim_{\substack{\tau_f \to \infty \\ \tau_i \to - \infty}} \langle \psi_n | e^{-H (\tau_f - \tau_i)} | \psi_n \rangle = e^{-\Delta_{\phi^n} (\tau_f - \tau_i)} | \langle n | \psi_n \rangle|^2, \end{equation} and its path integral representation (\ref{eq:cylinderPIPartitionFunction}), the evaluation of the action on the saddle point immediately gives the scaling dimension of $\phi^n$ at leading order shown in \REf{eq:phinDimension}. \subsubsection{Fluctuations \label{sec:SpectrumFluctuations}} Expanding the fields in (\ref{eq:cylinderPI}) around the saddle, \begin{equation} \label{eq:cylinderFluctuations} \rho=\rho_S+r, ~~ \chi=\chi_S+\frac{\pi}{f}, \end{equation} we can now write \begin{equation} \langle n | \hat {\mathcal O}_N \dots \hat {\mathcal O}_1| n \rangle = \frac{\displaystyle \int d\chi_i \int \mathcal D r \mathcal D \pi \, \hat{\mathcal O}_N \dots \hat{\mathcal O}_1 e^{-\hat S[r,\pi]}} {\displaystyle 2\pi \int \mathcal D r \mathcal D \pi \, e^{-\hat S[r,\pi]}} , \label{eq:aroundSaddle} \end{equation} where the action for the fluctuations is given by \begin{equation} \hat S[r,\pi] = \int d\tau d\Omega_{d-1} \left ( \mathcal L_2+\mathcal L _{int} \right ), \label{eq:FluctuationsLagrangian} \end{equation} with \begin{equation} \mathcal L_2=\frac{1}{2} (\partial r)^2+\frac{1}{2} (\partial \pi)^2-2i \mu r \dot\pi +\frac{1}{2}\left [ V^{''}_{int}(f) - (\mu^2-m^2) \right ] r^2, \label{eq:quadraticLagrangian} \end{equation} and \begin{equation} \mathcal L_{int}= \frac{1}{f} \left [ r (\partial\pi)^2 - i \mu r^2 \dot \pi \right ] +\frac{r^2 (\partial\pi)^2}{2 f^2} + \left[ V_{int}(f+r) - \left( V_{int}(f) + V^{'}_{int}(f) r + \frac{1}{2} V^{''}_{int}(f) r^2 \right) \right]. \end{equation} Notice that the $\hat{\mathcal O}_i$ are local functions of $\rho$ and $\chi$. At leading order the correlator is then simply given by the product of the $\hat{\mathcal O}_i$ computed on the saddle.
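For orientation, the saddle data $(\mu,f)$ entering the expressions above are easy to obtain numerically. For the quartic case of \REf{eq:Vint}, eqs.~\REf{eq:muRelationf} and \REf{eq:ChargeRelationfnmu} combine into the cubic $\mu(\mu^2-m^2)=\lambda n/(4\Omega_{d-1})$, which the following Python sketch solves by bisection (the values of $d$, $n$ and $\lambda$ are illustrative assumptions):
\begin{verbatim}
import math

def mu_quartic(lam_n, d):
    # solve mu (mu^2 - m^2) = lam_n / (4 Omega_{d-1}), i.e. the combination
    # of eqs. (muRelationf) and (ChargeRelationfnmu) for V_int = lam rho^4/16
    m = d / 2 - 1
    omega = 2 * math.pi ** (d / 2) / math.gamma(d / 2)
    rhs = lam_n / (4 * omega)
    lo, hi = m, m + 1.0
    while hi * (hi ** 2 - m ** 2) < rhs:   # bracket the root
        hi *= 2
    for _ in range(200):                   # bisect the monotonic cubic
        mid = (lo + hi) / 2
        lo, hi = (mid, hi) if mid * (mid ** 2 - m ** 2) < rhs else (lo, mid)
    return (lo + hi) / 2

d, n, lam = 4, 1000, 0.3                   # illustrative values only
mu = mu_quartic(lam * n, d)
f2 = n / (2 * math.pi ** 2 * mu)           # f^2 from f^2 mu = n / Omega_3
print(f"mu = {mu:.6f}, f^2 = {f2:.4f}, <phibar phi>_saddle = {f2 / 2:.4f}")
assert abs(mu_quartic(0.0, d) - (d / 2 - 1)) < 1e-10   # free limit mu -> m
\end{verbatim}
In particular, at leading order $\widehat{\bar\phi\phi}$ evaluated on the saddle is just $f^2/2$, a fact we will use again in section \ref{sec:3ptfunction}.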
The canonically conjugate momenta\footnote{As in (\ref{eq:aaCylinderMomentumComplex}), the presence of ``$i$'' in front of time derivatives is because we work in Euclidean time.} forming pairs $(r,P)$ and $(\pi, \Pi)$ are \begin{eqnarray} P = i \dot r, ~~ \Pi = i \dot \pi \left( 1+\frac{r}{f} \right )^2+2\mu r \left( 1+\frac{r}{2f} \right ). \label{eq:momentaSaddle} \end{eqnarray} These variables can be expanded in harmonic modes as \begin{equation}\label{eq:harmonicComponents} \begin{pmatrix} r(\tau,\vec n) \\ \pi(\tau,\vec n) \end{pmatrix} = \sum_{\ell=0}^\infty \sum_{\vec m} \begin{pmatrix} r_{\ell \vec m}(\tau) \\ \pi_{\ell\vec m}(\tau) \end{pmatrix} Y_{\ell\vec m}(\vec n) \ , \quad \begin{pmatrix} P(\tau,\vec n) \\ \Pi(\tau,\vec n) \end{pmatrix} = \sum_{\ell=0}^\infty \sum_{\vec m} \begin{pmatrix} P_{\ell\vec m}(\tau) \\ \Pi_{\ell\vec m}(\tau) \end{pmatrix} Y^*_{\ell\vec m}(\vec n)\,, \end{equation} where $Y_{\ell\vec m}(\vec n)$ are the spherical harmonics in $d-1$ dimensions\footnote{$\vec m$ is a multi-index taking \begin{equation} N_{\ell,d} = (2\ell+d-2) \frac{(\ell+d-3)!}{(d-2)! \ell!} \end{equation} different values.} satisfying \begin{equation} \Delta_{\mathbb S^{d-1}} Y_{\ell\vec m} (\vec n) = - J_\ell Y_{\ell\vec m}(\vec n) , \end{equation} where $\Delta_{\mathbb S^{d-1}}$ is the Laplacian on the sphere $\mathbb S^{d-1}$ and where the eigenvalue $J_\ell$ was given in \REf{eq:defJl}. The $Y_{\ell\vec m}(\vec n)$ also satisfy the normalization and completeness conditions \begin{equation} \label{eq:orthonormalityHarmonics} \int Y_{\ell\vec m}(\vec n) Y^*_{\ell'\vec m'}(\vec n) d\Omega_{d-1} = \delta_{\ell\ell'} \delta_{\vec m\vec m'}, \end{equation} and \begin{equation} \label{eq:completenessHarmonics} \sum_{\ell=0}^\infty \sum_{\vec m} Y_{\ell\vec m}(\vec n) Y^*_{\ell\vec m}(\vec n') = \delta^{(\mathbb S^{d-1})} (\vec n-\vec n'). \end{equation} Notice in particular that $Y_{0\vec 0}=1/\sqrt {\Omega_{d-1}}$. The harmonic modes are canonical variables satisfying equal-time commutation relations \begin{equation} \label{eq:commutationHarmonicComponents} \begin{split} [r(\tau,\vec n),P(\tau,\vec n')] & = i \delta(\vec n-\vec n') \Leftrightarrow [r_{\ell\vec m}(\tau),P_{\ell'\vec m'}(\tau)] = i \delta_{\ell\ell'}\delta_{\vec m\vec m'} ,\\ [\pi(\tau,\vec n),\Pi(\tau,\vec n')] & = i \delta(\vec n-\vec n') \Leftrightarrow [\pi_{\ell\vec m}(\tau),\Pi_{\ell'\vec m'}(\tau)] = i \delta_{\ell\ell'}\delta_{\vec m\vec m'} , \end{split} \end{equation} with the other commutators vanishing. \subsubsection{Linearized fluctuations} In section \ref{sec:twoFock} we will need the modes evolving according to the full Lagrangian. To set up perturbation theory and to compute the energy spectrum at lowest order, we must however consider the modes of the quadratic Lagrangian \REf{eq:quadraticLagrangian} \begin{equation} \mathcal L_2=\frac{1}{2} (\partial r)^2+\frac{1}{2} (\partial \pi)^2-2i \mu r \dot\pi +\frac{1}{2}M^2r^2, \end{equation} with \begin{equation} M^2 = V^{''}_{int}(f) - f^{-1}V'_{int} (f)= V^{''}_{int}(f) - (\mu^2-m^2)\,. \end{equation} At this order the canonical momenta are \begin{eqnarray} \tilde P = i \dot r, ~~ \tilde \Pi = i \dot \pi +2\mu r .
\label{eq:momentalinearized} \end{eqnarray} The quantized fields (and the spectrum) are obtained by considering the linearized equations of motion \begin{equation} \left( \begin{array}{cc} \partial_\tau^2+\Delta_{\mathbb S^{d-1}} -M^2 & 2i \mu \partial_\tau \\ -2i \mu \partial_\tau & \partial_\tau^2+\Delta_{\mathbb S^{d-1}} \end{array} \right) \left ( \begin{array}{c} r \\ \pi \end{array} \right)=0, \label{eq:EOMFluctuationsMatrix} \end{equation} and by finding the complete set of harmonic mode solutions of the form \begin{equation} \begin{pmatrix} r_{\ell\vec m}(\tau) \\ \pi_{\ell\vec m}(\tau) \end{pmatrix} Y_{\ell\vec m}(\vec n) = \begin{pmatrix} C_1 \\ C_2 \end{pmatrix} e^{-\omega \tau} Y_{\ell\vec m}(\vec n)\,. \end{equation} For each $\ell$ we find two solutions \begin{equation} \begin{split} \omega_{A}^2(\ell)=J_\ell+\frac{V^{''}_{int}(f)+3\mu^2+m^2}{2} - \sqrt{\left( \frac{V^{''}_{int}(f)+3\mu^2+m^2}{2}\right )^2+4 \mu^2 J_\ell}\, , \\ \omega_{B}^2(\ell)=J_\ell+\frac{V^{''}_{int}(f)+3\mu^2+m^2}{2} + \sqrt{\left( \frac{V^{''}_{int}(f)+3\mu^2+m^2}{2}\right )^2+4 \mu^2 J_\ell}\, , \label{eq:phiSpectrumV} \end{split} \end{equation} with the corresponding coefficients $C_{1,2}^{A,B}(\ell)$, whose expression we do not need to display. The $\omega_{A,B}^2(\ell)$ determine the energy spectrum shown in \REf{eq:phiSpectrum}. Expanding the fields $(\pi,r)$ in the complete set of solutions and imposing canonical commutation relations with the conjugate momenta \REf{eq:momentalinearized}, we find \begin{eqnarray} \label{eq:rpiDYExpansion} \left ( \begin{array}{c} r \\ \pi \end{array} \right) & = & \left ( \begin{array}{c} \displaystyle \frac{2\mu}{\omega_B^2(0)} \, p_\pi \\ \displaystyle \hat \pi -i p_\pi \tau \left ( 1- \frac{4\mu^2}{\omega_B^2(0)}\right ) \end{array} \right) Y_{0\vec 0} \\ && + \sum_{\ell=1}^\infty \sum_{\vec m} \sqrt{\frac{\omega_A(\ell)}{2 \left[ \omega_B^2(\ell)-\omega_A^2(\ell) \right ]}} \left [ \left ( \begin{array}{c} \displaystyle \sqrt{\frac{J_\ell}{\omega_A^2(\ell)}-1} \\ i \displaystyle \sqrt{\frac{\omega_B^2(\ell)}{J_\ell}-1} \end{array} \right) A_{\ell\vec m}Y_{\ell\vec m} e^{- \omega_A(\ell) \tau} + h.c. \right ] \nonumber \\ && + \sum_{\ell=0}^\infty \sum_{\vec m} \sqrt{\frac{\omega_B(\ell)}{2 \left[ \omega_B^2(\ell)-\omega_A^2(\ell) \right ]}} \left [ \left ( \begin{array}{c} \displaystyle \sqrt{1-\frac{J_\ell}{\omega_B^2(\ell)}} \\ -i \displaystyle \sqrt{1-\frac{\omega_A^2(\ell)}{J_\ell}} \end{array} \right) B_{\ell\vec m}Y_{\ell\vec m} e^{-\omega_B(\ell)\tau} + h.c. \right ], \nonumber \end{eqnarray} where operators $(A_{\ell\vec m},A_{\ell\vec m}^\dagger)$, $(B_{\ell\vec m},B_{\ell\vec m}^\dagger)$ and $(\hat \pi,p_\pi)$ are canonically conjugate pairs: \begin{equation} [A_{\ell\vec m},A^\dagger_{\ell'\vec m'}] = \delta_{\ell\ell'} \delta_{\vec m\vec m'}, ~~ [B_{\ell\vec m},B^\dagger_{\ell'\vec m'}] = \delta_{\ell\ell'} \delta_{\vec m\vec m'}, ~~ [\hat \pi, p_\pi] = i, \label{eq:commutationAABBPpipi} \end{equation} with all other commutators vanishing. In the $\ell=0$ term of the last sum, one has to use the limit \begin{equation} \lim_{\ell\to 0} \frac{\omega^2_A(\ell)}{J_\ell} = 1- \frac{4\mu^2}{\omega_B^2(0)} . \end{equation} Notice that the $A_{\ell\vec m}$ are defined for $\ell\geq 1$ and have frequency $\omega_A(\ell)$, while the $B_{\ell\vec m}$ are defined for $\ell\geq 0$ and have frequency $\omega_B(\ell)$. The role of the $\ell =0$ mode in the $A$ sector is played by $\hat \pi$. Several features of \REf{eq:phiSpectrumV} are worth remarking; they can also be verified directly with the numerical sketch below.
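A minimal Python sketch of such a check, written for the quartic case (where \REf{eq:muRelationf} gives $V''_{int}(f)=3(\mu^2-m^2)$) and assuming the standard Laplacian eigenvalue $J_\ell=\ell(\ell+d-2)$ of \REf{eq:defJl}:
\begin{verbatim}
import math

def omega_AB(ell, mu, d):
    # dispersion relations (eq:phiSpectrumV), quartic case:
    # V''_int(f) = 3 (mu^2 - m^2) by the saddle relation (eq:muRelationf)
    m = d / 2 - 1
    J = ell * (ell + d - 2)   # Laplacian eigenvalue J_ell on the sphere
    X = (3 * (mu ** 2 - m ** 2) + 3 * mu ** 2 + m ** 2) / 2
    S = math.sqrt(X ** 2 + 4 * mu ** 2 * J)
    # max(...) guards against rounding for the gapless mode at ell = 0
    return math.sqrt(max(J + X - S, 0.0)), math.sqrt(J + X + S)

for d in (3, 4, 5):           # free limit mu = m: omega_A = l, omega_B = l+d-2
    for ell in range(6):
        wA, wB = omega_AB(ell, d / 2 - 1, d)
        assert abs(wA - ell) < 1e-9 and abs(wB - (ell + d - 2)) < 1e-9

for mu in (1.0, 1.7, 5.0):    # interacting d = 4: omega_A(0)=0, omega_A(1)=1
    assert omega_AB(0, mu, 4)[0] < 1e-6
    assert abs(omega_AB(1, mu, 4)[0] - 1) < 1e-6
print("spectrum checks passed")
\end{verbatim}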
The first is \begin{equation} \omega_A(0)=0\,. \end{equation} This is the manifestation of a Goldstone boson associated with $U(1)$ symmetry breaking around the saddle. The $U(1)$ acts as a constant shift of $\pi$, while $\rho$ is invariant. Therefore $A_{\ell\vec m}$ and $B_{\ell\vec m}$ are all neutral while $\hat \pi$ transforms by a constant shift. Notice that the conjugate momentum $p_\pi$ precisely generates these transformations. Indeed, applying Noether's theorem to the Lagrangian (\ref{eq:FluctuationsLagrangian}) and comparing the result to the generator $Q$ of $\chi$ shifts in (\ref{eq:polarLagrangian}), we find \begin{equation} \label{eq:pPiExact} p_\pi = (Q-n)\frac {Y_{0 \vec 0}}{f}. \end{equation} Up to a factor, the zero mode $\hat \pi$ is the phase $\chi_i$ that exactly parametrizes the family of solutions at the full non-linear level. It therefore makes sense to treat this mode fully non-linearly, singling it out when expressing $\hat\phi$ in terms of the harmonic modes \begin{equation} \label{eq:minchia} \pi(\tau, \vec n) = \hat \pi Y_{0\vec 0} + \tilde \pi(\tau, \vec n) , \end{equation} and factoring it out from $\hat\phi$\footnote{Here we have also absorbed the $i\mu \tau_i$ in \REf{eq:SaddleSolution} into $\hat \pi /f$ or equivalently set $\tau_i=0$.} \begin{equation} \label{eq:fieldPihatFactored} \hat \phi(\tau, \vec n) = \frac{f+r}{\sqrt{2}} e^{\mu \tau} e^{i \frac{\hat\pi Y_{0\vec 0}}{f}} e^{i \frac{\tilde \pi}{f}} \simeq \frac{f+r}{\sqrt{2}} e^{\mu \tau} e^{i \frac{\hat\pi Y_{0\vec 0}}{f}} (1+i \frac{\tilde \pi}{f}). \end{equation} As dictated by the commutation relations and by the definition of the modes, the factor $e^{i \frac{\hat\pi Y_{0\vec 0}}{f}}$ has charge 1 while the fields $r, \tilde \pi$ are neutral, which is consistent with the transformation property of $\hat \phi$. Thus $\hat \pi$ is a cyclic coordinate with periodicity $2\pi f/Y_{0\vec 0}$ and the canonical pair $(\hat \pi,p_\pi)$ does not correspond to a harmonic oscillator with an associated Fock space. The Hamiltonian for this pair is \begin{equation} H_{\hat \pi} = \frac{p_\pi^2}{2} \left [ 1 - \frac{4\mu^2}{\omega_B^2(0)} \right ]\,. \end{equation} The second important feature is that the $B$-mode is gapped \begin{equation} \omega^2_B(\ell )\geq\omega^2_B(0)=V^{''}_{int}(f)+3\mu^2+m^2>0\,. \end{equation} For large $\mu$, or equivalently large $\lambda n$ (see \REf{eq:mu_d_phi4} and \REf{eq:mu_d_phi6}), we can then integrate this mode out and derive an effective field theory description for the Goldstone mode~\cite{Hellerman:2015nra,Monin:2016jmo}, which consists of the $A_{\ell\vec m}$ and $\hat \pi$. The third property is that, for the classically scale invariant cases, $(\bar\phi \phi)^2$ in $d=4$ and $(\bar\phi \phi)^3$ in $d=3$, we have \begin{equation} \omega_A(1)=1. \label{eq:spectrumDescendant} \end{equation} This equation is associated with the fact that $A_{1\vec m}$ and $A_{1\vec m}^\dagger$ are respectively proportional to the $K_{\vec {m}}$ and $P_{\vec {m}}$ generators (see for instance \REf{eq:LOgeneratorK}). As such they have scaling dimensions $-1$ and $1$. Acting with $A_{1\vec m}^\dagger$ on a state therefore produces a descendant.
Finally, we have that in the free limit, $\lambda =0$, the two modes become \begin{equation} \omega_A(\ell) = \ell, ~~ \omega_B(\ell) = \ell+d-2, \label{eq:SpectrumLimit0} \end{equation} and at finite coupling their asymptotic behavior is given by \begin{equation} \omega_A(\ell) \underset{\ell\to \infty}{=} \omega_B(\ell)\underset{\ell\to \infty}{=} \ell. \label{eq:SpectrumLargeL} \end{equation} Excitations around the charge $n$ ground state $|n\rangle$ are obtained by acting with the neutral modes $A_{\ell\vec m}^\dagger$ and $B_{\ell\vec m}^\dagger$ \begin{equation} \label{eq:FockStates2} (A^\dagger_{\ell_1m_1})^{n^A_1} \dots (B^\dagger_{j_1 k_1})^{n^B_1} \dots | n \rangle . \end{equation} Taking now into account that states involving at least one $A_{1\vec m}^\dagger$ are descendants leads to the spectrum of primary operators at leading order, which was mentioned at the end of section \ref{sec:epsilonIntro}. As we said, the $(\hat\pi,p_\pi)$ pair does not produce a Fock space. Instead, $e^{ i \frac{\hat\pi Y_{0\vec 0}}{f}}$ and $e^{-i \frac{\hat\pi Y_{0\vec 0}}{f}}$ respectively raise and lower the charge by one unit, thus mapping to the corresponding fixed-charge Fock spaces. \subsection{Relation between different Fock spaces in free theory\label{sec:twoFock}} Free field theory can be successfully studied around both the trivial $\phi =0$ and the non-trivial \REf{eq:SaddleSolution} saddles. This allows us to find explicitly the mapping between the two corresponding Fock spaces. We will do so focussing on the $d=3$ case. The map between the two spaces corresponds to a canonical transformation resulting from equations \REf{eq:aaCylinderMomentumComplex}, \REf{eq:polarCoordinatesFields}, \REf{eq:cylinderFluctuations} and \REf{eq:momentaSaddle} \begin{equation} (\hat \phi, p_{\hat \phi})\,,\quad (\hat {\bar \phi}, p_{\hat {\bar \phi}}) \qquad\Rightarrow\qquad (r,P)\,, \quad (\pi, \Pi) . \end{equation} We use the decomposition of the fields in harmonic components \REf{eq:harmonicComponents}. In three dimensions $m \in \{ -\ell,-\ell+1,\dots, \ell\}$ is a simple index. The fields clearly satisfy \begin{equation} \label{eq:hermiticityHarmonicComponents} \begin{split} r_{\ell m}(\tau) = (-1)^m \big( r_{\ell,-m}(-\tau) \big)^\dagger , & \quad \pi_{\ell m}(\tau) = (-1)^m \big( \pi_{\ell,-m}(-\tau) \big)^\dagger , \\ P_{\ell m}(\tau) = (-1)^m \big( P_{\ell,-m}(-\tau) \big)^\dagger , & \quad \Pi_{\ell m}(\tau) = (-1)^m \big( \Pi_{\ell,-m}(-\tau) \big)^\dagger .
\end{split} \end{equation} These components can be written in terms of the zero mode and of creation and annihilation operators, yielding \begin{equation}\label{eq:canonicalTransform0} \begin{split} r_{00}(\tau) & = p_\pi + \frac{1}{\sqrt{2}} \left( B_{00}(\tau) + B_{00}^\dagger (-\tau) \right) ,\\ \pi_{00}(\tau) & = \hat \pi + \frac{i}{\sqrt{2}} \left( B_{00}^\dagger(-\tau) - B_{00}(\tau) \right) ,\\ P_{00}(\tau) & = \frac{i}{\sqrt{2}} \left( B_{00}^\dagger(-\tau) - B_{00}(\tau) \right) ,\\ \Pi_{00}(\tau) & = p_\pi , \end{split} \end{equation} and (for $\ell > 0$) \begin{equation}\label{eq:canonicalTransformLM} \begin{split} r_{\ell m}(\tau) & = \frac{1}{2\sqrt{\omega_\ell}} \left[ A_{\ell m}(\tau)+(-1)^m A_{\ell,-m}^\dagger(-\tau)+B_{\ell m}(\tau)+(-1)^m B_{\ell,-m}^\dagger (-\tau) \right] ,\\ \pi_{\ell m}(\tau) & = \frac{i}{2\sqrt{\omega_\ell}} \left[ A_{\ell m}(\tau)-(-1)^m A_{\ell,-m}^\dagger(-\tau)-B_{\ell m}(\tau)+(-1)^m B_{\ell,-m}^\dagger (-\tau) \right] ,\\ P_{\ell m}(\tau) & = \frac{i}{2\sqrt{\omega_\ell}} \left[-(-1)^m \ell A_{\ell,-m}(\tau)+ \ell A_{\ell,m}^\dagger(-\tau)-(-1)^m (\ell+1) B_{\ell,-m}(\tau)+(\ell+1) B_{\ell,m}^\dagger (-\tau) \right] ,\\ \Pi_{\ell m}(\tau) & = \frac{1}{2\sqrt{\omega_\ell}} \left[ (-1)^m (\ell+1) A_{\ell,-m}(\tau)+(\ell+1) A_{\ell,m}^\dagger(-\tau)- (-1)^m \ell B_{\ell,-m}(\tau) - \ell B_{\ell,m}^\dagger (-\tau) \right] . \end{split} \end{equation} Here we do not restrict to the quadratic Hamiltonian for fluctuations around the saddle, but take into account the exact solutions of the equations of motion. Thus the operators $A_{\ell m}(\tau),B_{\ell m}(\tau)$ have a complicated time dependence, not just a simple phase rotation. However, they satisfy the commutation relations \REf{eq:commutationHarmonicComponents}, (\ref{eq:commutationAABBPpipi}) and hermiticity \REf{eq:hermiticityHarmonicComponents} at all $\tau$. At $\tau=0$ they coincide with the $\tau$-independent creation-annihilation operators introduced in section \ref{sec:SpectrumFluctuations} for quadratic fluctuations. Our goal is to express these operators in terms of the ladder operators of vacuum fluctuations. The form of \REf{eq:polarCoordinatesFields} makes the mapping non-linear, so that a closed-form solution is difficult to find. However, at large $n$ the solution can be reliably expressed as a systematic expansion in inverse powers of $n$. We will be studying fluctuations around the lowest energy state with charge $n$, for which $\langle a_{00}^\dagger a_{00} \rangle \sim n$. The large charge expansion can then be organized by assigning to operators a scaling with $n$ \begin{equation} a_{00} \sim O(\sqrt{n}), ~~~~~~ a_{\ell\neq0,m}\sim b_{\ell m} \sim O(1). \label{eq:aanScaling} \end{equation} For instance, by singling out $a_{00}^\dagger a_{00}$ in the expression for $Q$ \REf{eq:Qaabb} we can write \begin{equation} \label{eq:a00a00replacement} a_{00}^\dagger a_{00} = n\left\{1 + \frac{1}{n} \left[ Q-n + b_{00}^\dagger b_{00} - \sum_{\ell=1}^\infty \sum_{m=-\ell}^\ell \left(a_{\ell m}^\dagger a_{\ell m} - b_{\ell m}^\dagger b_{\ell m}\right) \right] \right\} , \end{equation} where the term in square brackets represents an $\mathcal O(n^0)$ perturbation. In what follows we treat the fields as classical variables, disregarding issues of ordering. Expressions for quantum operators can be restored, in principle, by finding an appropriate ordering such that the commutation relations are satisfied.
Our goal can be achieved through the following steps: \begin{enumerate} \item Remembering that for the free theory in $d=3$ we have \begin{equation} \mu_6(0,3) = \frac{1}{2} , \quad f = \sqrt{\frac{n}{2\pi}}, \quad \omega_\ell = \ell+\frac{1}{2} , \end{equation} and combining equations \REf{eq:aaCylinderFieldComplex}, \REf{eq:polarCoordinatesFields}, \REf{eq:SaddleSolution} and \REf{eq:cylinderFluctuations}, we can write \begin{equation} \frac{f+r}{\sqrt{2}} e^{\frac{i \pi}{f}} = \sum_{\ell=0}^\infty \sum_{m=-\ell}^\ell \frac{1}{\sqrt{2\omega_\ell}} \left( a_{\ell m}^\dagger e^{\ell \tau} Y_{\ell m}^*(\vec n) + b_{\ell m}e^{-(\ell+1) \tau} Y_{\ell m}(\vec n) \right) \equiv h(\tau, \vec n)\,. \end{equation} It is also convenient to write \begin{align} r(\tau, \vec n) & = \sqrt{2 h(\tau, \vec n) h(-\tau,\vec n)^\dagger} -f , \\ e^{\frac{i \pi(\tau,\vec n)}{f}} & = \frac{h(\tau, \vec n)}{\sqrt{ h(\tau,\vec n) h(-\tau,\vec n)^\dagger } } . \label{eq:eipi} \end{align} Notice that, here and later, we formally treat $a_{00}$ and $a_{00}^\dagger$ as invertible, as we are working in a subspace with large charge. For example, we can write \begin{equation} \frac{1}{\sqrt{ h(\tau,\vec n) h(-\tau,\vec n)^\dagger } } = \frac{1}{\sqrt{ \frac{n}{2\pi} + s(\tau,\vec n)} } \approx \sqrt{\frac{2\pi}{n}} -\sqrt{\frac{2\pi^3}{n^3}} s(\tau,\vec n) + 3\sqrt{\frac{\pi^5}{2n^5}}(s(\tau,\vec n))^2 + \dots \end{equation} where we used $a_{00}^\dagger a_{00}=n+\dots$ and parametrized all subleading effects by $s(\tau,\vec n)$. \item Using the orthonormality of spherical harmonics \REf{eq:orthonormalityHarmonics}, we extract the harmonic components $r_{\ell m},\pi_{\ell m},P_{\ell m},\Pi_{\ell m}$ from \REf{eq:harmonicComponents}. \item We finally solve \REf{eq:canonicalTransform0} and \REf{eq:canonicalTransformLM} for $A_{\ell m},B_{\ell m}$, $\hat \pi$ and $p_\pi$. \end{enumerate} \paragraph{Leading order}\hfill At leading order in the $n^{-1}$ expansion, we get \begin{equation} p_\pi = 0, ~~ \exp \left[ i \frac{\hat \pi }{\sqrt{2n}} \right]=\frac{a^\dagger_{00}}{\sqrt{n}}, ~~ B_{\ell m}(\tau) = \frac{a_{00}b_{\ell m}}{\sqrt{n}}e^{-(\ell +1)\tau}, ~~ A_{\ell m} (\tau) = \frac{a^\dagger_{00}a_{\ell m}}{\sqrt{n}}e^{-\ell \tau}. \label{eq:twoFockSpacesRel} \end{equation} As explained, the zero-mode $\hat\pi$ is kept in the exponential. This also ensures that the expressions are polynomial (monomial at this order) in the vacuum ladder operators. One further justification of the exponential notation will appear when computing the propagator in Appendix \ref{sec:propagatorCylinder}. The commutation relations have the form \begin{eqnarray} \left [ A_{\ell m}, A_{\ell 'm'}^\dagger \right ] & = & \frac{1}{n} \left (a_{00}^\dagger a_{00} \, \delta_{\ell\ell'} \delta_{mm'} - a_{\ell m} a_{\ell'm'}^\dagger\right )=\delta_{\ell\ell'} \delta_{mm'}+O\left ( n^{-1}\right ), \\ \left [ B_{\ell m}, B_{\ell'm'}^\dagger \right ] & = & \frac{1}{n} \left (a_{00} a_{00}^\dagger \, \delta_{\ell\ell'} \delta_{mm'} + b_{\ell m} b_{\ell 'm'}^\dagger \right )=\delta_{\ell\ell'} \delta_{mm'}+O\left ( n^{-1}\right ), \end{eqnarray} which are canonical at the required accuracy (see \REf{eq:commutationAABBPpipi}). \paragraph{Next to leading order}\hfill We find that $\exp\left[ i \frac{\hat \pi }{\sqrt{2n}} \right]$ is still given by (\ref{eq:twoFockSpacesRel}) while $p_\pi$ is given by its exact result (\ref{eq:pPiExact}). The expressions for the other ladder operators are long.
Therefore, here we provide only that for $A_{\ell m}$, since we will need it in the next section (that for $B_{\ell m}$ can be found in Appendix \ref{sec:appNLOFockStates}) \begin{equation} \label{eq:NLOFockA} \begin{split} A_{\ell m} =\ & \frac{a_{00}^\dagger a_{\ell m}}{\sqrt{n}} + \frac{1}{4(1+2\ell )n^{3/2}} \Big((1+4\ell ) \big( n b_{00} - b_{00}^\dagger (a_{00}^\dagger)^2 \big) a_{\ell m} -2 n b_{00}^\dagger b_{\ell m} \\ & \hspace{126pt} + (-1)^m \big( (-1+2\ell ) n b_{00}^\dagger + (1+2\ell ) b_{00} a_{00}^2\big) a_{\ell ,-m}^\dagger \\ & \hspace{126pt} - (-1)^{m} \big( 2(1+\ell ) n b_{00} + 2 \ell b_{00}^\dagger (a_{00}^\dagger)^2 \big)b_{\ell ,-m}^\dagger \Big) \\ & + \!\!\!\! \sum_{\substack{\ell_1,\ell_2 > 0 \\ \text{all } m_1,m_2}} \!\!\!\! \frac{(-1)^m\sqrt{\pi}C_{-m,m_1,m_2}^{\ell ,\ell_1,\ell_2}}{8\sqrt{2\omega_\ell \omega_{\ell_1}\omega_{\ell_2}}n^{3/2}} \Big( - (2+3\ell +\ell_1+\ell_2) (a_{00}^\dagger)^2 a_{\ell_1,m_1}a_{\ell_2,m_2} \\[-20pt] & \hspace{145pt} + 2 (1+\ell -\ell_1+3\ell _2) n b_{\ell_1,m_1} a_{\ell_2,m_2} \\ & \hspace{145pt} + (\ell -\ell_1-\ell_2) a_{00}^2 b_{\ell_1,m_1}b_{\ell_2,m_2} \\ & \hspace{145pt} + 2(-1)^{m_2} (2+\ell+3\ell_1+\ell_2) n a_{\ell_1,m_1}a_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - 2 (-1)^{m_1}(1+3\ell-\ell_1+\ell_2) (a_{00}^\dagger)^2 b_{\ell_1,-m_1}^\dagger a_{\ell_2,m_2} \\ & \hspace{145pt} + 2 (-1)^{m_2} (1+\ell-\ell_1+\ell_2) a_{00}^2 b_{\ell_1,m_1} a_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - 2 (-1)^{m_2}(2-\ell+\ell_1+3\ell_2) n b_{\ell_1,m_1}b_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} + (-1)^{m_1+m_2}(2+\ell+\ell_1+\ell_2)a_{00}^2 a_{\ell_1,-m_1}^\dagger a_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - 2 (-1)^{m_1+m_2} (1-\ell+3\ell_1-\ell_2) n b_{\ell_1,-m_1}^\dagger a_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - (-1)^{m_1+m_2}(3\ell-\ell_1-\ell_2)(a_{00}^\dagger)^2 b_{\ell_1,-m_1}^\dagger b_{\ell_2,-m_2}^\dagger \Big) \,. \end{split} \end{equation} Here we introduced the Gaunt coefficients \begin{equation} \label{eq:gaunt} \begin{split} C_{m_1 m_2 m_3}^{\ell_1\ \, \ell_2\ \, \ell_3} & = \int Y_{\ell_1 m_1} Y_{\ell_2 m_2} Y_{\ell_3 m_3} d\Omega_2 \\ & = \sqrt{\frac{(2\ell_1+1)(2\ell_2+1)(2\ell_3+1)}{4\pi}} \begin{pmatrix} \ell_1 & \ell_2 & \ell_3 \\ 0 & 0 & 0 \end{pmatrix} \begin{pmatrix} \ell_1 & \ell_2 & \ell_3 \\ m_1 & m_2 & m_3 \end{pmatrix} , \end{split} \end{equation} given in terms of Wigner $3j$ symbols. These coefficients vanish unless the spins satisfy the triangle inequality \begin{equation} \label{eq:triangle} | \ell_1-\ell_2 | \leq \ell_3 \leq \ell_1+\ell_2 , \end{equation} meaning each spin has to be in the tensor product of the other two. Moreover Gaunt coefficients vanish unless $m_1+m_2+m_3=0$ and $\ell_1+\ell_2+\ell_3$ is even. Some remarks on (\ref{eq:NLOFockA}) are in order. First, notice that the NLO corrections have relative size $n^{-1/2}$. Higher orders behave similarly, resulting in an expansion in powers of $n^{-1/2}$. However, when computing observables, the NLO terms do not interfere with the leading order terms, resulting in an expansion in powers of $1/n$, as expected in the semiclassical framework. This is exemplified in section \ref{sec:observable2_estimate}. When considering even higher orders, $A_{\ell m}$ will contain sums over $4,6,\dots$ spins with coefficients that, like \REf{eq:gaunt}, are integrals of products of respectively $5,7,\dots$ spherical harmonics. 
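As an aside, the definition \REf{eq:gaunt} and the selection rules just stated are easily verified with \texttt{sympy} (assuming its \texttt{sympy.physics.wigner} module is available; the checks below are ours and purely illustrative):
\begin{verbatim}
from sympy import pi, sqrt
from sympy.physics.wigner import gaunt, wigner_3j

def gaunt_via_3j(l1, l2, l3, m1, m2, m3):
    # right-hand side of eq. (eq:gaunt)
    return (sqrt((2 * l1 + 1) * (2 * l2 + 1) * (2 * l3 + 1) / (4 * pi))
            * wigner_3j(l1, l2, l3, 0, 0, 0)
            * wigner_3j(l1, l2, l3, m1, m2, m3))

for ls, ms in [((2, 3, 5), (1, 1, -2)), ((1, 1, 2), (0, 0, 0)),
               ((4, 2, 2), (-3, 2, 1))]:
    assert abs(float(gaunt(*ls, *ms)) - float(gaunt_via_3j(*ls, *ms))) < 1e-12

# the coefficient vanishes unless m1+m2+m3 = 0, l1+l2+l3 is even,
# and the triangle inequality (eq:triangle) holds
assert gaunt(2, 2, 3, 0, 0, 0) == 0   # l1+l2+l3 odd
assert gaunt(1, 1, 3, 1, -1, 0) == 0  # triangle inequality violated
\end{verbatim}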
Since such Gaunt-type coefficients grow like powers of $\ell$, one would expect the parameter controlling the convergence of the expansion to go like $\frac{\ell^\kappa}{n}$ for some $\kappa$. We will discuss this in detail in section \ref{sec:large_spin}. Finally, notice that some of the NLO terms do not annihilate the state $|n\rangle$, so that $A_{\ell m}|n\rangle \neq 0$. The reason is that $|n\rangle$ is the lowest energy state of charge $n$ for the full Hamiltonian (the one associated with \REf{eq:FluctuationsLagrangian}), while $A_{\ell m}$ and $B_{\ell m}$ are the ladder operators for the quadratic Hamiltonian (associated with \REf{eq:quadraticLagrangian}). The vacuum $|\Omega\rangle$, which is annihilated by $A_{\ell m}$ and $B_{\ell m}$, coincides with $|n\rangle$ only at leading order, hence our result. \subsection{Mapping superfluid excitations to operators \label{sec:FluidFluctuationsToOperators}} With the tools presented in the previous sections, we are now ready to identify operators and map them to superfluid excitations. The latter, as defined in \REf{eq:FockStates2}, can be expressed as a power series in $n^{-1/2}$ whose coefficients are polynomials in $a_{\ell m}, a^\dagger_{\ell m}, b_{\ell m}, b^\dagger_{\ell m}$ acting on the free Fock vacuum $|0\rangle$. These, by the operator-state correspondence, can in turn be written in terms of operators involving $\bar\phi, \phi$ and their derivatives. To identify primary states, we must express the special conformal generators in terms of $A_{\ell m}, B_{\ell m}$ and $A_{\ell m}^\dagger, B_{\ell m}^\dagger$. This is done by inverting \REf{eq:NLOFockA} and the other formulae relating ladder operators in the two frames, and plugging the result into (\ref{eq:K0aabb}), (\ref{eq:K-aabb}), and (\ref{eq:K+aabb}). For instance, at leading order, using (\ref{eq:twoFockSpacesRel}), we get \begin{equation} \label{eq:LOgeneratorK} K_0 = \sqrt{n} A_{1,0} , ~~ K_- = -\sqrt{n} A_{1,-1}, ~~ K_+ = \sqrt{n} A_{1,1}. \end{equation} Thus, as was already discussed, at leading order only strings of creation operators not containing $A_{1,m}^\dagger$ generate primaries. There is a clear parallel with the conclusion of section \ref{sec:vacuum_primaries_construction}. This is due to the fact that, at leading order, states generated by creation operators $A_{\ell m}^\dagger, B_{\ell m}^\dagger$ correspond to the states generated by $\mathcal A_{\ell,m}^\dagger, \mathcal B_{\ell,m}^\dagger$. As a result, due to the following identities \begin{equation} A^\dagger_{\ell m} | n \rangle = \frac{a_{00}a_{\ell m}^\dagger}{\sqrt{n}} \frac{ (a_{00}^\dagger )^n}{\sqrt{n!}} | 0 \rangle= \frac{a_{\ell m}^\dagger (a_{00}^\dagger )^{n-1}}{\sqrt{(n-1)!}} | 0 \rangle = \frac{(4\pi)^{\frac{n-1}{2}}}{\sqrt{(n-1)!}}\mathcal Y_{\ell m}^{\mu_1\dots\mu_\ell} \phi^{n-1} \partial_{\mu_1} \cdots \partial_{\mu_\ell} \phi |0\rangle, \end{equation} the state $A^\dagger_{\ell m} | n \rangle$ corresponds at leading order to an operator with $\ell$ derivatives all acting on the same field \begin{equation} \label{eq:LOSpinlOperator} \phi^{n-1}\, \partial_{\mu_1}\cdots \partial_{\mu_\ell}\phi. \end{equation} \section{How large is ``large spin''?\label{sec:large_spin}} Quantization around the saddle offers a systematic computation of observables for states with charge $n$ as a power series in $n^{-1}$. Clearly, as $n\to \infty$ the procedure works for states with finite spin $\ell$, and for the ground state $| n \rangle$ in particular. In this section we will study the convergence of the expansion when both $\ell$ and $n$ become large.
\subsection{Matrix elements for excited states\label{sec:observable1_estimate}} On general grounds we expect the expansion to be controlled by the ratio $\ell^\kappa/n$ for some $\kappa$. One way to find out what $\kappa$ is would be to perform NLO computations around the non-trivial saddle. However, we will instead make use of the fact that we know a class of primary states in free theory in exact form, and not just as an expansion in inverse powers of the charge. That will give us full control of the computation, allowing us to trace any transition between different regimes (see Section~\ref{sec:primary_phonons}). Intuitively we expect the radial component of $\phi$ to be a good parameter to control the validity of the semiclassical approximation. The smallness of its quantum fluctuations relative to its expectation value is a necessary condition for the semi-classicality of a state\footnote{For an illustrative example based on the spinning top see~\cite{Monin:2016jmo}.}. Fluctuations comparable to the expectation value, and thus consistent with the vanishing of $\phi$ (at least somewhere), signal the breakdown of the semiclassical approximation. We will thus study the large $n$ behavior of the following matrix elements \begin{equation} \Phi(\theta;\ell,n,p) =\prescript{}{A}\langle n; \ell, \ell | : \partial_\tau^p\hat {\bar \phi} (\tau, \vec n) \partial_\tau^p \hat \phi (\tau, \vec n) : | n; \ell, \ell \rangle_A , \label{eq:OrderParameter} \end{equation} for arbitrary integer $p$, where $|n;\ell,\ell\rangle_A$ is the primary state found in (\ref{eq:one-phononStateA}). Rewriting the fields in terms of ladder operators \REf{eq:aaCylinderFieldComplex} and \REf{eq:aaCylinderFieldComplexConj} yields \begin{equation} \Phi(\theta; \ell, n, p) = \alpha_0^2 \sum_{k,k' = 0}^{\ell} \langle \psi_k | \sum_{\substack{ \ell',m' \\ \ell'',m''} } (-1)^p \omega_{\ell'}^p \omega_{\ell''}^p a^\dagger_{\ell',m'} a_{\ell'',m''}Y^*_{\ell',m'} Y_{\ell'',m''} \frac{e^{\left (\omega_{\ell'}-\omega_{\ell''} \right ) \tau}}{\sqrt{4 \omega_{\ell'} \omega_{\ell''} }} | \psi_{k'} \rangle, \end{equation} where we introduced the following notation \begin{equation} \label{eq:PrimaryPsiK} | \psi_k \rangle = \gamma_{k,\ell} (a_{00}^\dagger)^{n-k-1} ( a_{1,1}^\dagger )^{k} a_{\ell-k, \ell-k}^\dagger | 0 \rangle. \end{equation} Using that for $k, k' \neq \ell-1,\ell$ we have \begin{align} & \langle 0 | a_{00}^{n-k-1} a_{11}^{k}a_{\ell-k,\ell-k} a^\dagger_{\ell',m'} a_{\ell'',m''} ( a_{00}^\dagger )^{n-k'-1} ( a_{11}^\dagger )^{k'} a_{\ell-k',\ell-k'}^\dagger | 0 \rangle \nonumber \\ & {} = (n-k-1)! k! \left[ (n-k-1) \delta_{\ell' 0} + k \delta_{\ell' 1} + \delta_{\ell', \ell-k}\right] \delta_{\ell' \ell''} \delta_{\ell' m'} \delta_{\ell' m''} \delta_{k k'}, \end{align} and neglecting the terms with $k,k' = \ell-1,\ell$, which are subleading, we get \begin{equation} \label{eq:OrderParameterSum} \Phi (\theta; \ell, n, p) = (-1)^p \alpha_0^2 \sum_{k=0}^{\ell-2} \frac{(2\ell)!(n-k-1)!}{2^{k+1} k! (2\ell-2k)!} \left [ (n-k-1) |Y_{00}|^2 \omega^{2p-1}_0 + k |Y_{11}|^2 \omega_1^{2p-1}+|Y_{\ell-k,\ell-k}|^2 \omega_{\ell-k}^{2p-1} \right ] . \end{equation} We then use \begin{equation} | Y_{\ell\ell}(\varphi,\theta) | = \frac{1}{2^\ell \ell !}\sqrt{\frac{(2\ell+1)!}{4\pi} } \sin^{\ell} \theta\,, \end{equation} which means $|Y_{\ell-k,\ell-k}|^2$ is maximal at $\theta=\pi/2$.
Approximating factorials by Stirling's formula, we finally find \begin{align} \Phi(\pi/2;\ell,n,p) = \frac {n}{4^{p+1}\pi} \Bigg[ & Q_0(\ell,p) + \frac{Q_1(\ell,p)}{n} + \frac{Q_2(\ell,p)}{n^2} + \dots \nonumber \\ & + \frac{\ell^{\xi}}{n}\left (P_0(\ell,p) + \frac{P_1(\ell,p)}{n}+\frac{P_2(\ell,p)}{n^2} + \dots \right ) \Bigg] , \label{eq:OrderParameterResult} \end{align} where $P_k(\ell,p)$ and $Q_k(\ell,p)$ are $n$-independent functions which at large $\ell$ scale as $\ell^k$, and $\xi = 2p-\frac{1}{2}$. It can be concluded that for the case at hand $\kappa=1$. In other words, the semiclassical expansion can be trusted as long as $\ell \ll n$. We expect that for a wide class of observables, even for interacting theories, computations around the non-trivial saddle can be organized in a systematic series in powers of $\ell /n$. Another instance is examined in Appendix \ref{sec:observable2_estimate}. However, not all quantities have this type of expansion, as we now discuss. \subsection{Primary states \label{sec:primary_phonons}} Let us consider $1/n$ corrections to the operator whose leading term is given by \REf{eq:LOSpinlOperator} and whose associated state is given in exact form by \REf{eq:one-phononStateA}\footnote{The spin $\ell$ is bounded by $2 \leq \ell < n$.}. In the notation of \REf{eq:PrimaryPsiK}, we can write the state succinctly as \begin{equation} \label{eq:primSumPsiK} |n; \ell, \ell\rangle_A = \alpha_0 \sum_{k=0}^\ell |\psi_k\rangle\,. \end{equation} The vectors $|\psi_k\rangle$ are mutually orthogonal, but they are not normalized. Comparing their relative norms we find \begin{equation} \label{eq:expansionParamNorm} \frac{\langle \psi_k | \psi _k \rangle}{\langle \psi_{k-1} | \psi _{k-1} \rangle} =\frac{(\ell-k+1)(2\ell-2k+1)}{(n-k)k}\sim \frac{\ell^2}{nk}\,, \end{equation} where in the last step we used $k\leq \ell\ll n$. This implies that the norms $\langle \psi_k |\psi_k \rangle\propto (\ell^2/n)^k/k!$ behave like the terms in the expansion of the exponential $\exp(\ell^2/n)$. We then have two regimes depending on whether $\ell^2/n\ll 1$ or $\ell^2/n\gtrsim 1$. In the first case the sequence $\langle \psi_k |\psi_k \rangle$ is peaked at $k=0$. Instead, for $\ell^2/n\gtrsim 1$ the sequence is peaked at \begin{equation} \label{eq:primaryKMax} k_{\max} = \frac{2\ell^2}{n}, \end{equation} and has a width of order $\sqrt{k_{\max}} = \ell / \sqrt{n}$ (see Figure~\ref{fig:relNorm}). Thus, the primary state \REf{eq:one-phononStateA} is dominated by the sum of $| \psi _k \rangle$ roughly in the range $k_{\max}-\sqrt{k_{\max}}\lesssim k\lesssim k_{\max}+\sqrt{k_{\max}}$. \begin{figure}[h] \begin{center} \includegraphics[width=10cm]{relativeNorm} \caption{\label{fig:relNorm} Normalized $\langle \psi_k |\psi_k \rangle$ as a function of $k$ for $n=10^3$ and $\ell =500$.} \end{center} \end{figure} This result seems to suggest that, for primary states, the $1/n$ expansion (\ref{eq:one-phononStateA}), or equivalently \REf{eq:primSumPsiK}, breaks down at $\ell \sim \sqrt{n}$. However, the expressions for (primary) operators are coordinate dependent. What we have shown here is that, when expressed in terms of creation-annihilation operators $a_{\ell,m}$, $a^\dagger_{\ell,m}$, primary operators are written as power series in $\ell/\sqrt{n}$.
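The peak position \REf{eq:primaryKMax} is immediately confirmed numerically from the exact ratio \REf{eq:expansionParamNorm}; a minimal Python sketch, with illustrative values of $n$ and $\ell$ chosen in the regime $1\ll \ell^2/n\ll \ell$:
\begin{verbatim}
import math

n, ell = 10**6, 3000   # illustrative values with 1 << l^2/n << l
log_norm, best_k, best = 0.0, 0, 0.0
for k in range(1, ell + 1):
    # exact ratio <psi_k|psi_k> / <psi_{k-1}|psi_{k-1}>, eq. (expansionParamNorm)
    log_norm += math.log((ell - k + 1) * (2 * ell - 2 * k + 1) / ((n - k) * k))
    if log_norm > best:
        best_k, best = k, log_norm
print(best_k, 2 * ell ** 2 / n)   # -> 17 vs k_max = 18: they agree within
                                  #    the peak width ~ sqrt(k_max)
\end{verbatim}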
There may exist other coordinates that partially resum the series, leading to a manifest expansion in powers of $\ell/n$. The mere fact that the expectation value \REf{eq:OrderParameter}, which is coordinate-independent, is expressed as a power series in $\ell/n$ speaks in favor of that possibility. Unfortunately, those coordinates are certainly not the creation-annihilation operators corresponding to phonons $A_{\ell,m}$, $A^\dagger_{\ell,m}$. Indeed, rewriting the first two terms in (\ref{eq:one-phononStateA}) using the leading order relation \REf{eq:twoFockSpacesRel} gives \begin{equation} | n; \ell, \ell \rangle _A \underset{\ell \gg 1}{=} \alpha_0 \sqrt{(n-1)!} \left ( A_{\ell\ell}^\dagger - \frac{\sqrt{2}\ell}{\sqrt{n}}A_{\ell-1,\ell-1}^\dagger A_{1,1}^\dagger \right) |n\rangle . \end{equation} One may hope that the second term in parentheses is cancelled by the NLO corrections \REf{eq:NLOFockA}; however, it is straightforward to show that this is not the case. Using \REf{eq:gauntL1}, we can show that the only potentially relevant term in \REf{eq:NLOFockA} \begin{equation} \frac{(-1)^{\ell-1}\sqrt{\pi}C_{-\ell,m_1,m_2}^{\ell ,\ell_1,\ell_2}}{8\sqrt{2\omega_\ell \omega_{\ell_1}\omega_{\ell_2} n}} (2+3\ell +\ell_1+\ell_2) A^\dagger_{\ell_1,m_1}A^\dagger_{\ell_2,m_2}, \end{equation} scales as $O(\ell^0)/\sqrt{n}$, for $\ell_1=m_1=\ell-1$, $\ell_2 =m_2= 1$, so it cannot cancel the term scaling as $\ell/\sqrt{n}$. The conclusion of this section is that the semiclassical expansion can be trusted for spins as large as the $U(1)$ charge, $\ell \sim n$, as long as we are dealing with coordinate-independent quantities. On the other hand, if we want to identify primary states using the creation-annihilation operators corresponding to phonons, the perturbative expansion breaks down much earlier, for $\ell \sim \sqrt{n}$. We expect that for spins in the window $\sqrt{n}<\ell \ll n$ there should exist different semiclassical backgrounds, expanding around which would allow one to describe primary states perturbatively\footnote{Expanding the summand in \REf{eq:PrimaryNormalization} for large $\ell$ and $n$ and computing the sum via saddle-point approximation leads to \begin{equation} \sum_{k=0}^\infty \frac{1}{k!} \left ( \frac{2\ell^2}{n}\right )^k \exp \left ( -\frac{k^2}{\ell} \right ) = \exp \left \{ \frac{2\ell^2}{n} \left [ 1-\frac{2\ell}{n} + O \left ( \frac{\ell^2}{n^2} \right )\right ] \right \}, \end{equation} which suggests that this result can be obtained perturbatively in a double scaling limit $n \gg 1$, $\ell \gg 1$, $\ell / n = \text{fixed}$.}. \section{3-pt function \label{sec:3ptfunction}} In the next two sections we will further explore the semiclassical methodology described in Section~\ref{sec:saddle}. Focussing on the Wilson-Fisher fixed point in $4-\varepsilon$ dimensions, corresponding to the theory in eq. \REf{eq:phi4}, we will derive new results by studying $3$- and $4$-point functions involving two operators with large charge $n$ at next-to-leading order in $\varepsilon$ (or equivalently in $n^{-1}$). More precisely, we will compute correlators of the class presented in Eq.~\REf{eq:N+2Correlators} involving one or two additional operators $\mathcal O_i$, i.e.\ $N=1$ or $2$. For simplicity we will focus on insertions of just one specific type of neutral operator \begin{equation} \mathcal O (x) = (\bar \phi \phi)^k (x).
\end{equation} We start from the $3$-point function of $\bar\phi\phi$, which, up to the normalization, is fully determined by the scaling dimensions and a single fusion coefficient. The scaling dimension of $\phi^n$ is given by \REf{eq:phinDimension}, while that of $\bar \phi \phi$ can be easily computed using standard perturbation theory through Feynman diagrams as we will see shortly. As a result the only parameter to compute is the fusion coefficient $\lambda_{\bar \phi \phi}$, which appears in the 3-pt function of canonically (re-)normalized operators $[{\cal O}_i]$ as\footnote{Canonical normalization corresponds to $\left\langle 0| [{\cal O}](x)[{\cal O}](y)|0\right\rangle=(x-y)^{-2\Delta_{\cal O}}$.} \begin{equation} \left \langle [\bar \phi^n] (x_f) [\bar \phi \phi] (x) [ \phi^n](x_i) \right \rangle=\frac{\lambda_{\bar \phi \phi}}{(x_f - x_i)^{2\Delta_{\phi^n}-\Delta_\mathcal O} (x-x_i)^{\Delta_\mathcal O} (x_f-x)^{\Delta_\mathcal O}}. \end{equation} On the cylinder, using (\ref{eq:PlaneCylinderOperators}), one can more simply write \begin{equation} {\lambda_{\bar \phi \phi}}=\lim_{\substack{\tau_f\to \infty \\ \tau_i\to -\infty}}\frac{\langle 0| [ \widehat{{\bar\phi}^n}](\tau_f,\vec n_f) [\widehat{\bar \phi \phi}] (\tau, \vec n) [ \widehat{ \phi^n}](\tau_i,\vec n_i) | 0 \rangle}{\langle 0| [ \widehat{{\bar\phi}^n}](\tau_f,\vec n_f) [ \widehat{ \phi^n}](\tau_i,\vec n_i) | 0 \rangle} \equiv \langle n|[\widehat{\bar \phi \phi}](\tau,\vec n)|n\rangle . \label{lambdacyl} \end{equation} For the theory and the operators at hand, renormalization is multiplicative, so that canonically normalized and bare operators are related by $[{\cal O}_i]={\cal O}_i/Z_i$, with $Z_i$ generally UV divergent. For instance, the 2-point function of $\bar \phi \phi$ is given by \begin{equation} \langle (\bar \phi \phi)(x) (\bar \phi \phi)(y) \rangle =\frac{Z^2_{\bar \phi \phi}}{(x-y)^{2\Delta_{\bar \phi \phi}}}, \end{equation} where at one loop order, i.e. just the diagram in Fig.~\ref{fig:phiphiRenormalization}, \begin{equation} Z_{\bar \phi \phi} = \Omega^{-1}_{d-1} (d-2)^{-1} \left [1-\frac{\lambda}{8\pi^2} \, \frac{1}{4-d} \right ] \left [ 1- \frac{\lambda}{16\pi^2} \left ( 1+\gamma+\log \pi \right ) \right ], \label{eq:NormRenorm} \end{equation} which implies the scaling dimension is \begin{equation} \Delta_{\bar\phi \phi}\equiv (d-2)+\gamma_{\bar \phi \phi} = (d-2)+ \frac{\lambda}{8\pi^2}. \label{eq:phiphiAnomalousDimension} \end{equation} \begin{figure}[h] \centering \includegraphics{Pictures/phibarphi_renorm.pdf} \caption{\label{fig:phiphiRenormalization} One-loop renormalization of $\bar\phi \phi$.} \end{figure} For large $n$, \REf{lambdacyl} can be computed semiclassically by expanding around the saddle point (\ref{eq:SaddleSolution}). Equation (\ref{eq:aroundSaddle}) yields in this case \begin{equation} \lambda_{\bar \phi \phi} =Z^{-1}_{\bar \phi \phi} \, \frac{\displaystyle \int \mathcal D r \mathcal D \pi \, (\widehat{\bar\phi\phi}) (\tau, \vec n) e^{-\hat S[r,\pi]}} {\displaystyle \int \mathcal D r \mathcal D \pi \, e^{-\hat S[r,\pi]}}, \label{eq:SaddleFusion} \end{equation} where the path integrals have the boundary conditions specified by \REf{eq:cylinderPI}. \paragraph{Leading order:} the computation boils down to evaluating the integrands on the saddle, leading to \begin{equation} \label{eq:fusionPhiPhi} \lambda_{\bar \phi \phi} = f^2 \Omega_3 = \frac{n}{\mu_*}, \end{equation} where we used \REf{eq:ChargeRelationfnmu} and the leading order result $Z_{\bar \phi \phi}^{-1} = 2 \Omega_{3}$. 
For small $\lambda_* n$ we have $\mu_*=1$, see \REf{eq:mu_d_phi4}, and the result, $\lambda_{\bar \phi \phi}=n$, coincides with the tree-level computation using Feynman diagrams. In this section the symbol $\mu$ refers to $\mu_4(\lambda n, d)$ while $\mu_*$ refers to $\mu_4(\lambda_* n,4)$. Notice this is the chemical potential of the 4D theory evaluated at the critical coupling of the theory in $d=4-\varepsilon$, given in \REf{eq:phi4FixedPoint}. \paragraph{Next to leading order.} The result is independent of the choice of $(\tau, \vec n)$ in (\ref{eq:SaddleFusion}); therefore, we make the convenient choice $(\tau, \vec n)=(0,\hat n_d)$, with \begin{equation} \hat n_{d}=(\underbrace{0,0,\dots,0,1}_{d}). \label{eq:direction} \end{equation} By expanding around the saddle, the expectation value of the bare operator is then \begin{equation} \left \langle n | \left ( \bar \phi \phi \right )(0, \hat n_{d}) | n \right \rangle = \frac{1}{2} \left \langle n | f^2 + 2 f r (0, \hat n_{d}) + r^2(0, \hat n_{d}) | n \right \rangle, \label{eq:3ptFunc1} \end{equation} which at NLO, i.e.\ at one loop, gives \begin{equation} \left \langle n | \left ( \bar \phi \phi \right )(0, \hat n_{d}) | n \right \rangle = \frac{f^2}{2} - \left \langle r(0, \hat n_{d}) \int d\tau d \Omega_{d-1}\left [ r (\partial\pi)^2 - i \mu r^2 \dot \pi +\frac{\lambda f^2 r^3}{4} \right ] \right \rangle + \frac{1}{2}\left \langle r^2(0, \hat n_{d})\right \rangle, \label{eq:3ptFunc2} \end{equation} where within the $\left\langle\dots \right\rangle$ the fields $r, \pi$ are free fields \REf{eq:rpiDYExpansion} propagating according to the quadratic action expanded around the background. The resulting Feynman diagrams are depicted in Fig.~\ref{fig:lambdaNLO}. \begin{figure}[H] \centering \includegraphics[width=7cm]{Pictures/phibarphi_3pt.pdf} \caption{\label{fig:lambdaNLO} Topology of diagrams entering $\langle n | ( \bar \phi \phi ) | n \rangle $ at NLO.} \end{figure} The first step is to find the propagator of $(r,\pi)$. In matrix form this can be written as \begin{equation} D(\tau-\tau',\vec n\cdot\vec n') = \sum_\ell F^{(\ell )}(\tau-\tau') C^{(d/2-1)}_\ell(\vec n\cdot\vec n'), \end{equation} where $C^{(d/2-1)}_{\ell}(\cos \theta)$ are Gegenbauer polynomials and $F^{(\ell)}(\tau)$ is a $2\times 2$ matrix whose exact expression is given in Appendix \ref{sec:propagatorCylinder}. The details of the computation can be found in Appendix~\ref{app:lambdaComputation}. The result is \begin{equation} \lambda_{\bar \phi \phi} = \frac{n}{\mu_*}+\frac{2(3\mu_*^2+1)}{\left [ 2(3\mu_*^2-1) \right]^{3/2}}-\frac{3-2\mu_*^2+3\mu_*^4}{2(3\mu_*^2-1)} +\sum_{\ell=1}^\infty \left [ S_\ell(\mu_*) - c_{-1}(\mu_*) \ell -c_{0}(\mu_*) -\frac{c_{1}(\mu_*)}{\ell} \right ], \label{eq:FusionFinal} \end{equation} with \begin{equation} S_\ell(\mu) \equiv S_\ell(\mu,1,4), \end{equation} where \begin{equation} S_\ell(\mu,m,d)= \frac{2\ell+d-2}{\omega_B^2(0)} \, \frac{\omega_B(\ell) \omega_A(\ell) (3\mu^2+m^2)-J_\ell(\mu^2-m^2)}{\omega_B(\ell) \omega_A(\ell) \left [\omega_B(\ell) + \omega_A(\ell)\right ]} \, C_\ell^{(d/2-1)}(1), \label{eq:Sl_definition} \end{equation} while the coefficients $c_{-1,0,1}(\mu)$ are defined by the asymptotic behavior of the summand \begin{equation} S_\ell(\mu) \underset{\ell\to \infty}{\equiv} c_{-1}(\mu) \ell + c_{0}(\mu) + \frac{c_{1}(\mu)}{ \ell} + \dots, \end{equation} so as to render the sum in \REf{eq:FusionFinal} finite.
Their exact values are \begin{equation} c_{-1}(\mu) = c_0 (\mu) = \frac{\mu^2+1}{3\mu^2-1}, ~~ c_1 (\mu) = -\frac{\mu^4+2\mu^2-3}{2(3\mu^2-1)}. \end{equation} As discussed in Appendix~\ref{app:lambdaComputation}, the transcendental terms proportional to the Euler constant $\gamma$, and to $\ln \pi$, which normally appear in 1-loop expressions, cancel out once we fix $\lambda = \lambda_*$. For small $\lambda_* n$, expanding $\mu_*$ in a power series in $\lambda_* n$ \begin{equation} \mu_* = 1+\frac{\lambda_* n}{16\pi^2}-\frac{3}{2}\left( \frac{\lambda_* n}{16\pi^2} \right )^2+O(\lambda_*^3n^3), \end{equation} we get \begin{equation} \lambda_{\bar \phi \phi} \underset{\lambda_* n \to 0}{=} n \left [ 1- \frac{\lambda_* n}{16\pi^2}+\frac{5}{2}\left( \frac{\lambda_* n}{16\pi^2} \right )^2 + O (\lambda_* n)^3 \right ]+ \left [ 6\zeta^2(3)-\frac{13}{2} \right ]\left( \frac{\lambda_* n}{16\pi^2} \right )^2 + O (\lambda_* n)^3 + O (n^{-1}). \end{equation} For large $\lambda_* n$, and therefore $\mu_*\gg 1$, the sum in \REf{eq:FusionFinal} instead approximately satisfies \begin{equation} \sum_{\ell=1}^\infty \left [ S_\ell(\mu_*,1,4) - c_{-1}(\mu_*) \ell -c_{0}(\mu_*) -\frac{c_{1}(\mu_*)}{\ell} \right ] \underset{\mu_* \to \infty}{=} \frac{1}{6} \mu_*^2 \log \mu_*. \end{equation} Combining that with the leading contribution $n/\mu_*$ and using the relation $\mu_*^3=\lambda_* n / 4 \Omega_{3}$, which applies in the large $\lambda_* n$ regime, we get \begin{eqnarray} \lambda_{\bar \phi \phi} &\underset{\lambda_* n \to \infty}{=}& \frac{8\pi^2}{\lambda_*} \left ( \frac{\lambda_* n}{8\pi^2} \right )^{2/3} \left ( 1+\frac{\lambda_*}{144\pi^2} \log \frac{\lambda_* n}{8\pi^2}\right ) \nonumber \\ & \simeq & \frac{8\pi^2}{\lambda_*} \left ( \frac{\lambda_* n}{8\pi^2} \right )^{\frac{2}{3}+\frac{\lambda_*}{144\pi^2}} = \frac{5}{2\varepsilon} \left ( \frac{2\varepsilon n}{5} \right )^{\frac{2}{3}+\frac{\varepsilon}{45}} \sim n^{\frac{\Delta_{\bar \phi \phi}}{d-1}}, \end{eqnarray} where $\Delta_{\bar \phi \phi}$ is given by eq.~\REf{eq:phiphiAnomalousDimension} with $\lambda\to \lambda_*$. The scaling with $n$ is precisely as predicted by the large charge EFT description~\cite{Monin:2016jmo}. \section{4-pt function \label{sec:4ptfunction}} Focussing again on the Wilson-Fisher fixed point in $d=4-\varepsilon$ we will now study, by the same methodology, the four-point function with two insertions of $(\bar \phi \phi)$. Let us recall that a general 4-point correlator in a CFT can be written using $s$- and $t$-channel representations \begin{eqnarray} \langle \mathcal O_4 (x_4) \mathcal O_3 (x_3) \mathcal O_2 (x_2) \mathcal O_1 (x_1) \rangle & = & \frac {g_{12,34}(z, \bar z)} {x_{12}^{\Delta_1+\Delta_2} x_{34}^{\Delta_3+\Delta_4}} \left ( \frac {x_{24}} {x_{14}} \right ) ^{\Delta_1-\Delta_2} \left ( \frac {x_{14}} {x_{13}} \right ) ^{\Delta_3-\Delta_4} \label{eq:tChannel4ptFunction} \\ & = & \frac {g_{32,14}(1-z, 1-\bar z)} {x_{32}^{\Delta_3+\Delta_2} x_{14}^{\Delta_1+\Delta_4}} \left ( \frac {x_{24}} {x_{34}} \right ) ^{\Delta_3-\Delta_2} \left ( \frac {x_{34}} {x_{13}} \right ) ^{\Delta_1-\Delta_4} \end{eqnarray} where $z$ and $\bar z$ are defined by the conformal cross ratios according to \begin{equation} u=\bar z z=\frac {x_{12}^2 x_{34}^2} {x_{13}^2x_{24}^2},~~ v=(1-z)(1-\bar z)=\frac {x_{14}^2 x_{23}^2} {x_{13}^2x_{24}^2}. \label{eq:Definitionuvzz} \end{equation} Modulo kinematic factors, the relevant information is encapsulated in the $g_{ij,kl}(z,\bar z)$.
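As a simple numerical illustration of \REf{eq:Definitionuvzz} (a minimal sketch of ours, not needed for what follows), note that $z \bar z=u$ and $(1-z)(1-\bar z)=v$ imply $z+\bar z=1+u-v$, so that for any non-degenerate configuration of four points $z$ and $\bar z$ are recovered as the two roots of $t^2-(1+u-v)\,t+u=0$:
\begin{verbatim}
import numpy as np

# an arbitrary non-degenerate configuration of four points in R^4
x = [np.array(p) for p in ([0., 0., 0., 0.], [.3, .1, 0., .2],
                           [0., 0., 0., 1.], [5., 2., 1., 3.])]
r2 = lambda i, j: np.sum((x[i] - x[j])**2)   # squared distances x_ij^2

den = r2(0, 2) * r2(1, 3)
u = r2(0, 1) * r2(2, 3) / den                # cross ratio u
v = r2(0, 3) * r2(1, 2) / den                # cross ratio v

z, zb = np.roots([1., -(1. + u - v), u])     # roots of t^2-(1+u-v)t+u
assert np.isclose(z * zb, u) and np.isclose((1 - z) * (1 - zb), v)
\end{verbatim}
In Euclidean signature the two roots come out either complex conjugate or both real, consistently with the identification discussed next.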
For Euclidean signature, the two variables $z\equiv e^{\tau +i\theta}$ and $\bar z\equiv e^{\tau -i\theta}$ are related by complex conjugation. Using conformal transformations to map $x_1\to 0$, $x_4\to \infty$ and \begin{equation} x_3=\hat n\equiv ({0,0,\dots,0,1})\qquad\qquad x_2=\hat n(\theta) e^\tau\equiv ({0,0,\dots,\sin\theta,\cos\theta})e^\tau, \end{equation} we can rewrite \begin{eqnarray} g_s(z,\bar z)&\equiv &g_{12,34}(z,\bar z) = |z|^{\Delta_1} \langle {\cal O}_4 |\hat {\cal O}_3 (0,\hat n)\hat {\cal O}_2(\tau,\hat n(\theta))| {\cal O}_1 \rangle, \label{eq:CBsChannelGen}\\ g_t(z,\bar z)&\equiv &g_{32,14}(1-z, 1-\bar z) = \frac{| 1-z |^{\Delta_2+\Delta_3}}{|z|^{\Delta_2}} \langle {\cal O}_4 |\hat {\cal O}_3 (0,\hat n)\hat {\cal O}_2(\tau,\hat n(\theta))| {\cal O}_1 \rangle. \label{eq:CBtChannelGen} \end{eqnarray} The $g_{ij,kl}(z,\bar z)$ can be decomposed as a sum over the primary operators that appear in the operator product expansion (OPE) of $ij$ and $kl$ \begin{equation} g_{ij,kl}(z,\bar z) = \sum_{\alpha} \lambda_{ij\alpha} \bar \lambda_{kl\alpha} g_{\Delta_\alpha,\ell_\alpha}^{\Delta_{ji},\Delta_{kl}}(z, \bar z), ~~ \Delta_{ij}=\Delta_i-\Delta_j, \label{eq:s-t-channelCB} \end{equation} where $\alpha$ labels the primaries while $\Delta_\alpha$, $\ell_\alpha$ and $\lambda_{ij\alpha}$ respectively represent their dimensions, spins and fusion coefficients. The conformal blocks $g_{\Delta,\ell}^{\Delta_{ji},\Delta_{kl}}(z, \bar z)$ are completely fixed functions: their functional form is fixed by the conformal group and their normalization by \REf{eq:s-t-channelCB}. Their explicit expressions in $d=2,4$ can be found in~\cite{Dolan:2003hv}. What matters for our discussion is that in any dimension they admit a power series expansion in $|z|$~\cite{Dolan:2003hv,Hogervorst:2013sma} \begin{equation} g_{\Delta,\ell} ^{\Delta_{21},\Delta_{34}} (z,\bar z) =|z| ^{\Delta} \sum_{k=0}^\infty |z| ^ k \sum^{\ell+k}_{j=j_0(\ell,k)} A_{k,j}^{\Delta_{21},\Delta_{34}} (\Delta,\ell) C_{j}^{(d/2-1)}(\cos \theta), ~~ z=|z| e^{i \theta} \label{eq:ConfBlockStatesDescendantsGegenbauer} \end{equation} with $j_0(\ell,k)=\max \left ( \ell-k, k-\ell \mod 2\right )$, where the term proportional to $|z|^k C_j(\cos\theta)$ corresponds to the level $k$ descendant with spin $j$. The dimension and spin of the intermediate primaries are directly read from this expansion. The $A_{k,j}^{\Delta_{21},\Delta_{34}} (\Delta,\ell)$ are calculable coefficients, in particular $A_{0,0}^{\Delta_{21},\Delta_{34}} (\Delta,0)=1$.
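To make the structure of \REf{eq:ConfBlockStatesDescendantsGegenbauer} concrete, the short script below (ours, purely illustrative) tabulates which spins $j$ appear at each level $k$ for a given intermediate spin $\ell$; the lower limit $j_0(\ell,k)$ encodes the fact that only spins of the same parity as $\ell+k$ contribute:
\begin{verbatim}
# spins j contributing at level k in the conformal block expansion
def spins_at_level(l, k):
    j0 = max(l - k, (k - l) % 2)      # j_0(l, k)
    return list(range(j0, l + k + 1, 2))

for l in (0, 2):
    for k in range(4):
        print("l =", l, " level k =", k, " j in", spins_at_level(l, k))
\end{verbatim}
For instance, for $\ell=0$ the level $k=1$ contains only $j=1$, the spin-1 descendant $\partial_\mu {\cal O}$, a pattern we will encounter below when identifying descendants.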
We will here study the specific correlator \begin{equation} \left \langle [\bar \phi^n] (x_4) [\bar \phi \phi] (x_3) [\bar \phi \phi] (x_2) [ \phi^n](x_1) \right \rangle, \end{equation} so that equations (\ref{eq:CBsChannelGen}) and (\ref{eq:CBtChannelGen}) reduce to \begin{eqnarray} g_s(z,\bar z) &\equiv& g_{\phi^n, \bar \phi\phi; \bar \phi\phi, \bar \phi^n}(z,\bar z) = Z^{-2}_{\bar \phi \phi} |z|^{\Delta_{\phi^n}}\frac{\langle n | (\widehat {{\bar \phi} \phi}) (0,\hat n) (\widehat {{\bar \phi} \phi}) (\tau, \hat n(\theta)) | n \rangle}{\langle n | n \rangle}, \label{eq:CBsChannelChargen}\\ g_t(z,\bar z) &\equiv &g_{\bar \phi\phi, \bar \phi\phi; \phi^n, \bar \phi^n}(1-z,1-\bar z) = Z^{-2}_{\bar \phi \phi} \frac{|1-z|^{2\Delta_{\bar \phi \phi}}}{|z|^{\Delta_{\bar \phi \phi}}} \frac{\langle n | (\widehat {{\bar \phi} \phi}) (0,\hat n) (\widehat {{\bar \phi} \phi}) (\tau, \hat n(\theta)) | n \rangle}{\langle n | n \rangle}\,.\nonumber\\ \label{eq:CBtChannelChargen} \end{eqnarray} In the regime $\Delta_{\phi^n}\gg \Delta_{\bar \phi\phi}$, the $s$-channel is controlled by the ``Heavy-Light'' OPE, while the $t$-channel is controlled by the ``Heavy-Heavy'' and the ``Light-Light'' OPEs. \subsection{Leading order \label{sec:LO4ptFunction}} As before, the leading order contribution corresponds to evaluating the path integral on the saddle and gives \begin{equation} \frac{\langle n | (\hat {\bar \phi} \hat \phi) (0,\hat n_d) (\hat {\bar \phi} \hat \phi) (\tau, \vec n) | n \rangle}{\langle n | n \rangle} = \frac{f^4}{4}. \label{leading4} \end{equation} The implications of this result for the $s$- and $t$-channels are as follows. \paragraph{s-channel.} From \REf{eq:CBsChannelChargen} and \REf{eq:NormRenorm} we obtain \begin{equation} g_s(z,\bar z) = \left (f^2 \Omega_3 \right )^2 |z|^{\Delta_{\phi^n}}\, . \end{equation} Therefore, the only operator appearing in the $\phi^n\times \bar \phi \phi$ OPE is $ \phi ^n (x)$ itself, with the fusion coefficient \REf{eq:fusionPhiPhi}. Moreover, we see that at this order the descendants of $\phi^n$ do not contribute. This is to be expected, because the contribution of descendants is suppressed by powers of the ratio $\frac{\Delta_{\bar \phi \phi}}{\Delta_{\phi^n}}$, and thus by an inverse power of $n$, just as a consequence of conformal symmetry (see also~\cite{Jafferis:2017zna}). For instance, the first descendant term in the conformal block has coefficient \begin{equation} \label{eq:DescendantCoefficient} A_{1,1}^{\Delta_{21},\Delta_{34}} (\Delta,0) = \frac{(\Delta_{21}+\Delta)(\Delta_{34}+\Delta)}{4\Delta}, \end{equation} which, for the case at hand, equals \begin{equation} \frac{\Delta_{\bar \phi \phi}^2}{4\Delta_{\phi^n}}, \end{equation} and is suppressed in the limit $n\gg 1$. \paragraph{t-channel.} From eqs.~(\ref{eq:CBtChannelChargen},\ref{leading4}) we obtain, in terms of $y\equiv 1-z$, \begin{equation} g_{t}(y, \bar y) = \left ( \frac{n}{\mu} \right )^2 \frac{| y |^{2\Delta_{\bar \phi \phi}}}{|1-y|^{\Delta_{\bar \phi \phi}}}.
\label{eq:LOepsilontChannel} \end{equation} Expanding in powers of $y$ \begin{eqnarray} g_{t}(y, \bar y) = \left ( \frac{n}{\mu} \right )^2 \, |y|^4 \left [ 1+ |y| C_{1}^{(1)}(\cos\theta) + |y|^2 C_{2}^{(1)}(\cos\theta) + |y|^3 C_{3}^{(1)}(\cos\theta) + \dots \right ], \end{eqnarray} and comparing with the expansion in conformal blocks, \REf{eq:s-t-channelCB}, we deduce that in this channel there appears a tower of primary operators labelled by two integers $k$ and $\ell$, carrying spin $2\ell$ and dimension \begin{equation} \Delta_{(k,\ell)} = 4+2k +2\ell, \qquad\ell, k =0,1,2, \dots, \label{eq:t-ChannelSpectrumLO} \end{equation} and with fusion coefficients satisfying \begin{equation} \lambda^{n,n}_{(k,\ell)} \bar \lambda^{\bar \phi \phi,\bar \phi \phi}_{(k,\ell)}= \frac{f^4}{4}(-1)^k \frac{ (k!)^2 (k+2\ell)!(k+2\ell+1)! }{ (2k)! (2k+4\ell+1)! }. \end{equation} At weak coupling these operators correspond to \begin{equation} \mathcal O_{(k,\ell)} (x) = \left ( \bar \phi \phi \, \partial^{2k} \partial_{\{\mu_1} \dots \partial_{\mu_{2\ell}\}} \bar \phi \phi \right ) (x), \label{eq:t-ChannelOperatorsLO} \end{equation} where $\{ \}$ indicates the traceless symmetric component. \subsection{NLO \label{sec:NLO4ptFunction}} At next to leading order we must consider, in full analogy with \REf{eq:3ptFunc1}, \begin{equation} \langle n | (\bar \phi \phi) (0,\hat n) (\bar \phi \phi) (\tau,\hat n(\theta)) | n \rangle = \langle n | \left [ \frac{f^2}{2} + f r (0, \hat n) + \frac{r^2(0, \hat n) }{2} \right ] \left [ \frac{f^2}{2} + f r (\tau,\hat n(\theta)) + \frac{r^2(\tau, \hat n(\theta)) }{2} \right ] | n \rangle\,, \label{eq:4ptFunction1} \end{equation} from which both connected and disconnected diagrams arise at NLO, see Fig.~\ref{fig:4ptFunction}. \begin{figure}[H] \centering \includegraphics[width=10cm]{Pictures/phibarphi_4pt.pdf} \caption{\label{fig:4ptFunction} Topology of diagrams entering $\langle n | ( \bar \phi \phi ) ( \bar \phi \phi ) | n \rangle $.} \end{figure} Disconnected diagrams just correspond to factorized 3-point functions, which we computed before. Therefore, what is left to compute is the one-phonon exchange connected diagram, which leads to \begin{equation} Z_{\bar \phi \phi}^{-2}\frac{\langle n | (\hat {\bar \phi} \hat \phi) (0,\hat n_d) (\hat {\bar \phi} \hat \phi) (\tau, \vec n) | n \rangle}{\langle n | n \rangle} = \lambda_{\bar \phi \phi}^2 \left [ 1 +\frac{4\mu\Omega_3}{n}D_{rr}(z,\bar z) \right ], \end{equation} with $\lambda_{\bar \phi \phi}$ the fusion coefficient in \REf{eq:FusionFinal} and with the propagator for the radial mode given by (see Appendix \ref{sec:propagatorCylinder}) \begin{equation} D_{rr}(z, \bar z) = \sum_{\ell=0}^\infty \frac{\ell+1}{\Omega_{3}} \, \left ( |z|^{\omega_A(\ell)} \, \frac{J_\ell - \omega_A^2(\ell) } {2\omega_A(\ell)} + |z|^{\omega_B(\ell)} \, \frac{\omega_B^2(\ell)-J_\ell } {2\omega_B(\ell)}\right ) \frac{C_\ell^{(1)} (\cos \theta)}{\omega_B^2(\ell)-\omega_A^2(\ell)}. \label{eq:4ptPropagatorrr} \end{equation} Having secured the four-point function at NLO, we can now turn our attention to the spectrum of operators appearing in the different channels. \paragraph{s-channel.} The analysis is straightforward.
Indeed, using \REf{eq:CBsChannelChargen} and \REf{eq:4ptPropagatorrr} we see that the four-point function \begin{equation} g_{s}(z,\bar z)= \lambda_{\bar \phi \phi}^2 |z|^{\Delta_{\phi^n}} \left [ 1 +\frac{4\mu}{n} \sum_{\ell=0}^\infty \, \left ( |z|^{\omega_A(\ell)} \, \frac{J_\ell - \omega_A^2(\ell) } {2\omega_A(\ell)} + |z|^{\omega_B(\ell)} \, \frac{\omega_B^2(\ell)-J_\ell } {2\omega_B(\ell)}\right ) \frac{(\ell+1) C_\ell^{(1)} (\cos \theta)}{\omega_B^2(\ell)-\omega_A^2(\ell)} \right ] \label{eq:4ptPropagatorrrS-channel} \end{equation} is already in the form \REf{eq:ConfBlockStatesDescendantsGegenbauer}. Therefore, we can identify the primary operators by simply looking at the powers of $|z|$ in the expansion. These are in one-to-one correspondence with the $A$- and $B$-type single-phonon states found in Section~\ref{sec:SpectrumFluctuations} and result in two separate towers of primaries with dimension \begin{equation} \Delta_A=\Delta_{\phi^n}+\omega_A(\ell), ~~\ell \geq 2\qquad \qquad \Delta_B= \Delta_{\phi^n}+\omega_{B}(\ell), ~~ \ell \geq 0\, . \label{eq:onephononSc-channel} \end{equation} Notice that the tower of $A$-type primaries starts at $\ell=2$. Indeed, the $\ell=1$ \mbox{$A$-phonon} does appear in \REf{eq:4ptPropagatorrrS-channel} but it corresponds to the descendant $\partial_\mu \phi^n$. Instead $\ell=0$ corresponds to the ``Goldstone mode'', which controls the global fluctuations of the phase of $\phi$ and, as such, is not excited by neutral operators like $\bar \phi \phi$. The corresponding fusion coefficients can be read off from the coefficients in front of~$|z|^\Delta$ \begin{equation} \lambda^{\ell}_{\bar \phi \phi,A} = \lambda_{\bar \phi \phi} \sqrt{ \frac{4\mu}{n} (\ell+1) \frac{J_\ell - \omega_A^2(\ell) } {2\omega_A(\ell)} } , ~~ \ell\geq 1,\end{equation} \begin{equation} \lambda^{\ell}_{\bar \phi \phi,B} = \lambda_{\bar \phi \phi} \sqrt{ \frac{4\mu}{n} (\ell+1) \frac{\omega_B^2(\ell) - J_\ell } {2\omega_B(\ell)} } , ~~ \ell\geq 0.\end{equation} We see that these are suppressed by $\sim \sqrt {\mu/n}$ with respect to $\lambda_{\bar \phi \phi}$. It should also be noted that these operators enter the OPE without their descendants, similarly to $\phi^n$ at leading order. \paragraph{t-channel.} The analysis is somewhat more complicated. The reason is that the corresponding expression of the four-point function \begin{eqnarray} g_{t}(1-z,1 - \bar z) & = & \lambda_{\bar \phi \phi}^2 \frac{|1-z|^{2\Delta_{\bar \phi \phi}}}{|z|^{\Delta_{\bar \phi \phi}}} \label{eq:4ptPropagatorrrt-channel} \\ && \left [ 1 +\frac{4\mu}{n} \sum_{\ell=0}^\infty \, \left ( |z|^{\omega_A(\ell)} \, \frac{J_\ell - \omega_A^2(\ell) } {2\omega_A(\ell)} + |z|^{\omega_B(\ell)} \, \frac{\omega_B^2(\ell)-J_\ell } {2\omega_B(\ell)}\right ) \frac{(\ell+1) C_\ell^{(1)} (\cos \theta)}{\omega_B^2(\ell)-\omega_A^2(\ell)} \right ] \nonumber \end{eqnarray} is written as a power series in $|z|$, and not in $|1-z|$. In order to get the latter, we have to analytically continue the four-point function to the region $z=1$. That would allow us to analyze the spectrum of operators appearing in the $t$-channel at next to leading order. Unfortunately, we do not know how to perform the analytic continuation in closed form\footnote{In other words, we do not possess the propagator in closed form for $z\sim \bar z\sim 1$.}, and we will instead illustrate the principle with an example. For that, let us consider a simplified situation, $z=\bar z \in \mathbb{R}$, in other words $\theta=0$.
Introducing the following notation for the summand in \REf{eq:4ptPropagatorrrt-channel} \begin{equation} G(z;\ell)=2\left ( z^{\omega_A(\ell)} \, \frac{J_\ell - \omega_A^2(\ell) } {2\omega_A(\ell)} + z^{\omega_B(\ell)} \, \frac{\omega_B^2(\ell)-J_\ell } {2\omega_B(\ell)}\right ) \frac{(\ell+1) C_\ell^{(1)} (1)}{\omega_B^2(\ell)-\omega_A^2(\ell)}, \end{equation} and using its asymptotic behavior (see also \REf{eq:SpectrumLargeL}) \begin{equation} G(z;\ell) \underset{\substack{\ell\to \infty \\ z\to 1}}{=} z^{\ell} \left ( \ell+1-\frac{3}{2}\frac{\mu^2-1}{\ell}-(1-z)\ell+\dots\right ) \end{equation} we can find the leading asymptotics of the four-point function for $z\to 1$ \begin{equation} g_{t}(1-z,1 - z) \underset{z\to 1}{=} \lambda_{\bar \phi \phi}^2 (1-z)^{2\Delta_{\bar \phi \phi}} \left \{ 1 +\frac{2\mu}{n} \left [ \frac{1}{(1-z)^2}+\frac{\Delta_{\bar \phi \phi}-1}{1-z}+\frac{3}{2} (\mu^2-1)\log (1-z) \right ] + \mathcal R(z) \right \} \label{eq:4ptt-Channelzto1} \end{equation} with the remainder \begin{equation} \mathcal R(z) = \frac{2\mu}{n} G(z;0)+\frac{2\mu}{n} \sum_{\ell=1}^\infty \left [G(z;\ell) - z^{\ell} \left ( \ell+1-\frac{3}{2}\frac{\mu^2-1}{\ell} \right ) \right ] \underset{z\to 1}{=} O((1-z) \log(1-z)) \end{equation} being a less singular function. The singular terms in \REf{eq:4ptt-Channelzto1} correspond to different operators. The term proportional to $(1-z)^{-2}$ corresponds to an operator with scaling dimension $\Delta=2$, which is nothing else but $\bar \phi \phi$ (its anomalous dimension is invisible at this order).\footnote{In general we expect not only $\bar \phi \phi$ but also its spin-$\ell$ analogues of the form \begin{equation} \bar \phi \partial_{\mu_1} \dots \partial_{\mu_\ell} \phi, \label{eq:NLOtChannelphiphi} \end{equation} to appear in the $\bar \phi \phi\times \bar \phi \phi$ OPE.} The second term corresponds to its descendant, whose coefficient is fixed by conformal symmetry (compare with \REf{eq:DescendantCoefficient}). Lastly, the term with $\log(1-z)$ can be exponentiated, leading to a modified prefactor \begin{equation} g_{t}(1-z,1 - z) \underset{z\to 1}{\supset} \lambda_{\bar \phi \phi}^2 (1-z)^{2\Delta_{\bar \phi \phi}} \, (1-z)^{\frac{3\mu}{n} (\mu^2-1)}. \end{equation} The resulting exponent should correspond to the scaling dimension $\Delta_{(\bar \phi \phi)^2}$ (see \REf{eq:t-ChannelSpectrumLO} and \REf{eq:t-ChannelOperatorsLO}) at NLO. Indeed, using \REf{eq:ChargeRelationfnmu}, \REf{eq:muRelationf} and \REf{eq:phiphiAnomalousDimension} we can write \begin{equation} \Delta_{(\bar \phi \phi)^2} = 2\Delta_{\bar \phi \phi}+\frac{3\mu}{n} (\mu^2-1) = 4+O(\varepsilon^2), \label{eq:NLODimesnionSpin0} \end{equation} which coincides with the computation using Feynman diagrams, see Appendix~\ref{app:phiphiDimensionFeynman}. The last result can also be directly derived using the general relation (see e.g. \cite{Baume:2014rla}) $\Delta_{(\bar \phi \phi)^2}= d+\beta'(\lambda_*)$ between the dimension of the interaction term $(\bar\phi\phi)^2$ and $\beta'\equiv \partial_\lambda \beta$. Using \REf{eq:phi4FixedPoint} and $\beta(\lambda)=-\varepsilon \lambda + \frac{5 \lambda^2}{16\pi^2}-\frac{15\lambda^3}{(16\pi^2)^2}+O(\lambda^4)$ one finds $\beta'(\lambda_*)=-\varepsilon+\frac{10\lambda_*}{16\pi^2}+O(\varepsilon^2)=\varepsilon+O(\varepsilon^2)$, which immediately gives $\Delta_{(\bar \phi \phi)^2}= (4-\varepsilon)+\varepsilon+O(\varepsilon^2)= 4 + O(\varepsilon^2)$. We conclude this section with two comments. First, it is straightforward to extend the computation presented above to the case when the two `light' operators are $( \bar \phi \phi ) ^k$.
\REf{eq:4ptt-Channelzto1} is just minimally modified to\footnote{The fusion coefficient $\lambda_{(\bar \phi \phi)^k}$ can be computed by repeating the steps of Section \ref{sec:3ptfunction}.} \begin{equation} g_{t}(1-z,1 - z) \underset{z\to 1}{=} \lambda_{(\bar \phi \phi)^k}^2 (1-z)^{2\Delta_{(\bar \phi \phi)^k}} \left \{ 1 +\frac{2\mu k^2}{n} \left [ \frac{1}{(1-z)^2}+\frac{\Delta_{(\bar \phi \phi)^k}-1}{1-z}+\frac{3}{2} (\mu^2-1)\log (1-z) \right ] + \dots \right \}, \end{equation} which implies that the two leading contributions are associated with $(\bar \phi \phi)^{2k}$ and $(\bar \phi \phi)^{2k-1}$. Moreover, by exponentiating the term with $\log(1-z)$ we obtain, at 1-loop accuracy, a relation between scaling dimensions \begin{equation} \Delta_{(\bar \phi \phi)^{2k}} - 2 \Delta_{(\bar \phi \phi)^{k}} = k^2 \left ( \Delta_{(\bar \phi \phi)^{2}} - 2 \Delta_{(\bar \phi \phi)} \right ), \end{equation} which can be checked perturbatively using the results of Appendix~\ref{app:phiphiDimensionFeynman}. This provides an additional cross-check. The second comment concerns the computation of similar correlators in a general CFT using the universal EFT superfluid description, as done in \cite{Hellerman:2015nra,Monin:2016jmo}. Even though the EFT description can be trusted only for sufficiently large separations between the two `light' operators, we can try and use the results of~\cite{Monin:2016jmo} for the 4-point function to formally analyze what operators appear in the $t$-channel. Repeating almost verbatim (albeit unjustifiably) the computation leading to \REf{eq:t-ChannelSpectrumLO} we conclude that the spectrum of operators in this case is given by \begin{equation} \Delta = \delta_1+\delta_2+2k+\ell, \label{eq:EFTtChannelSpectrum} \end{equation} which for $\ell \gg 1$ coincides with the predictions of the analytic bootstrap~\cite{Komargodski:2012ek,Fitzpatrick:2012yx}. This fact indicates that there should be a way to frame this statement purely within the reach of the EFT, but we do not know how. \section{Summary} Perhaps the most synthetic way to state the result of \cite{Hellerman:2015nra} is by saying that in a Euclidean CFT the insertion of a large charge operator produces `around' the insertion point a state that is equivalent to a conformal superfluid. The equivalence is made fully evident by exploiting the mapping of the theory to the cylinder. A consequence of this result is that, while the lowest dimension operator of given charge corresponds to the superfluid ground state at fixed charge density, the operators of higher dimension must be in one-to-one correspondence with the superfluid excitations. The latter range from states with finite spin involving a finite number of phonons to states involving vortices whose spin scales with the charge \cite{vortices1}. This result is remarkable but, as stated in its generality, it is a bit abstract and not very tangible. In this paper, considering weakly coupled theories, where the operator spectrum can also be constructed using standard perturbation theory, we made that operator correspondence more tangible. In practice we considered Wilson-Fisher $U(1)$-invariant fixed points and focussed on charge-$n$ operators, like $\phi^n$, with $n\gg 1$. By constructing the operators we have shown how their spectrum automatically exhibits the structure of the Fock space of phonon excitations in the superfluid. As our question mainly concerned counting and structure, we obtained the above result by focussing on the simplest case of free field theory.
In that case the spectrum can be fully and exactly worked out in terms of elementary fields and their derivatives, and yet, for large $n$, one can still describe it in terms of superfluid excitations in a systematic $1/n$ expansion. By conformal invariance, operator multiplets are fully classified by their primary operator. We have identified polynomials in fields and derivatives that play the role of building blocks in the systematic construction of the primaries. These building blocks are themselves primaries and are labelled by spin $\ell$ and by a discrete label taking two values, $\mathcal A$ and $\mathcal B$. At a given spin $\ell$, a building block is fully determined by its highest weight element, carrying $J_3=\ell$. For instance the highest weight $\mathcal A$-blocks are given by products involving only $\phi$ (no $\bar \phi$) and $\partial_-$ derivatives of the form\footnote{This expression is equivalent to (\ref{eq:define_C_atomic}) with an abuse of notation, since here we do not distinguish between the full field $\phi$ and its creation part.} \begin{equation} \mathcal A_{\ell,\ell}\equiv \sum_{k=0}^{\ell-1} \alpha_k \phi^{\ell -k-1}(\partial_-\phi)^k(\partial_-^{\ell-k} \phi) \end{equation} with the $\alpha_k$ some given coefficients. The lower $J_3$ elements of the block $\mathcal A_{\ell, m}$ ($m=-\ell,-\ell+1,\dots, \ell$) are trivially obtained by acting with the spin-lowering operator $J_-$. The $\mathcal A$ blocks are only defined for $\ell\geq 2$. Indeed the $\mathcal A_{00}$-block would be trivially proportional to the identity operator, while the $\ell=1$ block would be proportional to $\partial_-\phi$, and as such it would be a descendant, not commuting with the special conformal generators. $\mathcal B$-blocks have instead the form \begin{equation} \mathcal B_{\ell,\ell}\equiv \sum_{k=0}^\ell \beta_k \phi^{\ell -k+1}(\partial_-\phi)^k(\partial_-^{\ell-k} \bar \phi) \end{equation} with suitable $\beta_k$. Unlike the $\mathcal A$ blocks, the $\mathcal B$-blocks are defined for all $\ell\geq 0$. In particular $\mathcal B_{0,0}= \phi\bar \phi$. Notice that the blocks have charge $\ell$, i.e. their charge equals their spin. By a combinatoric argument, we have then proven that all primaries of spin bounded by the charge are obtained by taking products of $\mathcal A$ and $\mathcal B$ blocks, corresponding to expressions of the form \begin{equation} \phi^{n-n_\mathcal A-n_\mathcal B} \left (\prod_{\alpha} \mathcal A_{\ell_\alpha, m_\alpha}\right ) \left (\prod_{\beta} \mathcal B_{\tilde \ell_\beta, \tilde m_\beta}\right ), ~~~ n_\mathcal A=\sum_\alpha \ell_\alpha, ~~~ n_\mathcal B=\sum_\beta \tilde \ell_\beta. \end{equation} These expressions are well defined as long as $n-n_\mathcal A-n_\mathcal B\geq 0$. In particular, they are well defined in the limit $n\to \infty$ while keeping the total number of derivatives and powers of $\bar \phi$ finite. This result for the operator spectrum is precisely in one-to-one correspondence with the Fock space of hydrodynamic modes around the superfluid solution. The latter consists of $A$- and $B$-type modes of all possible spins. However $A$-modes of spin $\ell =0$ and $\ell =1$ should not be considered when constructing operators of fixed charge. The former excitations do not correspond to Fock states, and simply give rise to operators that interpolate between subspaces of given charge. The latter are seen to correspond to descendants.
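As a concrete illustration of the building blocks (with a conventional overall normalization; the coefficients are read off from the spin-2 example worked out in Appendix~\ref{app:spin23}), the lowest $\mathcal A$-block is
\begin{equation}
\mathcal A_{2,2}\propto \phi\,\partial_-^2\phi-3\,(\partial_-\phi)^2,
\end{equation}
so that $\phi^{n-2}\mathcal A_{2,2}$ is the highest weight component of the spin-2 primary $O^{(2,1)}_{\mu\nu}-3\,O^{(2,2)}_{\mu\nu}$ constructed explicitly there.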
Our building blocks allow us to explicitly construct all the primary operators whose number of derivatives is bounded by their charge. The spin of these states then satisfies $\ell\leq n$, and we naturally expect a new regime to arise for $\ell >n$. On the other hand, as illustrated in section \ref{sec:primary_phonons}, the structure of the building blocks also displays a subtle change of regime at the smaller value $\ell\sim \sqrt n$. In the superfluid description the change of regime consists in the fact that for $\ell\ll\sqrt n$, the primary states are approximated by states with a fixed number of phonons, while for $\ell\gtrsim\sqrt n$ the primary building blocks involve a significant mixture of states with different numbers of phonons. This indicates that at $\ell \gtrsim \sqrt n$ the interactions among phonons become important and that states cannot be described as small fluctuations around a homogeneous superfluid solution. In other words it appears the superfluid description breaks down for $\ell \gtrsim \sqrt n$. However, and remarkably, leaving aside the explicit expression of the primaries, it happens that for $\sqrt{n} < \ell < n $ the superfluid description can still be used for computing coordinate (basis) independent quantities, like the expectation value of $\bar \phi \phi$. This fact strongly suggests that in the window $\sqrt{n} < \ell < n $ there exists another hydrodynamic saddle point allowing a more convenient description of the primaries. We have not investigated that, but this is clearly an issue worth further study. As we already said, in this paper we have not addressed at all what happens for even higher spins, $\ell >n$. However, our combinatoric argument shows that, even in this case, the counting of primary operators (with maximal spin given the number of derivatives) coincides with the counting of superfluid phonon excitations, with the constraint that each phonon's spin be less than the charge. At the moment we cannot say anything in favor of the existence of yet another hydrodynamic saddle point, rendering computations perturbative in this case, but these puzzling facts clearly warrant further investigation. Indeed, in the interacting case the regime of large spin and large charge is expected to be universally described by vortex dynamics as discussed in \cite{vortices1}. The search for different saddles around which to expand at large spin, in both the free and the interacting case, seems, unavoidably, the next thing to study. \subsection*{Acknowledgements} We would like to thank Gabriel Cuomo, Brian Henning and Matt Walters for useful discussions. The work of G.B. and R.R. is partially supported by the Swiss National Science Foundation under contract 200020-188671 and through the National Center of Competence in Research SwissMAP. \newpage \appendices \section{Coefficient $\mathcal Y_{\ell m}^{\mu_1\dots\mu_\ell}$\label{sec:appYlmCoeff}} We provide here a few explicit formulas regarding the coefficient defined in (\ref{eq:defineYlmCoeff}).
First, we write the spherical harmonics in the basis (\ref{eq:x+-0}) \begin{eqnarray} Y_{\ell m} & = & \label{eq:SphericalCartesian} [-\mathrm{sign}(m)]^m \sqrt{\frac{(2\ell+1)(\ell+m)!(\ell-m)!}{2^{|m|} \, 4\pi }}\sum_{\substack{\alpha_++\alpha_-+\alpha_0=\ell \\ \alpha_+-\alpha_-=m}} \frac{n_+^{\alpha_+} n_0^{\alpha_0} n_-^{\alpha_-} }{(-2)^{\mathrm{min}(\alpha_+,\alpha_-)}\alpha_+!\alpha_0!\alpha_-!} \\ & = & [-\mathrm{sign}(m)]^m \sqrt{\frac{(2\ell+1)(\ell+m)!(\ell-m)!}{2^{|m|} \, 4\pi }}\sum_{k ~ \mathrm{step}~2} ^{\ell-|m|} \frac{n_+^{\frac{\ell+m-k}{2}} n_0^{k} n_-^{\frac{\ell-m-k}{2}} }{(-2)^{\frac{\ell-|m|-k}{2}}\left ( \frac{\ell+m-k}{2} \right )! k ! \left ( \frac{\ell-m-k}{2}\right )!}, \nonumber \end{eqnarray} where the sum over $k$ is taken in steps of 2, starting from $\ell-|m|~\mathrm{mod}~2$. After the integration we get the coefficients \begin{align} \mathcal Y_{\ell m}^{ \overbrace{+ \ldots +}^{\alpha_+} \overbrace{0\ldots0}^{\alpha_0}\overbrace{-\ldots-}^{\alpha_-}} = ~& \delta_{\alpha_+ + \alpha_0+\alpha_- , \ell} \delta_{\alpha_- - \alpha_+, m} \frac{[-\mathrm{sign}(m)]^m \sqrt{\pi} \, (2\ell+1) \sqrt{(\ell+m)!(\ell-m)!}} {\ell !} \nonumber \\ & \times \sum_{k \text{ step } 2}^{\ell-|m|} \frac{(-1)^\frac{\ell-k-|m|}{2}}{2^{\frac{3}{2}\ell - k - \frac{\alpha_0}{2}}} \frac{\Gamma\left ( \frac{k+\alpha_0+1}{2}\right )}{\Gamma \left ( \ell+\frac{3}{2}\right )} \frac{\left ( \ell-\frac{k+\alpha_0}{2}\right )!}{\left ( \frac{\ell+m-k}{2}\right )! k! \left ( \frac{\ell-m-k}{2}\right )!} . \label{eq:YlmCoeffExplicit} \end{align} \section{Explicit expressions for spin 2 and 3 primaries\label{app:spin23}} In this appendix we give explicit expressions for the spin-$\ell$ primary operators associated to the primary states (\ref{eq:one-phononStateA}) using the operator-state correspondence (\ref{eq:aVsDerivatives}). As discussed in section \ref{sec:oppStateCorr}, it is assumed that products of (derivatives of) $\phi$ are normal-ordered and evaluated at the origin. The counting of primaries given in (\ref{eq:primaryCount}) indicates there is one primary of spin $\ell=0,2,3$ but none of spin $1$. \paragraph{Spin $0$} is trivial, for we have only one state given by (\ref{eq:chargenspin0}). \paragraph{Spin $1$} Explicitly, we have three states \begin{equation} \left ( a_{00}^\dagger \right ) ^{n-1}a_{1,m}^\dagger | 0 \rangle, \end{equation} which correspond to \begin{eqnarray} \left ( a_{00}^\dagger \right ) ^{n-1} a^\dagger _{1,1} | 0 \rangle & = & -(4 \pi)^{n/2} \phi^{n-1} \partial_- \phi | 0 \rangle, \\ \left ( a_{00}^\dagger \right ) ^{n-1} a^\dagger _{1,0} | 0 \rangle & = & (4 \pi)^{n/2} \phi^{n-1} \partial_0 \phi | 0 \rangle, \\ \left ( a_{00}^\dagger \right ) ^{n-1} a^\dagger _{1,-1} | 0 \rangle & = & (4 \pi)^{n/2} \phi^{n-1} \partial_+ \phi | 0 \rangle. \end{eqnarray} It is straightforward to show using \REf{eq:K0aabb}, \REf{eq:K-aabb} and \REf{eq:K+aabb} that those states, hence the corresponding operators, are descendants, as we would expect since these operators can be written as derivatives of~$\phi^n$. \paragraph{Spin $2$} We can write two spin-2 operators by combining $n$ fields $\phi$ and two derivatives in a traceless and symmetric way \begin{equation} O^{(2,1)}_{\mu \nu} = \phi^{n-1}\left ( \partial_\mu\partial_\nu \phi - \frac{\delta_{\mu \nu}}{3}\partial^2 \phi \right ), ~~ O^{(2,2)}_{\mu \nu} = \phi^{n-2}\left ( \partial_\mu \phi \partial_\nu \phi - \frac{\delta_{\mu \nu}}{3}( \partial \phi )^2 \right ). \end{equation} One linear combination of these is the spin-two primary, as we show next.
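Before proceeding, let us note a quick cross-check of the counting quoted above (a small illustrative script of ours; it uses the generating function derived in Appendix~\ref{app:counting_primaries}, and is therefore logically downstream of the combinatoric argument given there):
\begin{verbatim}
import sympy as sp

x = sp.symbols('x')
n, lmax = 3, 8                  # charge and maximal spin displayed

gen = sp.Integer(1)             # prod_{k=2}^{n} 1/(1 - x^k)
for k in range(2, n + 1):
    gen *= 1 / (1 - x**k)

counts = sp.series(gen, x, 0, lmax + 1).removeO()
print([counts.coeff(x, l) for l in range(lmax + 1)])
# -> [1, 0, 1, 1, 1, 1, 2, 1, 2]: one primary of spin 0, 2 and 3,
#    none of spin 1, and Prim(8,3) = 2 as in the charge-3 example below
\end{verbatim}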
To give examples of primary states with non-maximal $J_3$ eigenvalue, let us repeat the method of section \ref{sec:vacuum_primaries_construction} in this simple case. We consider a state \begin{equation} | n ; 2, 0 \rangle_{A} = \alpha_1 \left ( a_{00}^\dagger \right ) ^{n-1} a^\dagger _{2,0} | 0 \rangle+\alpha_2 \left ( a_{00}^\dagger \right ) ^{n-2} \left ( a^\dagger _{1,0} \right )^2 | 0 \rangle+ \beta_2 \left ( a_{00}^\dagger \right ) ^{n-2} a^\dagger _{1,-1} a^\dagger _{1,1} | 0 \rangle. \end{equation} Acting with $K_\pm$ and $K_0$ we see that this state is primary provided $\alpha_2=\beta_2=-\alpha_1$. It follows from (\ref{eq:aVsDerivatives}) that \begin{eqnarray} | n ; 2, 0 \rangle_{A} & = & (4 \pi)^{n/2}\alpha_1 \left [ \frac{1}{3} \phi^{n-1}\left ( \partial_0^2 \phi - \partial_+\partial_- \phi \right ) - \phi^{n-2}\left ( \partial_0 \phi \partial_0 \phi - \partial_+ \phi \partial_- \phi \right ) \right ]| 0 \rangle \\ & = & (4 \pi)^{n/2}\frac{\alpha_1}{2} \left [ \frac{1}{3} \phi^{n-1}\left ( 3\partial_0^2 \phi - \partial^2 \phi \right ) - \phi^{n-2}\left ( 3 \partial_0 \phi \partial_0 \phi - (\partial \phi)^2 \right ) \right ]| 0 \rangle \\ & = & (4 \pi)^{n/2}\frac{3\alpha_1}{2} \left [ \frac{1}{3} \phi^{n-1}\left ( \partial_0^2 \phi - \frac{1}{3}\partial^2 \phi \right ) - \phi^{n-2}\left ( \partial_0 \phi \partial_0 \phi - \frac{1}{3}(\partial \phi)^2 \right ) \right ]| 0 \rangle \\ & = & (4 \pi)^{n/2}\frac{3\alpha_1}{2} \left ( \frac{1}{3}O^{(2,1)}_{00} - O^{(2,2)}_{00} \right) | 0 \rangle. \end{eqnarray} Hence, we conclude that the operator $O_{\mu\nu}^{(2,1)} - 3 O_{\mu\nu}^{(2,2)}$ is primary. \paragraph{Spin $3$} In this case we have an ansatz \begin{eqnarray} | n ; 3, 0 \rangle_{A} & = & \alpha_1 \left ( a_{00}^\dagger \right ) ^{n-1} a^\dagger _{3,0} | 0 \rangle \\ & + &\alpha_2 \left ( a_{00}^\dagger \right ) ^{n-2} a^\dagger _{2,0} a^\dagger _{1,0} | 0 \rangle+\beta_2 \left ( a_{00}^\dagger \right ) ^{n-2} a^\dagger _{2,1} a^\dagger _{1,-1} | 0 \rangle+\gamma_2 \left ( a_{00}^\dagger \right ) ^{n-2} a^\dagger _{2,-1} a^\dagger _{1,1} | 0 \rangle \nonumber \\ & + & \alpha_3 \left ( a_{00}^\dagger \right ) ^{n-3} \left ( a^\dagger _{1,0} \right )^3 | 0 \rangle + \beta_3 \left ( a_{00}^\dagger \right ) ^{n-3} a^\dagger _{1,-1} a^\dagger _{1,1}a^\dagger _{1,0} | 0 \rangle. \nonumber \end{eqnarray} As before, acting with $K$ and imposing that the state be primary we get \begin{eqnarray} | n ; 3, 0 \rangle_{A} & = & (4 \pi)^{n/2}\alpha_1 \left[ \left ( a_{00}^\dagger \right ) ^{n-1} a^\dagger _{3,0} \right. \\ & - &3 \left ( a_{00}^\dagger \right ) ^{n-2} a^\dagger _{2,0} a^\dagger _{1,0} -\sqrt{2} \left ( a_{00}^\dagger \right ) ^{n-2} a^\dagger _{2,1} a^\dagger _{1,-1} -\sqrt{2}\left ( a_{00}^\dagger \right ) ^{n-2} a^\dagger _{2,-1} a^\dagger _{1,1} \nonumber \\ & + & \left.
2 \left ( a_{00}^\dagger \right ) ^{n-3} \left ( a^\dagger _{1,0} \right )^3 + 6 \left ( a_{00}^\dagger \right ) ^{n-3} a^\dagger _{1,-1} a^\dagger _{1,1}a^\dagger _{1,0} \right ] | 0 \rangle, \nonumber \end{eqnarray} which corresponds to \begin{equation} | n ; 3, 0 \rangle_{A} = (4 \pi)^{n/2}\alpha_1 \left ( \frac{1}{6} O^{(3,1)}_{000}-\frac{5}{6} O^{(3,2)}_{000}+5 O^{(3,3)}_{000}\right ) | 0 \rangle= (4 \pi)^{n/2} \frac{ \alpha_1}{6} \left (O^{(3,1)}_{000}-5 O^{(3,2)}_{000}+30 O^{(3,3)}_{000}\right ) | 0 \rangle, \end{equation} where the three spin-3 operators are \begin{eqnarray} O^{(3,1)}_{\mu \nu \lambda} & = & \phi^{n-1}\left [ \partial_\mu \partial_\nu \partial_\lambda \phi - \frac{1}{5} \left ( \delta_{\mu \nu} \partial_\lambda+ \delta_{\mu \lambda} \partial_\nu + \delta_{\lambda \nu} \partial_\mu \right ) \partial^2 \phi \right ], \\ O^{(3,2)}_{\mu \nu \lambda} & = & \phi^{n-2} \Big [ \partial_\mu \partial_\nu \phi \partial_\lambda \phi+ \partial_\mu \phi \partial_\nu \partial_\lambda \phi + \partial_\nu \phi \partial_\mu \partial_\lambda \phi \\ && - \frac{\delta_{\mu \nu}}{5} \left ( \partial _ \lambda\phi \partial^2 \phi + \partial_\lambda (\partial \phi )^2 \right ) - \frac{\delta_{\mu \lambda}}{5} \left ( \partial _ \nu \phi \partial^2 \phi + \partial_\nu (\partial \phi )^2 \right ) - \frac{\delta_{\lambda \nu}}{5} \left ( \partial _ \mu \phi \partial^2 \phi + \partial_\mu (\partial \phi )^2 \right ) \Big ], \nonumber \\ O^{(3,3)}_{\mu \nu \lambda} & = & \phi^{n-3}\left [ \partial_\mu\phi \partial_\nu\phi \partial_\lambda \phi - \frac{1}{5} \left ( \delta_{\mu \nu} \partial_\lambda \phi+ \delta_{\mu \lambda} \partial_\nu \phi+ \delta_{\lambda \nu} \partial_\mu \phi \right ) (\partial \phi )^2 \right ]. \end{eqnarray} We conclude that the operator $ O^{(3,1)}_{\mu\nu\lambda}-5 O^{(3,2)}_{\mu\nu\lambda}+30 O^{(3,3)}_{\mu\nu\lambda} $ is primary. \section{Counting primaries with $\ell>n$\label{app:counting_primaries}} Here we give several examples of the formula (\ref{eq:primaryCount}) presented in the main text. First, let us consider the case of charge 2. We detail the partitions mentioned in the argument of the main text. We do this for the examples of spin 4 and 5: \begin{itemize} \item $\mathrm{Prim}(4,2) =1$ \begin{equation} \begin{array}{l | l || l | l || c} p(4,2)& p(3,2) & p^*(4,2) & p^*(3,2) & \mathrm{Prim}(4,2) \\ \hline (4) & (3) & (1,1,1,1) & (1,1,1) & \times\\ (3,1) & (2,1) & (2,1,1) & (2,1) & \times \\ (2,2) & & \mathbf{(2,2)} & & \checkmark \end{array} \end{equation} \item $\mathrm{Prim}(5,2) =0$ \begin{equation} \begin{array}{l | l || l | l || c} p(5,2)& p(4,2) & p^*(5,2) & p^*(4,2) & \mathrm{Prim}(5,2) \\ \hline (5) & (4) & (1,1,1,1,1) & (1,1,1,1)& \times \\ (4,1) & (3,1) & (2,1,1,1) & (2,1,1) & \times \\ (3,2) &(2,2) & (2,2,1) & (2,2) & \times \end{array} \end{equation} \end{itemize} In general, we find there is one primary operator for even spins and none for odd spins. Let us now give a more involved example with charge 3 and spin 8, resulting in $\mathrm{Prim}(8,3) =2$.
\begin{equation} \begin{array}{l | l || l | l || c} p(8,3)& p(7,3) & p^*(8,3) & p^*(7,3) & \mathrm{Prim}(8,3) \\ \hline (8) & (7) & (1,1,1,1,1,1,1,1) & (1,1,1,1,1,1,1) & \times \\ (7,1) & (6,1) & (2,1,1,1,1,1,1) & (2,1,1,1,1,1)& \times \\ (6,2) & (5,2) & (2,2,1,1,1,1) & (2,2,1,1,1) & \times \\ (6,1,1) & (5,1,1) & (3,1,1,1,1,1) & (3,1,1,1,1)& \times \\ (5,3) & (4,3) & (2,2,2,1,1) & (2,2,2,1)& \times \\ (5,2,1) & (4,2,1) & (3,2,1,1,1) & (3,2,1,1) & \times \\ (4,4) & & \mathbf{(2,2,2,2)} & & \checkmark \\ (4,3,1) &(3,3,1) & (3,2,2,1) & (3,2,2) & \times \\ (4,2,2) & (3,2,2) & (3,3,1,1) & (3,3,1) & \times \\ (3,3,2) & &\mathbf{(3,3,2)} & & \checkmark \end{array} \end{equation} For arbitrary spin and charge $n=3$ an explicit expression is given by \begin{equation} \mathrm{Prim}(\ell,3) = \begin{cases} \left \lfloor \frac{\ell}{6} \right \rfloor , & \text{if } \ell = 6p + 1 \text{ for some } p\in \mathbb N, \\ \left \lfloor \frac{\ell}{6} \right \rfloor + 1 , & \text{if } \ell \neq 6p + 1 \text{ for all } p\in \mathbb N . \end{cases} \end{equation} In general, the number of primaries can be found from the generating function \begin{equation} \sum_{\ell=0}^\infty \mathrm{Prim}(\ell,n) x^\ell = \prod_{k=2}^n \frac{1}{(1-x^k)}. \end{equation} \section{NLO Fock states on non-trivial background\label{sec:appNLOFockStates}} Here we give the next to leading order result for the annihilation operators $B_{\ell m}$ over the non-trivial background computed in section \ref{sec:twoFock}. The result uses the Gaunt coefficients defined in \REf{eq:gaunt}. For $\ell=0$: \begin{equation} \label{eq:NLOFockB0} \begin{split} B_{00} ~=~ & \frac{a_{00} b_{00}}{\sqrt{n}} + \frac{6 n b_{00} b_{00}^\dagger -3 a_{00}^2 b_{00}^2 + (a_{00}^\dagger)^2(b_{00}^\dagger)^2 }{8 n^{3/2}} \\ & + \sum_{\substack{\ell > 0 \\ \text{all } m }} \frac{1}{8(1+2\ell)n^{3/2}} \Big( (-1)^m 4 n (1+\ell) a_{\ell m}b_{\ell ,-m} - (-1)^m (3+2\ell)a_{00}^2 b_{\ell m} b_{\ell,-m} \\[-15pt] & \hspace{110pt} - (-1)^m (1+2\ell)(a_{00}^\dagger)^2 a_{\ell m} a_{\ell,-m} - (1+4\ell ) 2 n a_{\ell m} a_{\ell,m}^\dagger \\ & \hspace{110pt} - 4 a_{00}^2 b_{\ell m} a_{\ell m}^\dagger + (-1)^m (-1+2\ell ) a_{00}^2 a_{\ell m}^\dagger a_{\ell,-m}^\dagger \\ & \hspace{110pt} - (-1)^m 4 n \ell a_{\ell m}^\dagger b_{\ell,-m}^\dagger + 2 n (3+4\ell) b_{\ell m} b_{\ell m}^\dagger \\ & \hspace{110pt} + (-1)^m (1+2\ell) (a_{00}^\dagger)^2 b_{\ell m}^\dagger b_{\ell,-m}^\dagger \Big)\,. \end{split} \end{equation} For $\ell>0$: \begin{equation} \label{eq:NLOFockB} \begin{split} B_{\ell m} =\ & \frac{a_{00} b_{\ell m}}{\sqrt{n}} + \frac{1}{4(1+2\ell)n^{3/2}} \Big( (3+4\ell) \big( nb_{00}^\dagger -b_{00}a_{00}^2 \big)b_{\ell m} + 2 n b_{00} a_{\ell m} \\ & \hspace{124pt} + (-1)^{m} \big( (3+2\ell) n b_{00} + (1+2\ell) b_{00}^\dagger (a_{00}^\dagger)^2 \big)b_{\ell,-m}^\dagger \\ & \hspace{124pt} - (-1)^m \big( 2\ell n b_{00}^\dagger + 2(1+\ell) b_{00} a_{00}^2\big) a_{\ell,-m}^\dagger \Big) \\ & + \!\!\!\! \sum_{\substack{\ell_1,\ell_2 > 0 \\ \text{all } m_1,m_2 }} \!\!\!\!
\frac{(-1)^m\sqrt{\pi}C_{-m,m_1,m_2}^{\ell,\ell_1,\ell_2}}{8\sqrt{2\omega_\ell\omega_{\ell_1}\omega_{\ell_2}}n^{3/2}} \Big( -(3+3\ell+\ell_1+\ell_2) a_{00}^2 b_{\ell_1,m_1}b_{\ell_2,m_2} \\[-20pt] & \hspace{145pt} + 2 (2+\ell+3\ell_1-\ell_2) n b_{\ell_1,m_1} a_{\ell_2,m_2} \\ & \hspace{145pt} + 2 (-1)^{m_2}(3+\ell+3\ell_1+\ell_2) n b_{\ell_1,m_1}b_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - (1-\ell+\ell_1+\ell_2) (a_{00}^\dagger)^2 a_{\ell_1,m_1}a_{\ell_2,m_2} \\ & \hspace{145pt} + 2 (-1)^{m_1}(\ell+\ell_1-\ell_2) (a_{00}^\dagger)^2 b_{\ell_1,-m_1}^\dagger a_{\ell_2,m_2} \\ & \hspace{145pt} + (-1)^{m_1+m_2}(1+\ell+\ell_1+\ell_2)(a_{00}^\dagger)^2 b_{\ell_1,-m_1}^\dagger b_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - 2 (-1)^{m_2} (2+3\ell+\ell_1-\ell_2) a_{00}^2 b_{\ell_1,m_1} a_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - 2(-1)^{m_2} (1-\ell+\ell_1+3\ell_2) n a_{\ell_1,m_1}a_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} + 2 (-1)^{m_1+m_2} (\ell+\ell_1-3\ell_2) n b_{\ell_1,-m_1}^\dagger a_{\ell_2,-m_2}^\dagger \\ & \hspace{145pt} - (-1)^{m_1+m_2}(1+3\ell-\ell_1-\ell_2)a_{00}^2 a_{\ell_1,-m_1}^\dagger a_{\ell_2,-m_2}^\dagger \Big) \,. \end{split} \end{equation} \section{Some asymptotics of Gaunt coefficients \label{app:Gaunt}} In the main text we are interested in the expansion at large $\ell$ of $A_{\ell\ell}^\dagger$. Thus we provide here some formulas for the asymptotics of the relevant Gaunt coefficients. We use special cases of $3j$ symbols \cite{dlmf:3j} \begin{equation} \begin{pmatrix} \ell & \ell_1 & \ell_2 \\ 0 & 0 & 0 \end{pmatrix} = \begin{cases} 0 & L \text{ odd}, \\ (-1)^\frac{L}{2} \sqrt{\frac{(L-2\ell)!(L-2\ell_1)!(L-2\ell_2)!}{(L+1)!}} \frac{\left(\frac{L}{2}\right)!}{\left(\frac{L-2\ell}{2}\right)!\left(\frac{L-2\ell_1}{2}\right)!\left(\frac{L-2\ell_2}{2}\right)!} & L \text{ even}, \end{cases} \end{equation} \begin{equation} \begin{pmatrix} \ell & \ell_1 & \ell_2 \\ \ell & -\ell -m_2 & m_2 \end{pmatrix} = (-1)^{\ell-\ell_1-m_2} \sqrt{ \frac{(2\ell)! (L-2\ell)! (\ell+\ell_1+m_2)! (\ell_2-m_2)!}{(L+1)! (L-2\ell_1)! (L-2\ell_2)! (-\ell+\ell_1-m_2)!(\ell _2+m_2)!}}, \end{equation} where $L = \ell+\ell_1+\ell_2$. We can use Stirling's formula to estimate these at large spin. If we consider $\ell$ to be large, due to the triangle inequality \REf{eq:triangle}, at least one of $\ell_1,\ell_2$ has to be of order $\ell$. If we assume $\ell_1\sim \ell_2\sim \ell$, we have \begin{equation} \label{eq:gauntL1L2} \frac{ C_{\ell,-\ell-m_2,m_2}^{\ell,\ell_1,\ell_2} }{\sqrt{\omega_\ell \omega_{\ell_1} \omega_{\ell_2}}} \overset{\substack{\ell_1\sim \ell_2\sim \ell \\ \ell\to \infty}}{\longrightarrow} g_1\!\!\left( \frac{\ell_1}{\ell},\frac{\ell_2}{\ell},\frac{m_2}{\ell}\right)^{\ell/2} h_1\!\!\left(\frac{\ell_1}{\ell},\frac{\ell_2}{\ell},\frac{m_2}{\ell}\right) \left( \ell^{-7/4} + \mathcal O(\ell^{-11/4})\right) , \end{equation} where \begin{equation} g_1 (x,y,z) = \frac{ 4 (-1)^{1-x+y-2z} (x+y-1)^{x+y-1} (x+z+1)^{x+z+1} (y-z)^{y-z} }{ (x-y+1)^{x-y+1} (y-x+1)^{y-x+1} (x+y+1)^{x+y+1} (x-z-1)^{x-z-1} (y+z)^{y+z} } , \end{equation} whose absolute value is bounded by $1$; for each pair $x,y$ there is a unique $z$ such that $|g_1(x,y,z)| = 1$, namely $z = \frac{x^2-y^2-1}{2}$, and \begin{equation} h_1 (x,y,z) = \frac{2}{\pi^{5/4}} \frac{ (y-z)^{1/4} (1+x+z)^{1/4} }{ (x-y+1)^{1/2} (y-x+1)^{1/2} (x+y+1) (x-z-1)^{1/4} (y+z)^{1/4} } .
\end{equation} Hence, for each $\ell_1,\ell_2$ there is only one pair $m_1,m_2$ for which the coefficient is not exponentially suppressed, and for that choice \REf{eq:gauntL1L2} is of order $\ell^{-7/4}$. On the other hand if we assume $(\ell_1-\ell) \sim \ell_2 \sim 1$ (the case $\ell_2\sim \ell, \ell_1\sim 1$ will of course give a similar result) \begin{equation} \label{eq:gauntL1} \frac{ C_{\ell,-\ell-m_2,m_2}^{\ell,\ell_1,\ell_2} }{\sqrt{\omega_\ell \omega_{\ell_1} \omega_{\ell_2}}} \overset{\substack{\ell_1\sim \ell \\ \ell\to \infty}}{\longrightarrow} (-1)^\ell h_2\!\left(\ell_1-\ell,\ell_2,m_2 \right) \ell^{\frac{\ell-\ell_1+m_2}{2}} \left( \ell^{-1} + \mathcal O(\ell^{-2})\right) , \end{equation} with \begin{equation} h_2 (x,y,z) = \frac{ (-1)^\frac{x+y-2z}{2} 2^\frac{x-2y+z-1}{2} (y-x)! \sqrt{(y-z)!} }{ \sqrt{\pi} \left(\frac{x+y}{2}\right)! \left(\frac{y-x}{2}\right)! \sqrt{(y+z)!(-x-z)!} } . \end{equation} Hence \REf{eq:gauntL1} is least suppressed in the case $m_2 = \ell_1 - \ell$, which is its maximal allowed value since $|m_1| = |-\ell-m_2| \leq \ell_1$, and in that case \REf{eq:gauntL1} is of order $\ell^{-1}$. \section{Norm of an excited state\label{sec:observable2_estimate}} To furnish one more example of the perturbative expansion of quantities involving spinning charged states discussed in section~\ref{sec:observable1_estimate}, we now consider the computation of $\langle n | A_{\ell\ell} A_{\ell\ell}^\dagger |n\rangle$. This is just the squared norm of the state $A_{\ell\ell}^\dagger | n \rangle$. Writing the state as a power series in $n^{-1/2}$ \begin{equation} A_{\ell\ell}^\dagger | n \rangle = |\Psi_0 \rangle + \frac{1}{\sqrt{n}} |\Psi_1\rangle + \frac{1}{n} |\Psi_2 \rangle + \dots \end{equation} we have from \REf{eq:NLOFockA} \begin{equation} \label{eq:A_ll_n_terms} \begin{split} & |\Psi_0\rangle = \frac{a_{00}a_{\ell\ell}^\dagger}{\sqrt{n}} | n \rangle \\ & |\Psi_1\rangle = \Bigg(\frac{(1+4 \ell) b_{00}^\dagger a_{\ell\ell}^\dagger }{4(1+2\ell)} + \!\!\!\! \sum_{\substack{\ell_1,\ell_2 > 0 \\ \text{all } m_1,m_2}} \!\!\!\! \frac{(-1)^m\sqrt{\pi}C_{\ell,m_1,m_2}^{\ell,\ell_1,\ell_2}}{8\sqrt{2\omega_\ell\omega_{\ell_1}\omega_{\ell_2}}n} \Big( - (2+3\ell+\ell_1+\ell_2) a_{00}^2 a_{\ell_1,-m_1}^\dagger a_{\ell_2,-m_2}^\dagger \\ & \hspace{240pt} + 2 (1+\ell-\ell_1+3\ell_2)n b_{\ell_1,-m_1}^\dagger a_{\ell_2,-m_2}^\dagger \\ & \hspace{240pt} + (\ell-\ell_1-\ell_2)(a_{00}^\dagger)^2 b_{\ell_1,-m_1}^\dagger b_{\ell_2,-m_2}^\dagger \Big) \Bigg) |n\rangle \end{split} \end{equation} since many terms vanish when applied to $|n\rangle$. We have not computed $|\Psi_2\rangle$ as this would require the NNLO expression for $A_{\ell\ell}^\dagger$. It is easy to see that $\langle \Psi_0 | \Psi_0 \rangle = 1$ and $\langle \Psi_0 | \Psi_1 \rangle = 0$. Thus the order-$n^{-1}$ correction to the squared norm is given by \begin{equation} || A_{\ell\ell}^\dagger | n \rangle ||^2 = 1 + \frac{1}{n} \big( \langle \Psi_1 | \Psi_1 \rangle + \langle \Psi_0 | \Psi_2 \rangle + \langle \Psi_2 | \Psi_0 \rangle \big) . \end{equation} We cannot directly evaluate the last two terms, but we can analyze the term $\langle \Psi_1 | \Psi_1 \rangle$. This will be given by an infinite sum over spins such as $\ell_1,\ell_2$, which we have no reason to expect will converge. Hence $\langle \Psi_0 | \Psi_2\rangle$ has to be an infinite sum as well, such that its divergent part cancels with that of $\langle \Psi_1 | \Psi_1 \rangle$.
The order of magnitude of the spins for which the cancellation starts taking effect can only be set by the single characteristic spin of the problem: $\ell$. We can thus estimate that the tails of both sums will cancel when the summed spins are greater than $\ell$. In other words, the behavior of both sums can be approximated, barring some unexpected cancellations, by estimating the behavior of the sum in $\langle \Psi_1|\Psi_1\rangle$ with a cutoff of order $\ell$. We observe that all terms of $|\Psi_1\rangle$ in \REf{eq:A_ll_n_terms} are orthogonal to each other, so we must estimate the norm of these individual terms in the limit of large summed spins $\ell_1,\ell_2$. First, we consider $\ell_1 \sim \ell_2 \sim \ell$. We estimate the contribution of such terms to the norm as \begin{equation} \label{eq:divergentSumL1L2} \sum_{\substack{ \ell_1 \sim \ell_2 \sim \ell \\ m_1, m_2}} \frac{ \left| C_{\ell,m_1, m_2}^{\ell, \ell_1, \ell_2} \right|^2 }{ \omega_\ell \omega_{\ell_1} \omega_{\ell_2}} (\ell^2) \sim \sum_{ \ell_1 \sim \ell_2 \sim \ell} \left( \ell^{-7/4} \right)^2 (\ell^2) \sim \ell^2 \times \ell^{-3/2} \sim \ell^{1/2} . \end{equation} Let us briefly explain this estimation. We start with a single sum because of the orthogonality of terms in \REf{eq:A_ll_n_terms}, and the summand is the square of the coefficient in that equation. Then, as observed in Appendix~\ref{app:Gaunt}, in this regime there is only one choice of $m_1,m_2$ which gives a non-suppressed term (\ref{eq:gauntL1L2}). Finally, the double sum over $\ell_1,\ell_2$ yields an additional $\ell^2$ factor. Secondly, we consider the case $ \ell_1 - \ell \sim \ell_2 \sim 1$. Again, there is only one choice of $m_1,m_2$ that yields the dominant term (\ref{eq:gauntL1}). Neglecting other terms, we estimate the contribution to the norm as \begin{equation} \sum_{\substack{ \ell_1 \sim \ell, \ell_2\sim 1 \\ m_1, m_2}} \frac{ \left| C_{\ell,m_1, m_2}^{\ell, \ell_1, \ell_2} \right|^2 }{\ell \ell_1 \ell_2} (\ell^2) \sim \sum_{ \ell_1 \sim \ell, \ell_2\sim 1} \left( \ell^{-1} \right)^2 (\ell^2) \sim \ell \times 1 \sim \ell , \end{equation} where in the second estimation the sum yields a single $\ell$ factor since only $\ell_1$ is summed up to order $\ell$. We notice that this contribution dominates that of \REf{eq:divergentSumL1L2}. Evidently the case $ \ell_2 - \ell \sim \ell_1 \sim 1$ gives an equal contribution. Therefore, the series expansion of the norm is estimated schematically as \begin{equation} || A_{\ell\ell}^\dagger |n\rangle ||^2 \sim 1 + \frac{\ell+ \dots}{n} + \mathcal O(n^{-2}), \end{equation} where the dots represent terms which are subdominant at large $\ell$. We see the result is again expressed as a series in $\frac{\ell}{n}$. \section{Propagator on the cylinder\label{sec:propagatorCylinder}} In this section we show how to construct the propagators corresponding to the fluctuations of the fields $r,\pi$ in \REf{eq:rpiDYExpansion}. From time-translation and rotation symmetry we know the propagator can be written as \begin{equation} \langle x(\tau_1,\vec n_1) y(\tau_2, \vec n_2) \rangle = D_{x y}(\tau_1-\tau_2,\vec n_1 \cdot \vec n_2) , \end{equation} where $x,y \in \{ r,\pi\}$ are fields and $\langle \dots \rangle$ is the $\tau$-ordered Wick contraction.
The quadratic Lagrangian yields a matrix equation similar to \REf{eq:EOMFluctuationsMatrix} for the propagator (note that it is not diagonal due to mixing between $\dot \pi$ and $r$) \begin{equation} -\left( \begin{array}{cc} \partial_\tau^2+\Delta_{\mathbb S^{d-1}} -M^2 & 2i \mu \partial_\tau \\ -2i \mu \partial_\tau & \partial_\tau^2+\Delta_{\mathbb S^{d-1}} \end{array} \right) \left( \begin{array}{cc} D_{rr} & D_{r\pi} \\ D_{\pi r} & D_{\pi \pi} \end{array} \right) = \delta(\tau_1-\tau_2)\delta^{(\mathbb S^{d-1})}(\vec n_1 \cdot \vec n_2 ). \end{equation} Expanding in spherical harmonics (in this case only with $\vec m = \vec 0$, which corresponds to Gegenbauer polynomials) \begin{equation} D(\tau,\vec n_1 \cdot \vec n_2) = \sum_\ell F^{(\ell)}(\tau) C^{(d/2-1)}_\ell(\vec n_1 \cdot \vec n_2), \label{eq:PropagatorGeneralP} \end{equation} we obtain \begin{equation} \label{eq:ModePropagator} -N_\ell \Omega_{d-2} \left( \begin{array}{cc} \partial_\tau^2-J_\ell -M^2 & 2i \mu \partial_\tau \\ -2i \mu \partial_\tau & \partial_\tau^2-J_\ell \end{array} \right) \left( \begin{array}{cc} F^{(\ell)}_{rr} & F^{(\ell)}_{r\pi} \\ F^{(\ell)}_{\pi r} & F^{(\ell)}_{\pi \pi} \end{array} \right) = C_\ell^{(d/2-1)}(1)\delta(\tau), \end{equation} where the normalization factor of the Gegenbauer polynomials is given by \begin{equation} N_\ell \equiv \int_{-1}^1 C^{(d/2-1)}_{\ell}(x) C^{(d/2-1)}_{\ell}(x) (1-x^2)^{\frac{d-3}{2}} d x =\frac{2^{4-d} \, \pi \, \Gamma(\ell+d-2)}{(2\ell+d-2) \, \ell! \, \Gamma^2\left (\frac{d}{2}-1\right )}. \end{equation} We can look for solutions of this equation for $\tau <0$ and $\tau >0$, which will be given by expressions similar to \REf{eq:rpiDYExpansion}, and then find the propagator by matching these solutions at $\tau =0$ with a specific discontinuity of the derivatives. Alternatively, we can Fourier transform \REf{eq:ModePropagator} and use (see~\cite{dlmf:Gegenbauers}, table 18.6.1) \begin{equation} C^{(d/2-1)}_{\ell}(1) = \frac{\Gamma(\ell+d-2)}{\ell! \, \Gamma(d-2)}, \label{eq:Gegenbauer1} \end{equation} to obtain \begin{equation} F^{(\ell)}(\tau) = \frac{2\ell+d-2}{(d-2)\Omega_{d-1}}\int \frac{d \omega}{2\pi} e^{-i \omega \tau} \frac{M^{(\ell)}(\omega)} {(\omega^2+\omega_B^2(\ell))(\omega^2+\omega_A^2(\ell))}, \label{eq:momentumPropagator} \end{equation} with \begin{equation} M^{(\ell)} (\omega)=\left( \begin{array}{cc} \omega^2+J_\ell & 2 \mu \omega \\ -2 \mu \omega & \omega^2+J_\ell +M^2 \end{array} \right). \end{equation} For $\ell\neq 0$ the integration in \REf{eq:momentumPropagator} can easily be done using Cauchy's theorem, resulting in \begin{equation} F^{(\ell)}(\tau)=\frac{2\ell+d-2}{(d-2)\Omega_{d-1}} \, \left ( \frac{M^{(\ell)}(-i\omega_A(\ell))e^{-\omega_A (\ell)\tau}}{2\omega_A(\ell)} - \frac{M^{(\ell)}(-i\omega_B(\ell))e^{-\omega_B(\ell) \tau}}{2\omega_B(\ell)} \right ) \frac{1}{\omega_B^2(\ell)-\omega_A^2(\ell)} \label{eq:propagatorJneq0tau>0} \end{equation} for $\tau>0$, and \begin{equation} F^{(\ell)}(\tau)=\frac{2\ell+d-2}{(d-2)\Omega_{d-1}} \, \left ( \frac{M^{(\ell)}(i\omega_A(\ell))e^{\omega_A(\ell) \tau}}{2\omega_A(\ell)} - \frac{M^{(\ell)}(i\omega_B(\ell))e^{\omega_B(\ell) \tau}}{2\omega_B(\ell)} \right ) \frac{1}{\omega_B^2(\ell)-\omega_A^2(\ell)} \label{eq:propagatorJneq0tau<0} \end{equation} for $\tau<0$. The same result can obviously be obtained directly from \REf{eq:rpiDYExpansion}.
Indeed, computing, say for $\tau_1<\tau_2$, the non-zero spin contribution to the time-ordered correlator, we get \begin{equation} \langle n | r(\tau_2) r(\tau_1) | n\rangle_\ell = \left ( \frac{J_\ell-\omega_A(\ell)^2}{2\omega_A(\ell)}e^{-\omega_A(\ell)|\tau_2-\tau_1|} + \frac{\omega_B(\ell)^2-J_\ell}{2\omega_B(\ell)}e^{-\omega_B(\ell)|\tau_2-\tau_1|}\right ) \frac{1}{\omega_B^2(\ell)-\omega_A^2(\ell)} \sum_{\vec m} Y_{\ell\vec m} Y^*_{\ell\vec m}, \end{equation} which upon using \REf{eq:Gegenbauer1} and (see~\cite{Frye:2012jj}) \begin{equation} \sum_{\vec m} Y_{\ell\vec m} (\vec n_1)Y^*_{\ell\vec m} (\vec n_2)= \frac{2\ell+d-2}{(d-2) \Omega_{d-1}}C_\ell^{(d/2-1)} (\vec n_1 \cdot \vec n_2) \end{equation} reproduces \REf{eq:propagatorJneq0tau>0}. Similarly, we can compute the $r\pi$ and $\pi \pi$ components of the propagator. Dealing with the $\ell=0$ modes is somewhat more subtle. The difficulty is that apart from the gapped mode corresponding to $(B_{0\vec0}, B_{0\vec0}^\dagger)$ there is also the gapless mode $\hat \pi, p_\pi$, for which \begin{equation} p_\pi | n \rangle = 0, \end{equation} and which does not have the Fock space structure. This does not present a problem for $\langle r r \rangle$: indeed, using \REf{eq:rpiDYExpansion} we get \begin{equation} \langle n | r(\tau_2) r(\tau_1) | n \rangle_0 = \frac{1}{2\omega_B(0)\Omega_{d-1}} e^{-\omega_B(0) |\tau_2-\tau_1|}, \end{equation} which is consistent with \REf{eq:propagatorJneq0tau>0} and \REf{eq:propagatorJneq0tau<0}. On the other hand considering correlators linear in $\pi$ is problematic. However, that is not an issue, for in all instances the field $\pi$ appears only in the exponent\footnote{Bear in mind that $\hat \pi$ is defined on a compact space (circle), since charge is quantized. As such, the corresponding canonical momentum $ p_\pi$ is defined only on the space of periodic functions. Otherwise $p_\pi$ is not Hermitian. Indeed, the following relation holds \begin{equation} \int_0^{2\pi} d\hat \pi \, \psi_2^*(\hat \pi) \left [-i \partial_{\hat \pi} \psi_1(\hat \pi) \right ] = \int_0^{2\pi} d\hat \pi \, \left [-i \partial_{\hat \pi}\psi_2(\hat \pi)\right ]^* \psi_1(\hat \pi), \end{equation} only if $\psi_2(\hat \pi) \psi_1(\hat \pi)\Big |_{0}^{2\pi}=0$, i.e. for periodic functions $\psi_i(\hat\pi)$.}, hence we only need to worry about correlators involving $e^{i \pi(\tau)/f}$. For instance, using the Baker-Campbell-Hausdorff formula we obtain (for $\tau<0$) \begin{equation} \langle e^{-i\pi(0)/f} e^{i \pi(\tau)/f} \rangle _0 =\exp \left [ -\frac{1-\frac{4\mu^2}{\omega_B^2(0)}}{2\Omega_{d-1}f^2} \tau \right ] \exp \left [ \frac{1}{f^2}\frac{4\mu^2}{\omega_B^2(0)} \frac{e^{\omega_B(0)\tau}-1}{2\omega_B(0)\Omega_{d-1}}\right ]. \end{equation} Comparing with the naive expectation \begin{equation} \langle e^{-i\pi(0)/f} e^{i \pi(\tau)/f} \rangle _0 = 1+ \frac{D^{(0)}_{\pi\pi}(|\tau|)-D^{(0)}_{\pi\pi}(0)}{f^2}+O(f^{-4}), \end{equation} it is consistent to define (compare with \REf{eq:propagatorJneq0tau>0} and \REf{eq:propagatorJneq0tau<0}) \begin{equation} F^{(0)}_{\pi \pi}(\tau) = -\frac{1-\frac{4\mu^2}{\omega_B^2(0)}}{2\Omega_{d-1}} |\tau| + \frac{4\mu^2}{\omega_B^2(0)} \frac{e^{-\omega_B(0)|\tau|}}{2\omega_B(0)\Omega_{d-1}}+\mathrm{const} . \end{equation} Similarly, computing $\langle e^{-i\pi(0)/f} r(0) e^{i \pi(\tau)/f} \rangle _0$ allows us to define \begin{equation} F^{(0)}_{r \pi}(\tau) = \mathrm{sign}(\tau)\frac{i\mu}{\omega_B^2(0)} \frac{e^{-\omega_B(0)|\tau|}}{\Omega_{d-1}}+\mathrm{const} .
\end{equation} \section{3-pt function computation \label{app:lambdaComputation}} We start from \begin{equation} \left \langle n | \left ( \bar \phi \phi \right )(0, \hat n_{d}) | n \right \rangle = \frac{1}{2} \left \langle n | f^2 + 2 f r (0, \hat n_{d}) + r^2(0, \hat n_{d}) | n \right \rangle, \end{equation} and then, expanding around the saddle, we get for the expectation value of the bare fields \begin{equation} \left \langle n | \left ( \bar \phi \phi \right )(0, \hat n_{d}) | n \right \rangle = \frac{f^2}{2} - \left \langle r(0, \hat n_{d}) \int d\tau d \Omega_{d-1}\left [ r (\partial\pi)^2 - i \mu r^2 \dot \pi +\frac{\lambda f^2 r^3}{4} \right ] \right \rangle + \frac{1}{2}\langle r^2(0, \hat n_{d}) \rangle. \end{equation} Here, as in section \ref{sec:3ptfunction}, the symbol $\mu$ refers to $\mu_4(\lambda n,d)$. One must keep in mind that $m$, $f$, $J_\ell$, $\omega_{A,B}(\ell)$ are functions of $d$, $\mu$ and $n$. The symbol $\mu_*$ will refer to $\mu_4(\lambda_* n,4)$. Using the propagator \REf{eq:PropagatorGeneralP} we compute the contractions for the terms without spatial derivatives \begin{eqnarray} \int \langle r_1 r_2^3\rangle & \to & 3\Omega_{d-1} \left [ \int d\tau F^{(0)}_{rr} (\tau)\right ] \, \left [ \sum_{\ell=0}^\infty F^{(\ell)}_{rr} (0) C_\ell^{(d/2-1)}(1) \right], \\ \int \langle r_1 \dot \pi_2 r_2^2\rangle & \to & 2\Omega_{d-1} \left [ \int d\tau F^{(0)}_{rr} (\tau)\right ] \, \left [ \sum_{\ell=0}^\infty \dot F^{(\ell)}_{\pi r} (0) C_\ell^{(d/2-1)}(1) \right] \nonumber \\ && +\Omega_{d-1} \left [ \int d\tau \dot F^{(0)}_{\pi r} (\tau)\right ] \, \left [ \sum_{\ell=0}^\infty F^{(\ell)}_{rr} (0) C_\ell^{(d/2-1)}(1) \right], \\ \int \langle r_1 \dot \pi_2^2 r_2\rangle & \to & 2 \Omega_{d-1} \left [ \int d\tau \dot F^{(0)}_{\pi r} (\tau)\right ] \, \left [ \sum_{\ell=0}^\infty \dot F^{(\ell)}_{\pi r} (0) C_\ell^{(d/2-1)}(1) \right] \nonumber \\ && -\Omega_{d-1} \left [ \int d \tau F^{(0)}_{r r} (\tau)\right ] \, \left [ \sum_{\ell=0}^\infty \ddot F^{(\ell)}_{\pi \pi} (0) C_\ell^{(d/2-1)}(1) \right], \\ \int \langle r^2\rangle & \to & \left [ \sum_{\ell=0}^\infty F^{(\ell)}_{rr} (0) C_\ell^{(d/2-1)}(1) \right], \end{eqnarray} where the indices indicate the evaluation point, for example $r_1 = r(\tau_1, \vec n_1)$. For the last term \begin{equation} \int g^{ij}_2\langle r_1 \partial_i \pi_2 \partial_j \pi_2 r_2\rangle \end{equation} in order to find the contraction of two fields at the same point it is necessary to introduce a point splitting, compute the derivative(s) and then take the limit. For example \begin{equation}\label{eq:contractionSpatialDerivatives1} \langle r_2 \partial_i \pi_2\rangle = \lim_{\vec n_2'\to \vec n_2} \partial'_i D_{\pi r}(0,\vec n_2'\cdot \vec n_2) = 0, \end{equation} which vanishes since $\vec n_2'\cdot \vec n_2$ is maximal for $\vec n_2' = \vec n_2$. Similarly, using the chain rule and the same argument we show \begin{equation}\label{eq:contractionSpatialDerivatives2} \langle \partial_i \pi_2 \partial_j \pi_2\rangle = \lim_{\vec n_2'\to \vec n_2} \partial'_i \partial_j D_{\pi \pi}(0,\vec n_2'\cdot \vec n_2) = \left. \frac{d}{d x} D_{\pi\pi}(0,x) \right|_{x= 1} (\partial_i \vec n_2) \cdot(\partial_j \vec n_2), \end{equation} and one can show, for example by choosing specific coordinates on the sphere, that \begin{equation} g_2^{ij} (\partial_i \vec n_2) \cdot(\partial_j \vec n_2) = d-1. \end{equation}
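The point-splitting steps \REf{eq:contractionSpatialDerivatives1}--\REf{eq:contractionSpatialDerivatives2} and the last identity are easy to verify in explicit coordinates. A minimal SymPy sketch on $\mathbb S^{2}$ (i.e. $d=3$), with the hypothetical test profile $D(x)=x^3$ so that $D'(1)=3$:
\begin{verbatim}
# Sketch: coincident-point contractions on S^2 (d = 3), test D(x) = x^3.
import sympy as sp

t1, p1, t2, p2 = sp.symbols('theta1 phi1 theta2 phi2', real=True)
nv = lambda t, p: sp.Matrix([sp.sin(t)*sp.cos(p),
                             sp.sin(t)*sp.sin(p), sp.cos(t)])
x = (nv(t1, p1).T * nv(t2, p2))[0, 0]     # n' . n
D = x**3
coinc = {t1: t2, p1: p2}

print(sp.simplify(sp.diff(D, t1).subs(coinc)))     # 0: first derivative vanishes
dtt = sp.simplify(sp.diff(D, t1, t2).subs(coinc))  # D'(1) (d_th n).(d_th n)
dpp = sp.simplify(sp.diff(D, p1, p2).subs(coinc))  # D'(1) (d_ph n).(d_ph n)
print(dtt, dpp)                                    # 3, 3*sin(theta2)**2
print(sp.simplify((dtt + dpp/sp.sin(t2)**2)/3))    # 2 = d - 1
\end{verbatim}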
Using (see~\cite{dlmf:Gegenbauers}, eq.~(18.9.19)) \begin{equation} \label{eq:dGegenbauer} \frac{d}{dx} C_n^{(\lambda)}(x) = 2\lambda C_{n-1}^{(\lambda+1)}(x), \end{equation} and (see \REf{eq:Gegenbauer1}) \begin{equation} C_{\ell-1}^{(d/2)}(1)= C_\ell^{(d/2-1)}(1) \, \frac{J_\ell}{(d-1)(d-2)}, \end{equation} we obtain \begin{equation} \int g^{ij}_2\langle r_1 \partial_i \pi_2 \partial_j \pi_2 r_2\rangle \to \Omega_{d-1} \left [ \int d \tau F^{(0)}_{r r} (\tau)\right ] \, \left [ \sum_{\ell=0}^\infty J_\ell F_{\pi \pi}^{(\ell)}(0)C_\ell^{(d/2-1)}(1) \right]. \end{equation} For what follows we will need the explicit expressions \begin{eqnarray} F_{rr}^{(0)}(\tau) & = & \frac{e^{-\omega_B(0) |\tau|}}{2 \Omega_{d-1}\,\omega_B(0)}, \\ \dot F_{\pi r}^{(0)}(\tau) & = & \frac{i \mu \, e^{-\omega_B(0) |\tau|}}{\Omega_{d-1} \, \omega_B(0)}, \\ F_{r r}^{(0)}(0) & = & \frac{1}{2 \Omega_{d-1}\,\omega_B(0)}, \\ F_{r r}^{(\ell)}(0) & = & \frac{2\ell+d-2}{\Omega_{d-1}(d-2)} \frac{\omega_B(\ell)\omega_A(\ell)+J_\ell}{2\omega_B(\ell)\omega_A(\ell) \left[ \omega_B(\ell)+\omega_A(\ell) \right ]}, ~~ {\ell \neq 0}, \\ F_{\pi \pi}^{(\ell)}(0) & = & \frac{2\ell+d-2}{\Omega_{d-1}(d-2)} \frac{\omega_B(\ell)\omega_A(\ell)+J_\ell+2(\mu^2-m^2)}{2\omega_B(\ell)\omega_A(\ell) \left[ \omega_B(\ell)+\omega_A(\ell) \right ]}, ~~ {\ell \neq 0}, \\ \dot F_{\pi r}^{(0)}(0) & = & \frac{i \mu}{ \Omega_{d-1} \, \omega_B(0)}, \\ \dot F_{\pi r}^{(\ell)}(0) & = & \frac{2\ell+d-2}{\Omega_{d-1}(d-2)} \frac{i \mu}{\omega_B(\ell)+ \omega_A(\ell)}, ~~ {\ell \neq 0}, \\ \ddot F_{\pi \pi}^{(0)}(0) & = & \frac{2\mu^2}{\Omega_{d-1} \, \omega_B(0)}, \\ \ddot F_{\pi \pi}^{(\ell)}(0) & = & \frac{2\ell+d-2}{\Omega_{d-1}(d-2)} \frac{\omega^2_B(\ell)+\omega_A^2(\ell)+\omega_B(\ell)\omega_A(\ell)-J_\ell-2(\mu^2-m^2)}{2 \left [\omega_B(\ell)+ \omega_A(\ell) \right ]}, ~~ {\ell \neq 0}, \end{eqnarray} and the integrals \begin{eqnarray} \int d\tau F^{(0)}_{rr} (\tau) & = & \frac{1}{\Omega_{d-1}\,\omega_B^2(0)}, \\ \int d\tau \dot F^{(0)}_{\pi r} (\tau) & = & \frac{2i \mu}{\Omega_{d-1}\,\omega_B^2(0)}. \end{eqnarray} We obtain \begin{eqnarray} \left \langle n | \bar \phi \phi (0,\hat n_d) | n \right \rangle & = & \frac{n}{2 \mu \Omega_{d-1}} -\frac{2(\mu^2-m^2)}{\omega_B^2(0)}\sum_{\ell=0}^\infty F_{rr}^{(\ell)}(0) C_\ell^{(d/2-1)}(1) -\frac{2i\mu}{\omega_B^2(0)}\sum_{\ell=0}^\infty \dot F_{\pi r}^{(\ell)}(0) C_\ell^{(d/2-1)}(1) \nonumber \\ && {} +\frac{1}{\omega_B^2(0)}\sum_{\ell=0}^\infty \left [ \ddot F_{\pi \pi}^{(\ell)}(0) - J_\ell F_{\pi \pi}^{(\ell)}(0) \right ]C_\ell^{(d/2-1)}(1). \end{eqnarray} This can be further simplified using \begin{equation} \omega_B^2(\ell)\omega_A^2(\ell) = J_\ell^2+2J_\ell (\mu^2-m^2),~~ \omega_B^2(\ell)+\omega_A^2(\ell) =2 (J_\ell +3\mu^2-m^2), \end{equation} leading to \begin{equation} \left \langle n | (\bar \phi \phi)(0,\hat n_{d}) | n \right \rangle = \frac{n}{2 \mu \Omega_{d-1}} +\sum_{\ell=0}^\infty \frac{1}{\omega_B^2(0)} \frac{2\ell+d-2}{\Omega_{d-1}(d-2)} C_\ell^{(d/2-1)}(1) \frac{\omega_B(\ell) \omega_A(\ell) (3\mu^2+m^2)-J_\ell(\mu^2-m^2)}{\omega_B(\ell) \omega_A(\ell) \left [\omega_B(\ell) + \omega_A(\ell)\right ]}. \label{eq:3pt_Full} \end{equation}
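All the $\ell\neq0$ equal-time entries above follow from \REf{eq:propagatorJneq0tau>0} by elementary algebra, which can be confirmed symbolically. A SymPy sketch, with the common prefactor $\frac{2\ell+d-2}{(d-2)\Omega_{d-1}}$ stripped and \texttt{M2} standing for $2(\mu^2-m^2)$:
\begin{verbatim}
# Sketch: equal-time values from the tau > 0 residue formula.
import sympy as sp

wA, wB, J, M2, mu, tau = sp.symbols('omega_A omega_B J M2 mu tau',
                                    positive=True)
Mm = lambda w: sp.Matrix([[w**2 + J, 2*mu*w],
                          [-2*mu*w, w**2 + J + M2]])
F = (Mm(-sp.I*wA)*sp.exp(-wA*tau)/(2*wA)
     - Mm(-sp.I*wB)*sp.exp(-wB*tau)/(2*wB))/(wB**2 - wA**2)

checks = [
    F[0, 0].subs(tau, 0) - (wA*wB + J)/(2*wA*wB*(wA + wB)),
    F[1, 1].subs(tau, 0) - (wA*wB + J + M2)/(2*wA*wB*(wA + wB)),
    sp.diff(F[1, 0], tau).subs(tau, 0) - sp.I*mu/(wA + wB),
    sp.diff(F[1, 1], tau, 2).subs(tau, 0)
        - (wB**2 + wA**2 + wA*wB - J - M2)/(2*(wA + wB)),
]
print([sp.simplify(c) for c in checks])   # [0, 0, 0, 0]
\end{verbatim}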
Denoting the summand in \REf{eq:3pt_Full} by \begin{equation} \frac{S_\ell(\mu,m,d)}{(d-2)\Omega_{d-1}} \end{equation} and considering its asymptotics at $\ell \to \infty$ \begin{equation} S_\ell(\mu,m,d) \underset{\ell\to \infty}{\equiv} c_{-1}(\mu,m,d) \ell^{d-3}+c_{0}(\mu,m,d) \ell^{d-4} + c_{1}(\mu,m,d) \ell^{d-5} + \dots, \end{equation} we get \begin{eqnarray} \left \langle n | (\bar \phi \phi ) (0,\hat n_d) | n \right \rangle & = & \frac{n}{2 \Omega_{d-1}\mu_4((\lambda_*+\delta \lambda)n, d) } \\ && {} + \frac{1}{(d-2)\Omega_{d-1}} \Bigg \{S_0(\mu,m,d) +\sum_{\ell=1}^\infty \Big [S_\ell(\mu,m,d) - c_{-1}(\mu,m,d) \ell^{d-3} \nonumber \\ && {} -c_{0}(\mu,m,d) \ell^{d-4} - c_{1}(\mu,m,d) \ell^{d-5} \Big ] \nonumber \\ && {} + c_{-1}(\mu,m,d) \zeta(3-d)+c_{0}(\mu,m,d) \zeta(4-d) + c_{1}(\mu,m,d)\zeta(5-d) \Bigg \}_{\lambda_*} \nonumber \end{eqnarray} where we have made explicit the dependence of $\mu_4$ on the 1-loop coupling counterterm, with \begin{equation} \delta \lambda = \frac{5(\lambda_*)^2}{16 \pi^2} \frac{1}{4-d}, \end{equation} while the 1-loop terms do not need any counterterm corrections at this order. The renormalized coupling is denoted by $\lambda_*$, which at this stage is considered independent of the dimension. Expanding the first term in $\lambda$ and the other terms in $4-d$, keeping only $O(1)$ terms (these are the only ones that are needed at this order), leads to \begin{eqnarray} \left \langle n | (\bar \phi \phi ) (0,\hat n_d) | n \right \rangle & = & \Bigg\{ \frac{n}{2\Omega_{d-1}\mu} - \frac{5\lambda^2}{16 \pi^2}\frac{1}{4-d}\frac{n}{2\Omega_{d-1}\mu^2} \, \frac{\partial \mu}{\partial \lambda} \\ && {} + \frac{1}{(d-2)\Omega_{d-1}} \Bigg( R(\mu_*) + \frac{c_{1P}(\mu,m)}{4-d}+c_{1F}(\mu_*,1) \Bigg) \Bigg\}_{\lambda_*} \nonumber \end{eqnarray} where we introduced \begin{eqnarray} R(\mu)&=& S_0(\mu,1,4) +\sum_{\ell=1}^\infty \left [ S_\ell(\mu,1,4) - c_{-1}(\mu,1,4) \ell -c_{0}(\mu,1,4) -\frac{c_{1}(\mu,1,4)}{\ell} \right ] \nonumber \\ && {} + c_{-1}(\mu,1,4) \zeta(-1)+c_{0}(\mu,1,4) \zeta(0), \label{eq:FiniteSum} \end{eqnarray} and \begin{equation} c_{1}(\mu,m,d)\zeta(5-d) \underset{d\to 4}{=}\frac{c_{1P}(\mu,m)}{4-d}+c_{1F}(\mu,m), \end{equation} with \begin{eqnarray} \label{eq:Zeta1Pole} c_{1P}(\mu,m) & = & \frac{m^2+2m^4+\mu^2-3m^2\mu^2-\mu^4}{2(3\mu^2-m^2)}, \\ c_{1F}(\mu,m) & = & \frac{12m^4-5\mu^2-6\mu^4-m^2(18\mu^2+5)}{12(3\mu^2-m^2)}. \label{eq:Zeta1Finite} \end{eqnarray} In the theory at hand, equation \REf{eq:mu_d_phi4} takes the form \begin{equation} \label{eq:mu_equation} \mu^2-m^2 = \frac{n \lambda}{4 \mu \Omega_{d-1}} . \end{equation}
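Numerically, \REf{eq:mu_equation} is just a cubic equation for $\mu$. A small Python sketch in $d=4$, with $m=1$ in units of the sphere radius and $\Omega_{d-1}=2\pi^{d/2}/\Gamma(d/2)$:
\begin{verbatim}
# Sketch: solve mu^2 - m^2 = n lambda/(4 mu Omega_{d-1}) for mu (d = 4).
import math
import numpy as np

def mu_of(nlam, d=4, m=1.0):
    Omega = 2*math.pi**(d/2)/math.gamma(d/2)   # area of the unit S^{d-1}
    # equivalent cubic: 4 Omega mu^3 - 4 Omega m^2 mu - n lambda = 0
    roots = np.roots([4*Omega, 0.0, -4*Omega*m**2, -nlam])
    return max(r.real for r in roots if abs(r.imag) < 1e-12 and r.real > 0)

for nlam in (0.1, 1.0, 10.0, 100.0):
    print(nlam, mu_of(nlam))   # mu -> (n lambda/(4 Omega))^(1/3) at large n lambda
\end{verbatim}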
This implies \begin{equation} \frac{\partial \mu}{\partial \lambda} = \frac{n}{4 \Omega_{d-1} (3 \mu^2-m^2)} , \end{equation} which yields \begin{eqnarray} \left \langle n | (\bar \phi \phi ) (0,\hat n_d) | n \right \rangle & = & \Bigg\{ \frac{n}{2\mu \Omega_{d-1}}-\frac{5}{16 \pi^2} \, \frac{1}{4-d} \, \frac{\lambda^2 n^2}{8 \Omega_{d-1}^2 \mu^2 (3\mu^2 -m^2)} \\ && {} + \frac{1}{(d-2)\Omega_{d-1}} \Bigg( R(\mu_*) + \frac{c_{1P}(\mu,m)}{4-d}+c_{1F}(\mu_*,1) \Bigg) \Bigg\}_{\lambda_*} .\nonumber \label{eq:3pt_Expanded} \end{eqnarray} Taking into account the normalization \REf{eq:NormRenorm} and expanding in $\lambda$ we get \begin{eqnarray} \lambda_{\bar \phi \phi} & = & Z^{-1}_{\bar \phi \phi } \left \langle n | (\bar \phi \phi ) (0,\hat n_d) | n \right \rangle \\ & = & \Bigg\{ \frac{n (d-2)}{2\mu}+ \frac{\lambda n(d-2)}{16 \pi^2 \mu (4-d)} + \frac{\lambda n (d-2)}{32\pi^2 \mu}(1+\gamma+\log\pi) \nonumber \\ & & {} -\frac{5}{16 \pi^2} \, \frac{1}{4-d} \, \frac{\lambda^2 n^2}{8 \Omega_{d-1}^2 \mu^2 (3\mu^2 -m^2)}+ R(\mu_*) + \frac{c_{1P}(\mu,m)}{4-d}+c_{1F}(\mu_*,1) \Bigg\}_{\lambda_*}. \nonumber \end{eqnarray} Using (\ref{eq:mu_equation}) to substitute $\lambda n$, we can gather the three order $\frac{1}{4-d}$ terms, then expand $\mu$, $m$ and $\Omega_{d-1}$ in $4-d$ to show the cancellation of divergences and obtain the finite part \begin{align} & \frac{1}{4-d} \left( \frac{(d-2)\Omega_{d-1} (\mu^2-m^2)}{4 \pi^2 } -\frac{5 \Omega_{d-1} (d-2) (\mu^2-m^2)^2}{8 \pi^2 (3\mu^2 -m^2)} + c_{1P}(\mu,m) \right) \nonumber \\ & \overset{d\to 4}{\longrightarrow} \ \ \frac{2(\mu_*^2+1)-(\gamma + \log \pi)(\mu_*^4+2\mu_*^2-3)}{4(3\mu_*^2-1)} . \end{align} Some of the $\gamma+\log\pi$ terms appeared from \begin{equation} \label{eq:OmegaDerivative} \frac{1}{\Omega_3} \left.\frac{\partial \Omega_{d-1}}{\partial d}\right|_{d=4} = \frac{1}{2} \left ( \gamma +\log \pi -1\right ) . \end{equation} We can as well substitute $\lambda n$ in the term \begin{equation} \frac{\lambda n (d-2)}{32\pi^2 \mu}(1+\gamma+\log\pi) \ \overset{d\to 4}{\longrightarrow}\ \frac{\mu_*^2-1}{2}(1+\gamma+\log\pi) . \end{equation} We see that there is not yet a full cancellation of the $\gamma+\log\pi$ terms. The reason is that the very first term, which is enhanced by $n$, contains $\mu(\lambda_* n,d)$, which we still have to expand in $4-d$, bringing $n(4-d)$ contributions at NLO. To this end, we can express the derivative of $\mu$ with respect to $d$ from~\REf{eq:mu_d_phi4}, \begin{equation} \left. \frac{\partial \mu}{\partial d}\right|_{d=4} =\left. \frac{\mu}{3\mu^2-1} \left [ 1-\frac{1}{\Omega_3} (\mu^2-1) \frac{\partial \Omega_{d-1}}{\partial d} \right ] \right|_{d=4}, \end{equation} and use (\ref{eq:OmegaDerivative}). This yields \begin{equation} \frac{n(d-2)}{2 \mu} = \frac{n}{\mu_*} - \frac{(4-d)n \left( \mu_*^2-1 \right) (2+\gamma+\log\pi) }{2 \mu_*\left( 3\mu_*^2-1 \right)} . \end{equation} Now that the $\frac{1}{4-d}$ poles have cancelled and everything has been expanded to the relevant order in $4-d$, we can take the theory to the fixed point (\ref{eq:phi4FixedPoint}). This yields for the previous equation \begin{equation} \frac{n(d-2)}{2 \mu} = \frac{n}{\mu_*} - \frac{5 \left( \mu_*^2-1 \right)^2 (2+\gamma+\log\pi) }{4 ( 3\mu_*^2-1 )} . \end{equation} Putting everything together, we notice that all the $\gamma+\log\pi$ terms cancel, and we get the final result \begin{equation} \lambda_{\bar \phi \phi} = \frac{n}{\mu_*}- \frac{2 \mu_*^4-7\mu_*^2 +3}{2 (3\mu_*^2-1)} + R(\mu_*) + c_{1F}(\mu_*,1) .
\end{equation} Plugging in the explicit expressions from~\REf{eq:FiniteSum} and~\REf{eq:Zeta1Finite} results in \begin{equation} \lambda_{\bar \phi \phi} = \frac{n}{\mu_*}+\frac{2(3\mu_*^2+1)}{\left [ 2(3\mu_*^2-1) \right]^{3/2}}-\frac{3\mu_*^4-2\mu_*^2+3}{2(3\mu_*^2-1)} +\sum_{\ell=1}^\infty \left [ S_\ell(\mu_*) - c_{-1}(\mu_*) \ell -c_{0}(\mu_*) -\frac{c_{1}(\mu_*)}{\ell} \right ], \end{equation} where we have denoted \begin{equation} S_\ell(\mu) = S_\ell(\mu,1,4) , \qquad c_i(\mu) = c_i(\mu,1,4) . \end{equation} \section{Feynman diagram computation of $\protect\Delta_{(\protect\bar \protect\phi \protect\phi)^k}$}\label{app:phiphiDimensionFeynman} We compute diagrammatically the one-loop anomalous dimension of $(\bar \phi \phi)^k$ in the theory \REf{eq:phi4}. We consider the MS renormalization of operators in the following momentum-space correlator\footnote{To be more precise, the operator $(\bar\phi\phi)^k$ mixes with other operators \cite{Brown:1979pq}. However, since this operator is a primary of the critical theory, the mixing cancels in that case. This means we can neglect the mixing when diagrammatically computing the anomalous dimension off-criticality.}: \begin{equation} \langle (\bar \phi \phi)^k \bar\phi(p) \cdots \phi(p)\cdots \rangle = Z_{(\bar\phi\phi)^k} Z_\phi^{2k} \langle \left[ (\bar \phi \phi)^k \right] [\bar\phi](p)\cdots [\phi](p) \cdots \rangle \,, \end{equation} where there are $k$ insertions of the fields $\phi(p)$ and $\bar\phi(p)$. The field renormalization factor $Z_\phi$ has no one-loop contribution, so we set it equal to $1$. The bare $(\bar\phi\phi)^k$ operator is normalized: \begin{equation} \btf{x} \vertex [bigcross] (x) {}; \vertex [above left=.6cm of x] (a); \vertex [above right=.6cm of x] (c); \vertex [below left=.6cm of x] (b); \vertex [below right=.6cm of x] (d); \end{feynman}\end{tikzpicture} ~=~ 1\,. \end{equation} We do not draw exterior lines in the diagrams.
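Before turning to the diagrams, note that the end result of this appendix can be checked symbolically: given the one-loop anomalous dimension computed below, the $\varepsilon$-expansion of $\Delta_{(\bar\phi\phi)^k}$ follows by simple algebra. A SymPy sketch, assuming the Wilson-Fisher value $\lambda_*=16\pi^2\varepsilon/5$ (the normalization set by (\ref{eq:phi4FixedPoint}), consistent with the counterterm $\delta\lambda$ of the previous appendix):
\begin{verbatim}
# Sketch: epsilon-expansion of Delta_{(phibar phi)^k} at the fixed point.
import sympy as sp

k, lam, eps = sp.symbols('k lambda epsilon')
gamma = k*(3*k - 1)*lam/(16*sp.pi**2)     # one-loop result derived below
lam_star = 16*sp.pi**2*eps/5              # assumed fixed-point coupling
d = 4 - eps
Delta = 2*k*(d/2 - 1) + gamma.subs(lam, lam_star)
print(sp.simplify(Delta - (2*k + sp.Rational(3, 5)*k*(k - 2)*eps)))   # 0
\end{verbatim}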
There are three diagrams at one-loop level, of which we compute the divergent part: \begin{equation} \btf{x} \vertex [bigcross] (x) {}; \vertex at ($(x)$) (xb); \vertex [right=1cm of x] (y); \vertex [above left=.6cm of x] (a); \vertex [below left=.6cm of x] (b); \vertex [above right=.6cm of y] (c); \vertex [below right=.6cm of y] (d); \diagram*[inline=(y)] { (xb) -- [out=45, in=135, with arrow=0.5,looseness=1.3] (y), (xb) -- [out=-45, in=-135, with arrow=0.5, looseness=1.3] (y), (y) -- [with arrow=0.6] (c), (y) -- [with arrow=0.6] (d), }; \end{feynman}\end{tikzpicture} ~=~ \btf{x} \vertex (x); \vertex [bigcross, right=1cm of x] (y) {}; \vertex at ($(y)$) (yb); \vertex [above left=.6cm of x] (a); \vertex [below left=.6cm of x] (b); \vertex [above right=.6cm of y] (c); \vertex [below right=.6cm of y] (d); \diagram*[inline=(y)] { (a) -- [fermion] (x), (b) -- [fermion] (x), (x) -- [out=45, in=135, with arrow=0.5,looseness=1.3] (yb), (x) -- [out=-45, in=-135, with arrow=0.5, looseness=1.3] (yb), }; \end{feynman}\end{tikzpicture} ~=~ \frac{k(k-1)}{4}(-\lambda) \frac{1}{8\pi^2 \varepsilon} + O(\varepsilon^0) \qquad \btf{q} \vertex (q); \vertex [above=0.5cm of q] (x); \vertex [bigcross] at (x) (xb) {}; \vertex [below=0.5cm of q] (y); \vertex [above left = .6cm of x] (a); \vertex [above right = .6cm of x] (c); \vertex [below left = .6cm of y] (b); \vertex [below right = .6cm of y] (d); \diagram*[inline=(q)] { (y) -- [out=135, in=-135, with arrow=0.5,looseness=1.3] (x), (x) -- [out=-45, in=45, with arrow=0.5,looseness=1.3] (y), (b) -- [fermion] (y), (y) -- [with arrow=0.6] (d), }; \end{feynman}\end{tikzpicture} ~=~ k^2(-\lambda)\frac{1}{8\pi^2 \varepsilon} + O(\varepsilon^0)\,. \end{equation} Summing all diagrams yields \begin{equation} \langle (\bar \phi \phi)^k \bar\phi(p) \cdots \phi(p)\cdots \rangle = 1 - \frac{k(3k-1)\lambda}{16 \pi^2 \varepsilon} + O(\lambda\varepsilon^0,\lambda^2)\,, \end{equation} from which we get \begin{equation} Z_{(\bar\phi\phi)^k} = 1-\frac{k(3k-1)\lambda}{16 \pi^2 \varepsilon} + O(\lambda^2)\,. \end{equation} The one-loop anomalous dimension is then given by \begin{equation} \gamma_{(\bar\phi\phi)^k} = -\lambda \varepsilon \frac{\partial \log Z_{(\bar\phi\phi)^k}}{\partial \lambda} = \frac{k(3k-1)\lambda}{16\pi^2}+ O(\lambda^2)\,. \end{equation} At the Wilson-Fisher fixed point (\ref{eq:phi4FixedPoint}) the dimension is \begin{equation} \Delta_{(\bar\phi\phi)^k} = 2 k \left( \frac{d}{2}-1\right) + \gamma_{(\bar\phi\phi)^k} = 2k + \frac{3k(k-2)}{5} \varepsilon + O(\varepsilon^2)\,. \end{equation} \newpage \bibliographystyle{utphys}
\section{Introduction} The spin-1/2 antiferromagnetic Heisenberg XXZ chain is one of the most fundamental models for one-dimensional quantum magnetism, which is given by the Hamiltonian \begin{align} \mathcal{H}=\sum_{j=1}^{N} \( S_{j}^x S_{j+1}^x + S_{j}^y S_{j+1}^y + \d S_{j}^z S_{j+1}^z \), \label{Ham} \end{align} where $S_j^{\alpha} = \sg_j^{\alpha}/2$ with $\sg_j^{\alpha}$ being the Pauli matrices acting on the $j$-th site, $\d$ is the anisotropy parameter, and $N$ is the number of lattice sites of this system. Here we impose the periodic boundary condition $\vec{S}_{j+N}=\vec{S}_j$. For $\d>1$, the system is gapful and the model is called the massive XXZ model. Meanwhile, for $-1<\d\leq1$, the system is gapless and the model is called the massless XXZ model. In particular, for the isotropic case $\d=1$ it is called the XXX model. The exact eigenvalues and eigenvectors of this model can be obtained by the Bethe Ansatz method \cite{Bethe, TakaBook}. We shall give a brief survey of this method below. First we assume the wave function $|\Psi\ket$ with $M$ down spins in the form \begin{align} |\Psi\ket&=\sum_{1\leq n_1<\cdots<n_M\leq N} \,\,\, \sum_{\sg\in S_M} A(\sg)\exp\[i\sum_{j=1}^M k_{\sg(j)}n_j\] |n_1,...,n_M\ket, \nn\\ &A(\sg)=\epsilon(\sg)\prod_{l<j}\(e^{i(k_{\sg(l)}+k_{\sg(j)})} +1-2\d e^{ik_{\sg(l)}}\), \label{wf} \end{align} where $\sg$ denotes an element of the symmetric group $S_M$, $\epsilon(\sg)$ is the sign of the permutation, and $|n_1,...,n_M\ket$ signifies the state where the spins at the positions $n_1,\cdots,n_M$ point downward and all the other spins point upward. If the quasi-momenta $\{k_1,k_2,\cdots,k_M\}$ satisfy the Bethe ansatz equations \begin{align} e^{ik_jN}=\prod_{l\neq j}^M\(-\frac{e^{i(k_j+k_l)}+1-2\d e^{ik_j}}{e^{i(k_j+k_l)}+1-2\d e^{ik_l}}\), \qquad (j=1,2,\cdots,M), \end{align} the wave function (\ref{wf}) becomes an eigenvector of the Hamiltonian (\ref{Ham}): \begin{align} \mathcal{H}|\Psi\ket=E|\Psi\ket, \qquad E=\frac{N\d}4+\sum_{j=1}^M(\cos k_j-\d). \end{align} In the thermodynamic limit $N\to\infty$, the ground state energy per site $e_0$ of the massless XXZ model, for example, can be computed by analyzing the Bethe ansatz equations: \begin{align} e_0=\frac{\d}4-\frac{\sin(\pi\nu)}{2\pi}\int^{\infty}_{-\infty}dt \, \frac{\sh[(1-\nu)t]}{\sh t\ch(\nu t)}, \label{e0} \end{align} where we parametrize the anisotropy parameter as $\d=\cos(\pi\nu)$. In particular, in the XXX case $\d=1$, the ground state energy per site (\ref{e0}) simplifies to \begin{align} e_0=\frac14-\ln2. \label{e0xxx} \end{align} In the same way, other physical quantities in the thermodynamic limit, such as the specific heat, magnetic susceptibility, elementary excitations, etc., can be evaluated exactly, even at finite temperature, by the Bethe ansatz method \cite{TakaBook}. The exact calculation of the correlation functions, however, is still a difficult problem, even in the simplest case of static correlation functions at zero temperature. The exceptional case is ${\d=0}$, where the system reduces to a lattice free-fermion model by the Jordan-Wigner transformation. In this case, we can calculate arbitrary correlation functions by means of Wick's theorem \cite{Lieb61,McCoy68}. However, there have been rapid developments recently in the exact evaluation of correlation functions for the ${\d\ne0}$ case as well. Below we shall give a historical review of them, concentrating mainly on the static correlation functions of the XXX model $\d=1$ at zero temperature for the infinite system $N\to\infty$.
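The integral in (\ref{e0}) is straightforward to evaluate numerically. A small Python sketch: at the free-fermion point $\nu=1/2$ it reproduces the known XX-chain value $-1/\pi$, and as $\nu\to0$ it tends to the XXX value (\ref{e0xxx}) (the integrand is rewritten in a form stable at large $t$, where it decays like $2e^{-2\nu t}$):
\begin{verbatim}
# Sketch: ground state energy per site of the massless XXZ chain.
import numpy as np
from scipy.integrate import quad

def e0(nu):
    # sinh((1-nu)t)/(sinh(t) cosh(nu t)), rewritten stably for large t;
    # the integrand tends to 1 - nu as t -> 0, so the tiny lower cutoff
    # is harmless.
    f = lambda t: (2*np.exp(-2*nu*t)*(-np.expm1(-2*(1 - nu)*t))
                   / ((-np.expm1(-2*t))*(1 + np.exp(-2*nu*t))))
    val = 2*quad(f, 1e-12, np.inf, limit=400)[0]     # even integrand
    return np.cos(np.pi*nu)/4 - np.sin(np.pi*nu)/(2*np.pi)*val

print(e0(0.5), -1/np.pi)              # Delta = 0: both -0.31830...
print(e0(0.05), 0.25 - np.log(2))     # approaches -0.44314... as nu -> 0
\end{verbatim}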
Until quite recently, only the correlation functions within three lattice sites had been known: \begin{align} \zcor{1}{2}&=\frac{1}{12}-\frac{1}{3}\za{1}=-0.1477157268533151 \cdots , \label{first_neighbor} \\ \zcor{1}{3}&=\frac{1}{12}-\frac{4}{3}\za{1}+\za{3}=0.06067976995643530 \cdots , \label{second_neighbor} \end{align} where $\zeta_a(s)$ is the alternating zeta function defined by ${\zeta_a(s) \equiv \sum_{n=1}^{\infty} (-1)^{n-1}/n^s}$, which is related to the Riemann zeta function ${\zeta(s) \equiv \sum_{n=1}^{\infty} 1/n^s}$ as ${\zeta_a(s) = (1-2^{1-s})\zeta(s)}$. Note that the alternating zeta function is regular at ${s=1}$ and is given there by ${\zeta_a(1)}=\ln 2$. The nearest-neighbor correlation function (\ref{first_neighbor}) is derived directly from the ground state energy (\ref{e0xxx}) obtained by Hulth\'{e}n in 1938 \cite{Hulthen}. The first nontrivial correlation function (\ref{second_neighbor}) was derived by Takahashi in 1977 via the strong coupling expansion of the ground state energy for the half-filled Hubbard model \cite{Taka77}. Note also that another derivation of the second-neighbor correlation function (\ref{second_neighbor}) was given by Dittrich and Inozemtsev in 1997 \cite{DI}. These methods, however, unfortunately cannot be generalized to calculate further correlation functions on more than four lattice sites. Meanwhile, using the representation theory of the quantum affine algebra $U_q(\hat{sl_2})$, the Kyoto Group (Jimbo, Miki, Miwa, Nakayashiki) derived a multiple integral representation for arbitrary correlation functions of the massive XXZ antiferromagnetic chain in 1992 \cite{JMMN, JMBook}, which was before long extended to the XXX case \cite{Nakayashiki, KIEU} and the massless XXZ case \cite{JM}. More recently, the same integral representations were reproduced by Kitanine, Maillet and Terras \cite{KMT} in the framework of the Quantum Inverse Scattering Method. They have also succeeded in generalizing the integral representations to the XXZ model with an external magnetic field \cite{KMT}. The integral formulas have even been extended to the XXZ model at finite temperature \cite{GKS04,GKS05}. In this way it has now been established that the correlation functions on $n$ lattice sites are in general represented by $n$-dimensional integrals. However, these multiple integral representations for correlation functions, though completely exact, have not been used widely, especially among physicists. This is mainly because accurate numerical values of the correlation functions cannot be evaluated directly from the integral representations. Also, it had long been a puzzle that the exact expression for the correlation function on three lattice sites (\ref{second_neighbor}) had not been reproduced from the integral representation. The situation changed when Boos and Korepin devised an innovative method to evaluate these multiple integrals for the XXX chain in 2001 \cite{BK1, BK2}. They showed that the integrand in the multiple integral formula can be reduced to a {\it canonical} form, which allows us to implement the integration. This method was at first applied to a special correlation function called the Emptiness Formation Probability (EFP) \cite{KIEU}, which signifies the probability to find a ferromagnetic string of length $n$: \begin{align} P(n) \equiv \left\bra \prod_{j=1}^{n} \( \frac{1}{2} +S_j^z \) \right\ket. \label{efp} \end{align} By performing the multiple integrals, the explicit forms of the EFP for up to $n=5$ were obtained \cite{BK1, BK2, BKNS}.
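All such closed-form expressions are easy to evaluate to arbitrary precision, since the alternating zeta function is available in standard libraries (it is mpmath's \texttt{altzeta}, i.e. the Dirichlet eta function, with $\zeta_a(1)=\ln2$ built in). A minimal sketch reproducing the decimals above; the same pattern evaluates all the longer zeta polynomials quoted below:
\begin{verbatim}
# Sketch: evaluate the zeta expressions for <S^z_1 S^z_2> and <S^z_1 S^z_3>.
from mpmath import mp, altzeta, mpf

mp.dps = 20
za = altzeta                               # zeta_a(s), za(1) = log 2
print(mpf(1)/12 - za(1)/3)                 # -0.147715726853315...
print(mpf(1)/12 - 4*za(1)/3 + za(3))       #  0.060679769956435...
\end{verbatim}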
The Boos-Korepin method was applied to calculate the other correlation functions on four lattice sites in 2003 \cite{SSNT}. In particular, the third-neighbor correlation function was obtained there as \begin{align} \zcor{1}{4}=& \frac{1}{12}-3\za{1}+\frac{74}{9}\za{3}-\frac{56}{9}\za{1}\za{3} -\frac{8}{3}\za{3}^2-\frac{50}{9}\za{5}+\frac{80}{9}\za{1}\za{5}\nn\\ =&-0.05024862725723524\cdots. \end{align} The other correlation functions on four lattice sites are given in Appendix A. Subsequently, the Boos-Korepin method was generalized to the XXZ chain, both in the massless and the massive regime, and all the correlation functions within four lattice sites were obtained for general anisotropy \cite{KSTS03, TKS, KSTS04}. In principle, the multiple integrals for any correlation functions can be performed by means of the Boos-Korepin method. However, $P(5)$ is the only correlation function on five lattice sites which was calculated by this method, since it becomes critically harder to reduce the integrand to the canonical form as the integral dimension increases. In the course of attempting to obtain further correlation functions, an alternative algebraic method to calculate the EFP was developed by Boos, Korepin and Smirnov in 2003 \cite{BKS}. They considered the inhomogeneous XXX model, in which each site carries an inhomogeneous parameter $\lam_j$. The homogeneous XXX model corresponds to the case with all the inhomogeneous parameters $\lam_j$ set to $0$. Inhomogeneous correlation functions on $n$ lattice sites are considered to be functions of the variables $\lams{1}{n}$. They derived the functional relations which the inhomogeneous EFP should satisfy by investigating the quantum Knizhnik-Zamolodchikov (qKZ) equations \cite{KZ, FR, Smirnov1, Smirnov2}, the solutions to which are connected with the inhomogeneous correlation functions \cite{JMBook, Nakayashiki, JM}. Moreover, they suggested an ansatz for the form of the inhomogeneous EFP, which consists of only one transcendental function $\omega(\lam)$ with rational functions of the inhomogeneous parameters $\lams{1}{n}$ as coefficients (for the proof of the ansatz and further generalizations, see \cite{BJMST1, BJMST2, BJMST3, BJMST4, BJMST5, BJMST6}). It was shown that the functional relations together with the ansatz for the final form completely fix the explicit form of the inhomogeneous EFP. In this way the explicit forms of the inhomogeneous EFP for up to $n=6$ have been obtained, which gives a new result for $P(6)$ in the homogeneous limit $\lam_j\to0$. This Boos-Korepin-Smirnov method based on the qKZ equations was generalized to arbitrary correlation functions in \cite{BST}. Actually, in that paper, all the correlation functions on five lattice sites were obtained based on the functional relations for the general correlation functions (see Appendix A). In particular, the fourth-neighbor correlation function is given by \begin{align} \zcor{1}{5}=& \frac{1}{12}-\frac{16}{3}\za{1}+\frac{290}{9}\za{3}-72\za{1}\za{3}-\frac{1172}{9}\za{3}^2 -\frac{700}{9}\za{5} \nn\\& +\frac{4640}{9}\za{1}\za{5}-\frac{220}{9}\za{3}\za{5} -\frac{400}{3}\za{5}^2+\frac{455}{9}\za{7}-\frac{3920}{9}\za{1}\za{7} \nn\\& +280\za{3}\za{7} = 0.03465277698272816 \cdots. \end{align} The main purpose of this paper is to report further results obtained by the algebraic method of \cite{BST}. More explicitly, we have succeeded in calculating all the correlation functions on six lattice sites using this algebraic method.
We remark that, if one considers only the two-point correlation functions $\zcor{1}{n}$, they have already been calculated up to the seventh-neighbor correlation function $\zcor{1}{8}$ \cite{SS, SST}. That method actually allows us to evaluate some other correlation functions, such as string correlation functions \cite{BSS}, but not all the correlation functions. We also remark that there is a related but different approach to the evaluation of general correlation functions, which was developed recently by Boos, Jimbo, Miwa, Smirnov and Takeyama \cite{BJMST1, BJMST2, BJMST3, BJMST4, BJMST5, BJMST6}. They have established a compact exponential formula for general correlation functions without heavy multiple integrals, which would also be useful for evaluating correlation functions analytically. We, however, do not discuss that formula in this paper. The plan of this paper is as follows. In section 2, the algebraic method to calculate general correlation functions is reviewed. In section 3, we present the analytical results for some physically interesting correlation functions, such as chiral correlation functions, dimer-dimer correlation functions, and so on. In section 4, we show the eigenvalue distribution of the reduced density matrix and calculate the von Neumann entropy. A summary and discussion are given in section 5. \section{Algebraic method to evaluate general correlation functions based on the qKZ equations} \setcounter{equation}{0} Below we describe the functional approach to evaluate general correlation functions established in \cite{BST}. Any correlation function can be expressed as a sum of density matrix elements $P^{\epDs{1}{n}}_{\eps{1}{n}}$, which are defined by the ground state expectation value of a product of elementary matrices: \begin{align} P^{\epDs{1}{n}}_{\eps{1}{n}}\equiv\bra E_1^{\epD{1}\ep{1}} \cdots E_n^{\epD{n}\ep{n}} \ket, \end{align} where $E_j^{\epD{j}\ep{j}}$ are the $2 \times 2$ elementary matrices acting on the $j$-th site as \begin{align} E^{++}_j&=\begin{pmatrix}1&0\\0&0\\\end{pmatrix}_{\!\![j]}=\frac12+S_j^z, \quad E^{--}_j=\begin{pmatrix}0&0\\0&1\\\end{pmatrix}_{\!\![j]}=\frac12-S_j^z, \nn\\ E^{+-}_j&=\begin{pmatrix}0&1\\0&0\\\end{pmatrix}_{\!\![j]}=S_j^+=S_j^x+i S_j^y, \quad E^{-+}_j=\begin{pmatrix}0&0\\1&0\\\end{pmatrix}_{\!\![j]}=S_j^-=S_j^x-i S_j^y. \nn \end{align} For example, the density matrix element on four lattice sites $P^{-++-}_{++--}$ can be written in terms of spin operators as \begin{align} P^{-++-}_{++--}=\left\bra S_1^-\(\frac12+S_2^z\)S_3^+\(\frac12-S_4^z\)\right\ket.
\end{align} First we list the functional relations for the inhomogeneous density matrix elements, which follow from the qKZ equations: \begin{itemize} \item{Translational invariance} \begin{align} P^{\epDs{1}{n}}_{\eps{1}{n}}(\lam_1 + \lam, \ldots, \lam_n + \lam)= P^{\epDs{1}{n}}_{\eps{1}{n}}(\lams{1}{n}), \label{f1} \end{align} \item{Transposition, Negating and Reverse order relations} \begin{align} &P^{\epDs{1}{n}}_{\eps{1}{n}}(\lams{1}{n})= P^{\eps{1}{n}}_{\epDs{1}{n}}(\Mlams{1}{n}) \nn\\ &=P^{\MepDs{1}{n}}_{\Meps{1}{n}}(\lams{1}{n}) =P^{\epDs{n}{1}}_{\eps{n}{1}}(\Mlams{n}{1}) \end{align} \item{Intertwining relation} \begin{align} &\sum_{\TepD{j},\TepD{j+1}=\pm} R^{\epD{j}\epD{j+1}}_{\TepD{j}\TepD{j+1}}(\lam_j-\lam_{j+1}) P^{\ldots,\TepD{j+1},\TepD{j},\ldots}_{\ldots,\ep{j+1},\ep{j},\ldots} (\ldots, \lam_{j+1},\lam_j,\ldots) \nn\\ & = \sum_{\Tep{j},\Tep{j+1}=\pm} P^{\ldots,\epD{j},\epD{j+1},\ldots}_{\ldots,\Tep{j},\Tep{j+1},\ldots} (\ldots,\lam_j ,\lam_{j+1},\ldots) R^{\Tep{j}\Tep{j+1}}_{\ep{j}\ep{j+1}}(\lam_j-\lam_{j+1}), \end{align} where $R$ denotes the $R$-matrix of the XXX model (a numerical check of its Yang-Baxter property is sketched just after this list): \begin{align} R(\lam)= \begin{pmatrix} R^{++}_{++}(\lam) & R^{++}_{+-}(\lam) & R^{++}_{-+}(\lam) & R^{++}_{--}(\lam) \\ R^{+-}_{++}(\lam) & R^{+-}_{+-}(\lam) & R^{+-}_{-+}(\lam) & R^{+-}_{--}(\lam) \\ R^{-+}_{++}(\lam) & R^{-+}_{+-}(\lam) & R^{-+}_{-+}(\lam) & R^{-+}_{--}(\lam) \\ R^{--}_{++}(\lam) & R^{--}_{+-}(\lam) & R^{--}_{-+}(\lam) & R^{--}_{--}(\lam) \\ \end{pmatrix}= \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & \frac{\lam}{\lam + 1} & \frac{1}{\lam + 1} & 0 \\ 0 & \frac{1}{\lam + 1} & \frac{\lam}{\lam + 1} & 0 \\ 0 & 0 & 0 & 1 \\ \end{pmatrix}. \end{align} \item{First recurrent relation} \begin{align} &P^{\epD{1}\epDs{2}{n}}_{\ep{1}\eps{2}{n}}(\lam+1,\lam,\lams{3}{n}) =-\delta_{\ep{1},-\ep{2}}\epD{1}\ep{2} P^{\epD{2}\epDs{3}{n}}_{-\epD{1}\eps{3}{n}}(\lam,\lams{3}{n}) \nn\\ &P^{\epD{1}\epDs{2}{n}}_{\ep{1}\eps{2}{n}}(\lam-1,\lam,\lams{3}{n}) =-\delta_{\epD{1},-\epD{2}}\ep{1}\epD{2} P^{-\ep{1}\epDs{3}{n}}_{\ep{2}\eps{3}{n}}(\lam,\lams{3}{n}) \label{rec1} \end{align} \item{Second recurrent relation} \begin{align} \lim_{\lam_{j} \rightarrow i \infty} P^{\epDs{1}{j},\ldots,\epD{n}}_{\eps{1}{j},\ldots,\ep{n}}(\lams{1}{j},\ldots,\lam_n) =\delta_{\ep{j},\epD{j}}\frac{1}{2} P^{\epDs{1}{j-1},\epDs{j+1}{n}}_{\eps{1}{j-1},\eps{j+1}{n}}(\lams{1}{j-1},\lams{j+1}{n}) \label{rec2} \end{align} \item{Identity relation} \begin{align} \sum_{\eps{1}{n}} &P^{\epDs{1}{n}}_{\eps{1}{n}}(\lams{1}{n}) =\sum_{\epDs{1}{n}}P^{\epDs{1}{n}}_{\eps{1}{n}}(\lams{1}{n}) \nn\\ &=P^{+,\ldots,+}_{+,\ldots,+}(\lams{1}{n})=P^{-,\ldots,-}_{-,\ldots,-}(\lams{1}{n}) \end{align} \item{Reduction relation} \begin{align} &P^{+,\epDs{2}{n}}_{+,\eps{2}{n}}(\lam_1,\lams{2}{n})+ P^{-,\epDs{2}{n}}_{-,\eps{2}{n}}(\lam_1,\lams{2}{n}) =P^{\epDs{2}{n}}_{\eps{2}{n}}(\lams{2}{n}) \nn\\ &P^{\epDs{1}{n-1},+}_{\eps{1}{n-1},+}(\lams{1}{n-1},\lam_n) +P^{\epDs{1}{n-1},-}_{\eps{1}{n-1},-}(\lams{1}{n-1},\lam_n) =P^{\epDs{1}{n-1}}_{\eps{1}{n-1}}(\lams{1}{n-1}) \label{f2} \end{align} \end{itemize}
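As mentioned above, here is a minimal numerical check (Python sketch) that this $R$-matrix, which can be written as $R(\lam)=(\lam\,\mathrm{Id}+P)/(\lam+1)$ with $P$ the permutation operator, indeed satisfies the Yang-Baxter equation $R_{12}(\lam_{12})R_{13}(\lam_{13})R_{23}(\lam_{23}) = R_{23}(\lam_{23})R_{13}(\lam_{13})R_{12}(\lam_{12})$ (with $\lam_{jk}=\lam_j-\lam_k$) underlying the intertwining relation:
\begin{verbatim}
# Sketch: Yang-Baxter check for the rational R-matrix of the XXX model.
import numpy as np

P4 = np.eye(4)[[0, 2, 1, 3]]             # permutation on C^2 (x) C^2
R = lambda u: (u*np.eye(4) + P4)/(u + 1)
I2 = np.eye(2)
S23 = np.kron(I2, P4)                    # swap of tensor slots 2 and 3

R12 = lambda u: np.kron(R(u), I2)
R23 = lambda u: np.kron(I2, R(u))
R13 = lambda u: S23 @ R12(u) @ S23

l1, l2, l3 = 0.37, -1.05, 0.62           # arbitrary inhomogeneities
lhs = R12(l1 - l2) @ R13(l1 - l3) @ R23(l2 - l3)
rhs = R23(l2 - l3) @ R13(l1 - l3) @ R12(l1 - l2)
print(np.allclose(lhs, rhs))             # True
\end{verbatim}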
Additionally, it is established that the density matrix elements can be written in the form \cite{BKS, BJMST1, BST} \begin{align} P^{\epDs{1}{n}}_{\eps{1}{n}}&(\lams{1}{n})= \(\prod^n_{j=1}\frac{\delta_{\ep{j},\epD{j}}}2\)\nn\\ &+\sum_{l=1}^{[\frac{n}{2}]}\sum_{\sg\in T_{n,l}} A^{\epDs{1}{n}}_{\eps{1}{n}}(l,\sg| \lam_{\sg(1)},\cdots,\lam_{\sg(n)}) \prod_{j=1}^l\omega(\lam_{\sg(2j-1)}-\lam_{\sg(2j)}), \label{form} \end{align} where $T_{n,l}$ is a subset of the symmetric group of degree $n$ defined as \begin{align} T_{n,l}=\{\sg\in S_n|&\sg(1)<\sg(3)<\cdots<\sg(2l-1), \nn\\ &\sg(2j-1)<\sg(2j) \quad {\rm for} \quad j=1,2,\cdots,l, \nn\\ &\sg(2l+1)<\sg(2l+2)<\cdots<\sg(n) \}. \end{align} $A^{\epDs{1}{n}}_{\eps{1}{n}}(l,\sg|\lams{1}{n})$ are rational functions of the inhomogeneous parameters $\lams{1}{n}$ with known denominator \begin{align} &A^{\epDs{1}{n}}_{\eps{1}{n}}(l,\sg|\lams{1}{n}) = \frac{Q^{\epDs{1}{n}}_{\eps{1}{n}}(l,\sg| \lams{1}{n})}{D_{n,l}(\lams{1}{n})}, \nn\\ &D_{n,l}(\lams{1}{n})= \frac{\prod_{1\leq k<j\leq n}(\lam_{k}-\lam_{j})} {\prod_{k=1}^l(\lam_{2k-1}-\lam_{2k}) \prod_{2l+1\leq k<j\leq n}(\lam_{k}-\lam_{j})}. \end{align} $Q^{\epDs{1}{n}}_{\eps{1}{n}}(l,\sg| \lams{1}{n})$ are polynomials in the inhomogeneous parameters $\lams{1}{n}$ with rational coefficients, of at most the same degree in each variable, and of at most the same total degree, as the denominator. The transcendental function $\omega(\lam)$ in (\ref{form}) is defined by \begin{align} \omega(\lam)=\frac{1}{2}+\sum_{k=1}^{\infty}(-1)^k \frac{2k(\lam^2-1)}{\lam^2-k^2} =2\sum^{\infty}_{k=0}\lam^{2k}\{\za{2k-1}-\za{2k+1}\}. \end{align} Here note the identity $\za{-1}=-3\zeta(-1)=1/4$. The following properties of the function $\omega(\lam)$ are needed for calculations with the recurrent relations (\ref{rec1})-(\ref{rec2}): \begin{align} \omega(i \infty)=0, \quad \omega(\lam \pm 1)=\frac{3}{2}\frac{1}{\lam^2-1} -\frac{\lam(\lam\pm 2)}{\lam^2-1}\omega(\lam). \end{align} Below we list the explicit forms of all the non-zero elements of the inhomogeneous density matrices for $n=1,\,2$: \begin{align} &P^+_+(\lam_1)=P^-_-(\lam_1)=\frac12, \nn\\ &P^{++}_{++}(\lam_1,\lam_2)=P^{--}_{--}(\lam_1,\lam_2) =\frac14+\frac16\omega(\lam_1-\lam_2), \nn\\ &P^{+-}_{+-}(\lam_1,\lam_2)=P^{-+}_{-+}(\lam_1,\lam_2) =\frac14-\frac16\omega(\lam_1-\lam_2), \nn\\ &P^{+-}_{-+}(\lam_1,\lam_2)=P^{-+}_{+-}(\lam_1,\lam_2)=\frac13\omega(\lam_1-\lam_2). \end{align} It can easily be seen that these satisfy the functional relations (\ref{f1})-(\ref{f2}). By taking the homogeneous limit $\lam_1,\lam_2\to0$, we obtain the density matrix elements for the physically interesting homogeneous case: \begin{align} &P^+_+=P^-_-=\frac12, &P^{++}_{++}=P^{--}_{--}=\frac13-\frac13\za{1}, \nn\\ &P^{+-}_{+-}=P^{-+}_{-+}=\frac16+\frac13\za{1}, &P^{+-}_{-+}=P^{-+}_{+-}=\frac16-\frac23\za{1}. \end{align} In the paper \cite{BST}, it has been argued that the functional relations (\ref{f1})-(\ref{f2}), together with the ansatz (\ref{form}) for the form of the inhomogeneous correlation functions, completely determine the unknown rational coefficients in the polynomials $Q^{\epDs{1}{n}}_{\eps{1}{n}}(l,\sg| \lams{1}{n})$, and therefore the full form of the inhomogeneous correlation functions. As an example, we give the explicit form of the density matrix element for $n=3$: \begin{align} &Q^{-++}_{++-}(l\!=\!1,\sg\!=\!(123)|\lam_1,\lam_2,\lam_3)=\frac16-\frac1{12}\lam_{12}, \nn\\ &Q^{-++}_{++-}(l\!=\!1,\sg\!=\!(231)|\lam_1,\lam_2,\lam_3)=\frac16+\frac1{12}\lam_{12}, \nn\\ &Q^{-++}_{++-}(l\!=\!1,\sg\!=\!(132)|\lam_1,\lam_2,\lam_3) =\frac16+\frac16\lam_{13}\lam_{23}-\frac1{12}(\lam_{13}+\lam_{23}), \nn\\ &P^{-++}_{++-}(\lam_1,\lam_2,\lam_3)=\frac{2-\lam_{12}}{12\lam_{13}\lam_{23}}\omega_{12}+ \frac{2+\lam_{23}}{12\lam_{21}\lam_{31}}\omega_{23} +\frac{2+2\lam_{12}\lam_{32}-(\lam_{12}+\lam_{32})}{12\lam_{12}\lam_{32}}\omega_{13}, \end{align} where we have used the abbreviations $\omega_{jk}=\omega(\lam_{jk})$, $\lam_{jk}=\lam_{j}-\lam_{k}$.
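Both series for $\omega(\lam)$ are easy to evaluate numerically. A small mpmath sketch, checking $\omega(0)=\frac12-2\za{1}$ and, via the homogeneous $n=2$ elements above, the nearest-neighbor correlator $\zcor{1}{2}=\frac14\(P^{++}_{++}+P^{--}_{--}-P^{+-}_{+-}-P^{-+}_{-+}\)$:
\begin{verbatim}
# Sketch: omega(lam) from its Taylor series; n = 2 homogeneous elements.
from mpmath import mp, altzeta, log, mpf

mp.dps = 20
za = altzeta
omega = lambda lam, K=60: 2*sum(lam**(2*k)*(za(2*k - 1) - za(2*k + 1))
                                for k in range(K))      # |lam| < 1

w0 = omega(mpf(0))
print(w0, mpf(1)/2 - 2*log(2))     # omega(0) = 1/2 - 2 za(1)
Ppp = mpf(1)/4 + w0/6              # P^{++}_{++} = 1/3 - za(1)/3
Ppm = mpf(1)/4 - w0/6              # P^{+-}_{+-} = 1/6 + za(1)/3
print(Ppp, Ppm, w0/3)              # w0/3 = P^{+-}_{-+}
print((2*Ppp - 2*Ppm)/4)           # <S^z_1 S^z_2> = 1/12 - za(1)/3
\end{verbatim}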
By taking the homogeneous limit $\lam_1,\lam_2,\lam_3\to0$, we obtain the homogeneous density matrix element \begin{align} P^{-++}_{++-}=\frac1{12}-\frac43\za{1}+\za{3}. \end{align} In this way all the correlation functions on five lattice sites have been obtained in \cite{BST}. In this paper we have obtained further results for {\it six} lattice sites. There are 24 independent correlation functions among them, the explicit forms of which are collected in Appendix B. Concerning the two-point correlation functions $\zcor{1}{n}$, on the other hand, an efficient way to calculate them has been developed \cite{SS, SST}. Namely, without evaluating all the density matrix elements, a two-point correlation function can be derived from its generating function \begin{align} \Pk{n} \equiv \left\bra \prod^n_{j=1} \left\{ \(\frac{1}{2}+S^z_j\) +\kp\(\frac{1}{2}-S^z_j\) \right\} \right\ket \label{gf} \end{align} through the relation \begin{align} \zcor{1}{n}= \frac{1}{2} \frac{\partial^2}{\partial \kp^2} \Big\{ \Pk{n} -2\Pk{n-1} +\Pk{n-2} \Big\} \Bigg|_{\kp=1} - \frac{1}{4}. \end{align} Note that the generating function $\Pk{n}$ can be considered as a generalization of the Emptiness Formation Probability (\ref{efp}), which is reproduced if we set $\kp=0$ in (\ref{gf}). From the functional relations for the general density matrix elements (\ref{f1})-(\ref{f2}), it can be shown that the generating function $\Pk{n}$ satisfies the following closed functional relations: \begin{itemize} \item{Translational invariance} \begin{align} \Pk{n}(\lam_1 + \lam, \ldots, \lam_n + \lam)=\Pk{n}(\lams{1}{n}) \end{align} \item{Negating relation} \begin{align} \Pk{n}(\Mlams{1}{n})=\Pk{n}(\lams{1}{n}) \end{align} \item{Symmetry relation} \begin{align} \Pk{n}(\lams{1}{n})=\Pk{n}(\lams{\sigma(1)}{\sigma(n)}), \label{sym} \end{align} where $\sigma$ denotes any element of the symmetric group $S_n$. \item{First recurrent relation} \begin{align} \Pk{n}(\lams{1}{n-1},\lam_{n-1}\pm 1)=\kp \Pk{n-2}(\lams{1}{n-2}) \end{align} \item{Second recurrent relation} \begin{align} \lim_{\lam_n\to i \infty}\Pk{n}(\lams{1}{n-1},\lam_n)=\frac{1+\kp}{2}\Pk{n-1}(\lams{1}{n-1}) \end{align} \end{itemize} One can explicitly calculate the generating functions $\Pk{n}$ recursively with respect to $n$ from these functional relations, together with the ansatz for the final form \begin{align} \Pk{n}(\lams{1}{n})&= \(\frac{1+\kp}2\)^n +\sum_{l=1}^{[\frac{n}{2}]}\sum_{\sg\in T_{n,l}} \Ak{n,l}(\lam_{\sg(1)},\cdots,\lam_{\sg(n)}) \prod_{j=1}^l\omega(\lam_{\sg(2j-1)}-\lam_{\sg(2j)}), \nn\\ &\Ak{n,l}(\lams{1}{n}) = \frac{\Qk{n,l}(\lams{1}{n})}{D_{n,l}(\lams{1}{n})}, \end{align} where $T_{n,l}$, $D_{n,l}(\lams{1}{n})$ and $\omega(\lam)$ are the same as in (\ref{form}), and $\Qk{n,l}(\lams{1}{n})$ are polynomials containing the parameter $\kp$. The great advantage of this method is that the polynomials $\Qk{n,l}(\lams{1}{n})$ do not depend on the permutation $\sg\in T_{n,l}$, due to the symmetry relation (\ref{sym}), unlike the case of the general density matrix elements $Q^{\epDs{1}{n}}_{\eps{1}{n}}(l,\sg| \lams{1}{n})$. This fact considerably reduces the amount of cumbersome calculation. Actually, by using this method, two-point correlation functions up to the seventh neighbor, $\zcor{1}{8}$, have been calculated \cite{SST}. Calculating all the density matrix elements is much harder; we have therefore succeeded only up to six sites so far.
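As a quick consistency check of this relation, the $n=2$ homogeneous case can be worked out symbolically from the elements listed above. A SymPy sketch, with \texttt{za1} standing for $\za{1}$:
\begin{verbatim}
# Sketch: generating-function relation at n = 2 (homogeneous limit).
import sympy as sp

kp, za1 = sp.symbols('kappa za1')
P0 = 1
P1 = (1 + kp)/2
P2 = ((sp.Rational(1, 3) - za1/3)           # kappa^0: P^{++}_{++}
      + 2*kp*(sp.Rational(1, 6) + za1/3)    # kappa^1: P^{+-}_{+-} + P^{-+}_{-+}
      + kp**2*(sp.Rational(1, 3) - za1/3))  # kappa^2: P^{--}_{--}

corr = sp.diff(P2 - 2*P1 + P0, kp, 2)/2 - sp.Rational(1, 4)
print(sp.simplify(corr.subs(kp, 1)))        # 1/12 - za1/3, i.e. (first_neighbor)
\end{verbatim}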
\section{Applications to physically interesting correlation functions} \setcounter{equation}{0} In this section, we discuss several physically interesting correlation functions, which can be evaluated exactly from our results for all the density matrix elements for $n=6$. \subsection{Chiral correlation function} First let us consider the vector chiral correlation function \begin{align} \vch{j}{j+1}=6\(\zcorf{1}{2}{j}{j+1}{x}{z}{x}{z}-\zcorf{1}{2}{j}{j+1}{x}{z}{z}{x}\), \end{align} which measures the chirality of the spin alignment. It has been observed that they have simple expressions \cite{BST, MT} compared with the other correlation functions on the same lattice sites: \begin{align} \vch{3}{4}&=\frac12\(\za{1}-\za{3}\)=-0.1041977484048752\cdots,\\ \vch{4}{5}&=\frac12\(\za{1}-\za{3}\)-\frac54\(\za{3}-\za{5}\)=-0.01597638205835821\cdots. \end{align} Here we have newly obtained the vector chiral correlation function for six lattice sites, which also has a simple form, though it contains some quadratic terms: \begin{align} \vch{5}{6}&=\frac12\za{1}-\frac{11}3\za{3}+9\za{5}-\frac{35}6\za{7}+\frac43\za{1}\za{3}\nn\\& \quad+\frac43\za{3}^2-\frac{32}3\za{1}\za{5}-\frac43\za{3}\za{5}+\frac{28}3\za{1}\za{7}\nn\\ &=-0.01774606473688137\cdots. \end{align} It is interesting to note that the numerical values of these are all negative and their absolute values are oscillating. Furthermore, we obtain the scalar chiral correlation function defined below, which reveals an intriguing factorization: \begin{align} &\left\bra\[\(\vec{S}_1\times\vec{S}_2\)\cdot\vec{S}_3\] \[\(\vec{S}_4\times\vec{S}_5\)\cdot\vec{S}_6\]\right\ket \nn\\&\qquad=6(\zcors{x}{y}{z}{x}{y}{z}+\zcors{x}{y}{z}{y}{z}{x}+\zcors{x}{y}{z}{z}{x}{y} \nn\\&\qquad\qquad-\zcors{x}{y}{z}{z}{y}{x}-2\zcors{x}{y}{z}{x}{z}{y}) \nn\\&\qquad=\frac76(\za{1}-1/4)(\za{7}-\za{5})-\frac16(\za{3}-\za{1})(\za{5}-\za{3}) \nn\\&\qquad=0.008133862120680087\cdots. \end{align} Note that the numerical value in this case is positive. These numerical values may indicate a classical picture of the spin alignment for the antiferromagnetic ground state of the quantum Heisenberg chain. \subsection{One-particle Green function} By the Jordan-Wigner transformation \begin{align} S_j^+=\prod_{k=1}^{j-1}(1-2c_k^{\dagger}c_k)c_j, \quad S_j^-=\prod_{k=1}^{j-1}(1-2c_k^{\dagger}c_k)c_j^{\dagger}, \end{align} the XXX model is transformed into the isotropic spinless fermion model. Let us consider the one-particle Green function $\green{n}$ for this model. Here the bracket $\bra\cdots\ket_f$ means the expectation value in the half-filled state of the spinless fermion model. The first-neighbor one-particle Green function $\green{2}$ is directly obtained from Hulth\'{e}n's result (\ref{first_neighbor}) as \begin{align} \green{2}=\bra S_1^+S_2^-\ket=2\zcor{1}{2}=\frac16-\frac23\za{1}=-0.2954314537066302\cdots. \end{align} Since $\green{n}=0$ if $n$ is odd, the first non-trivial result beyond $\green{2}$ is $\green{4}$, which was obtained in \cite{SSNT}: \begin{align} \green{4}&=4\bra S_1^+S_2^zS_3^zS_4^-\ket=8\zcorf{1}{2}{3}{4}{x}{z}{z}{x}\nn\\ &=\frac1{30}-2\za{1}+\frac{338}{45}\za{3}-\frac{40}{9}\za{1}\za{3}-\frac{32}{15}\za{3}^2-\frac{52}{9}\za{5}+\frac{64}{9}\za{1}\za{5}\nn\\&=0.08228771668643698\cdots. \end{align}
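The quoted decimals are immediate to reproduce with the \texttt{altzeta} helper used earlier; a minimal sketch:
\begin{verbatim}
# Sketch: numerical values of the Green functions G(2) and G(4).
from mpmath import mp, altzeta, mpf

mp.dps = 20
za = altzeta
g2 = mpf(1)/6 - 2*za(1)/3
g4 = (mpf(1)/30 - 2*za(1) + mpf(338)/45*za(3) - mpf(40)/9*za(1)*za(3)
      - mpf(32)/15*za(3)**2 - mpf(52)/9*za(5) + mpf(64)/9*za(1)*za(5))
print(g2)    # -0.295431453706630...
print(g4)    #  0.082287716686436...
\end{verbatim}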
Here we have newly obtained the fifth-neighbor one-particle Green function \begin{align} \green{6}&=16\bra S_1^+S_2^zS_3^zS_4^zS_5^zS_6^-\ket=32\zcors{x}{z}{z}{z}{z}{x}\nn\\ &=\frac{1}{70}-\frac{10}{3}\za{1}+74\za{3}-\frac{3608}{9}\za{1}\za{3}-\frac{90832}{45}\za{3}^2-\frac{100288}{135}\za{3}^3\nn\db&-\frac{207464}{315}\za{5}+\frac{133456}{15}\za{1}\za{5}+\frac{3088}{9}\za{3}\za{5}+\frac{200576}{45}\za{1}\za{3}\za{5}\nn\db&-\frac{60704}{45}\za{3}^2\za{5}-\frac{1943840}{63}\za{5}^2+\frac{242816}{9}\za{1}\za{5}^2+\frac{46112}{9}\za{3}\za{5}^2\nn\db&+\frac{490880}{189}\za{5}^3+\frac{89918}{45}\za{7}-\frac{308392}{9}\za{1}\za{7}+\frac{623128}{9}\za{3}\za{7}\nn\db&-\frac{424928}{9}\za{1}\za{3}\za{7}-\frac{645568}{45}\za{3}^2\za{7}+\frac{89536}{9}\za{5}\za{7}\nn\db&-\frac{98176}{9}\za{3}\za{5}\za{7}-\frac{236432}{9}\za{7}^2+\frac{343616}{9}\za{1}\za{7}^2-\frac{7052}{5}\za{9}\nn\db&+\frac{128928}{5}\za{1}\za{9}-\frac{196304}{3}\za{3}\za{9}+\frac{645568}{15}\za{1}\za{3}\za{9}\nn\db&+\frac{98176}{5}\za{3}^2\za{9}+\frac{135104}{3}\za{5}\za{9}-\frac{196352}{3}\za{1}\za{5}\za{9} \nn\\&=-0.04497471675792834\cdots. \end{align} It will be an interesting problem to study the asymptotic behavior of the one-particle Green function in detail. Conformal field theory predicts that \begin{align} \green{x}\sim \cos\(k_Fx\)x^{-5/4},\quad k_F=\pi/2, \end{align} omitting the logarithmic correction \cite{KY}. Unfortunately our new exact results are not sufficient to confirm the asymptotic formula. \subsection{Dimer-dimer correlation function} Next let us consider the dimer-dimer correlation function defined as \begin{align} D_n\equiv\dimer{n}{n+1}=3\zcorf{1}{2}{n}{n+1}{z}{z}{z}{z}+6\zcorf{1}{2}{n}{n+1}{x}{x}{z}{z}, \end{align} which should asymptotically coincide with the square of the ground state energy \begin{align} \lim_{n\to\infty}D_n=e_0^2=\(\frac14-\za{1}\)^2=0.1963794236382287\cdots. \end{align} From our results for all the density matrix elements for $n=6$, we can exactly evaluate the dimer-dimer correlation functions $D_n$ for up to $n=5$: \begin{align} D_3-e_0^2&=\frac56\za{3}-\frac43\za{1}\za{3}-\za{3}^2-\frac56\za{5}+\frac{10}3\za{1}\za{5} -\za{1}^2\nn\\&=0.06082478294036410\cdots,\db D_4-e_0^2&=\frac56\za{3}-\frac{14}3\za{1}\za{3}-\frac{43}3\za{3}^2-\frac{15}4\za{5}+\frac{160}3\za{1}\za{5}-5\za{3}\za{5}\nn\\&-\frac{50}3\za{5}^2+\frac{35}{12}\za{7}-\frac{140}3\za{1}\za{7}+35\za{3}\za{7}-\za{1}^2\nn\\&=-0.02773785800119889\cdots,\db D_5-e_0^2&=\frac56\za{3}-10\za{1}\za{3}-\frac{221}3\za{3}^2-\frac{320}9\za{3}^3-\frac{263}{30}\za{5}+\frac{862}3\za{1}\za{5}\nn\db&-\frac{196}5\za{3}\za{5}+\frac{640}3\za{1}\za{3}\za{5}-\frac{904}{15}\za{3}^2\za{5}-1381\za{5}^2\nn\db&+\frac{3616}3\za{1}\za{5}^2+\frac{680}3\za{3}\za{5}^2+\frac{1040}9\za{5}^3+\frac{161}6\za{7}-\frac{3220}3\za{1}\za{7}\nn\db&+3038\za{3}\za{7}-\frac{6328}3\za{1}\za{3}\za{7}-\frac{1904}3\za{3}^2\za{7}+\frac{1204}3\za{5}\za{7}\nn\db&-\frac{1456}3\za{3}\za{5}\za{7}-\frac{3773}3\za{7}^2+\frac{5096}3\za{1}\za{7}^2-\frac{189}{10}\za{9}+798\za{1}\za{9}\nn\db&-\frac{14224}5\za{3}\za{9}+1904\za{1}\za{3}\za{9}+\frac{4368}5\za{3}^2\za{9}+2156\za{5}\za{9}\nn\db&-2912\za{1}\za{5}\za{9}-\za{1}^2 \nn\\&=0.01892813120084483\cdots. \end{align} The difference $D_n-e_0^2$ should also decay algebraically. \subsection{$AP(n)$: the probability to find an antiferromagnetic string of length $n$} The Emptiness Formation Probability $P(n)$ (\ref{efp}) represents the probability to find a ferromagnetic string of length $n$.
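Again these values are straightforward to evaluate numerically; e.g. for the limiting value and for $D_3-e_0^2$ (mpmath sketch):
\begin{verbatim}
# Sketch: e_0^2 and the dimer-dimer difference D_3 - e_0^2.
from mpmath import mp, altzeta, mpf

mp.dps = 20
za = altzeta
print((mpf(1)/4 - za(1))**2)     # 0.196379423638228...
d3 = (mpf(5)/6*za(3) - mpf(4)/3*za(1)*za(3) - za(3)**2
      - mpf(5)/6*za(5) + mpf(10)/3*za(1)*za(5) - za(1)**2)
print(d3)                        # 0.060824782940364...
\end{verbatim}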
Similarly, we may consider the probability to find an antiferromagnetic string of length $n$, which we denote by $AP(n)$: \begin{align} AP(n)\equiv P^{+-+-\cdots}_{+-+-\cdots}+P^{-+-+\cdots}_{-+-+\cdots}. \end{align} From our results we can evaluate $AP(n)$ exactly for up to $n=6$: \begin{align} AP(2)&=P^{+-}_{+-}+P^{-+}_{-+}=\frac{1}{3}+\frac{2}{3}\za{1}=0.7954314537066302\cdots,\db AP(3)&=P^{+-+}_{+-+}+P^{-+-}_{-+-}=\frac{1}{6}-\frac{2}{3}\za{1}+\za{3}=0.6061112236630655\cdots,\db AP(4)&=P^{+-+-}_{+-+-}+P^{-+-+}_{-+-+}\nn\\&=\frac{1}{15}-\frac{8}{15}\za{3}+\frac{4}{3}\za{1}\za{3}+\frac{2}{5}\za{3}^2+\frac{2}{3}\za{5}-\frac{4}{3}\za{1}\za{5} \nn\\&=0.4938083479102196\cdots,\db AP(5)&=P^{+-+-+}_{+-+-+}+P^{-+-+-}_{-+-+-}\nn\\&=\frac{1}{30}-\frac{2}{3}\za{1}+\frac{91}{15}\za{3}-\frac{124}{9}\za{1}\za{3}-\frac{134}{5}\za{3}^2-\frac{355}{18}\za{5}+\frac{328}{3}\za{1}\za{5}\nn\\&-\frac{38}{9}\za{3}\za{5}-\frac{250}{9}\za{5}^2+\frac{259}{18}\za{7}-\frac{854}{9}\za{1}\za{7}+\frac{175}{3}\za{3}\za{7}\nn\\& =0.3943245947356898\cdots,\db AP(6)&=P^{+-+-+-}_{+-+-+-}+P^{-+-+-+}_{-+-+-+}\nn\\&=\frac{1}{70}-\frac{1}{5}\za{1}+\frac{44}{45}\za{3}+\frac{36}{5}\za{1}\za{3}+\frac{2566}{45}\za{3}^2\nn\db&+\frac{4072}{135}\za{3}^3+\frac{9754}{1575}\za{5}-\frac{10984}{45}\za{1}\za{5}-\frac{122}{75}\za{3}\za{5}\nn\db&-\frac{8144}{45}\za{1}\za{3}\za{5}+\frac{1368}{25}\za{3}^2\za{5}+\frac{374128}{315}\za{5}^2-\frac{5472}{5}\za{1}\za{5}^2\nn\db&-\frac{1868}{9}\za{3}\za{5}^2-\frac{19868}{189}\za{5}^3-\frac{1936}{45}\za{7}+\frac{50848}{45}\za{1}\za{7}\nn\db&-\frac{39794}{15}\za{3}\za{7}+\frac{9576}{5}\za{1}\za{3}\za{7}+\frac{26152}{45}\za{3}^2\za{7}-\frac{1868}{5}\za{5}\za{7}\nn\db&+\frac{19868}{45}\za{3}\za{5}\za{7}+\frac{47299}{45}\za{7}^2-\frac{69538}{45}\za{1}\za{7}^2+\frac{902}{25}\za{9}\nn\db&-\frac{4464}{5}\za{1}\za{9}+\frac{190052}{75}\za{3}\za{9}-\frac{26152}{15}\za{1}\za{3}\za{9}\nn\db&-\frac{19868}{25}\za{3}^2\za{9}-\frac{27028}{15}\za{5}\za{9}+\frac{39736}{15}\za{1}\za{5}\za{9} \nn\\&=0.3239037769698205\cdots. \end{align} It is known that the EFP $P(n)$ shows a Gaussian decay \cite{KLNS, KMST} \begin{align} P(n)\simeq An^{-\gamma}C^{-n^2}. \end{align} On the other hand, the analytical asymptotic behavior of $AP(n)$ has not been studied. However, Y. Nishiyama noticed earlier from his numerical data that it decays roughly as $AP(n)\simeq\[AP(2)\]^{n-1}$ \cite{Nishiyama}. We compare this formula with our exact data in Table \ref{apn}. \begin{table}[h] \caption{Asymptotic behavior of $AP(n)$} \label{apn} \begin{center} \begin{tabular} {@{\hspace{\tabcolsep}\extracolsep{\fill}}ccc} \hline $n$ & $AP(n)$ Exact & $\[AP(2)\]^{n-1}$ \\ \hline 2 & 0.795431 & 0.795431 \\ 3 & 0.606111 & 0.632711 \\ 4 & 0.493808 & 0.503278 \\ 5 & 0.394325 & 0.400323 \\ 6 & 0.323904 & 0.318430 \\ \hline \end{tabular} \end{center} \end{table} \section{Reduced density matrix and entanglement entropy} \setcounter{equation}{0} At zero temperature, the density matrix for the infinite system can be written as \begin{align} \rho_{\rm T}\equiv|{\rm GS}\ket\bra{\rm GS}|, \end{align} where $|{\rm GS}\ket$ denotes the antiferromagnetic ground state. Let us consider a finite sub-chain of length $n$ in the infinite chain, the rest of which can be regarded as the environment (Figure \ref{rd}). The reduced density matrix for this sub-chain is obtained by tracing out the environment from the infinite chain: \begin{align} \rho_n\equiv{\rm tr}_E\rho_{\rm T}=\[P^{\epDs{1}{n}}_{\eps{1}{n}}\]_{\ep{j},\epD{j}=\pm}. \end{align}
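For $n=2$ the reduced density matrix can be assembled explicitly from the homogeneous elements of section 2, which already illustrates the spectral structure discussed below: the smallest eigenvalue is $P(2)=P^{++}_{++}$, with degeneracy $n+1=3$. A numerical sketch (the quantity printed in the last line is the von Neumann entropy defined in the text below):
\begin{verbatim}
# Sketch: rho_2 from the homogeneous n = 2 elements; spectrum and entropy.
import numpy as np

ln2 = np.log(2.0)
a = 1/3 - ln2/3                  # P^{++}_{++} = P^{--}_{--}
b = 1/6 + ln2/3                  # P^{+-}_{+-} = P^{-+}_{-+}
c = 1/6 - 2*ln2/3                # P^{+-}_{-+} = P^{-+}_{+-}
rho2 = np.array([[a, 0, 0, 0],   # basis ++, +-, -+, --
                 [0, b, c, 0],
                 [0, c, b, 0],
                 [0, 0, 0, a]])

w = np.linalg.eigvalsh(rho2)
print(w)                         # (1 - ln 2)/3 = P(2) (three times) and ln 2
print(-(w*np.log2(w)).sum())     # 1.37585732628874... = S(2)
\end{verbatim}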
\begin{figure} \begin{center} \includegraphics[width=0.7\textwidth]{rd.eps} \caption{Finite sub-chain of length $n$ in the infinite chain} \label{rd} \end{center} \end{figure} From our results, we have computed all the eigenvalues $\omega_{\alpha}$ $(\alpha=1,2,\cdots,2^n)$ of the reduced density matrix $\rho_n$ for up to $n=6$, which are shown in Figure \ref{ev}. \begin{figure} \includegraphics[width=0.9\textwidth]{evEven.eps} \includegraphics[width=0.9\textwidth]{evOdd.eps} \caption{Eigenvalue-distribution of density matrices} \label{ev} \end{figure} We have found that the smallest eigenvalue $\omega_{2^n}$ is, for any $n$, the emptiness formation probability $P(n)$. This eigenvalue is $(n+1)$-fold degenerate, since the reduced density matrix is block-diagonalized by the magnetization of the sub-chain $M_{z}=\sum_{j=1}^n\epD{j}=\sum_{j=1}^n\ep{j}$, and each block has the non-degenerate smallest eigenvalue $P(n)$. In other words, this is a consequence of the $SU(2)$ invariance of the Hamiltonian. From these results we can calculate the von Neumann entropy (entanglement entropy) \begin{align} S(n)\equiv-{\rm tr}\rho_n\log_2\rho_n =-\sum_{\alpha=1}^{2^n}\omega_{\alpha}\log_2\omega_{\alpha}, \end{align} which is considered to measure how entangled the ground state is \cite{VLRK}. The exact numerical values of $S(n)$ up to $n=6$ are shown in Table \ref{von}. \begin{table} \begin{center} \caption{von Neumann entropy $S(n)$ of a finite sub-chain of length $n$} \label{von} \begin{tabular} {@{\hspace{\tabcolsep}\extracolsep{\fill}}cccc} \hline $S$(1)&$S$(2)&$S$(3)&$S$(4)\\ \hline 1&1.3758573262887466&1.5824933209573855&1.7247050949099274\\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular} {@{\hspace{\tabcolsep}\extracolsep{\fill}}cc} \hline $S$(5)&$S$(6)\\ \hline 1.833704916848315&1.922358833819333\\ \hline \end{tabular} \end{center} \end{table} It can be seen that as $n$ grows the von Neumann entropy increases in small steps. The asymptotic behavior of this entropy is discussed in \cite{VLRK}. In the massive region $\Delta>1$, the von Neumann entropy saturates as $n$ grows, which means the ground state is well approximated by a subsystem of finite length corresponding to the large eigenvalues of the reduced density matrix. On the other hand, in the massless case $-1<\Delta\leq1$, conformal field theory predicts that the von Neumann entropy shows a logarithmic divergence \cite{HLW}, whose explicit form for the XXX case $\Delta=1$ reads \begin{align} S(n)\simeq\frac13\log_2n+k. \end{align} We estimate the numerical value of the constant term to be $k=1.06209$ by the extrapolation $S(n)-\frac13\log_2n=c_0+c_1/n+c_2/n^2$. Our exact results for up to $n=6$ agree quite well with the asymptotic formula, as shown in Figure \ref{ent}. \begin{figure} \includegraphics[width=0.9\textwidth]{ent.eps} \caption{von Neumann entropy $S(n)$ of a finite sub-chain of length $n$} \label{ent} \end{figure} \section{Summary and discussion} We have succeeded in obtaining the analytical forms of all the density matrix elements on six lattice sites for the Heisenberg chain, using the algebraic method based on the qKZ equations developed in \cite{BST}. We have obtained several physically interesting correlation functions, such as chiral correlation functions, dimer-dimer correlation functions, and so on. Subsequently we have calculated all the eigenvalues of the reduced density matrix $\rho_n$ for up to $n=6$.
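This extrapolation can be reproduced directly from Table \ref{von}. A NumPy sketch; at these small $n$ the extrapolated constant depends mildly on the chosen fit window (the quoted $k=1.06209$ corresponds to one such choice):
\begin{verbatim}
# Sketch: extrapolate S(n) - (1/3) log2(n) = c0 + c1/n + c2/n^2.
import numpy as np

S = {1: 1.0, 2: 1.3758573262887466, 3: 1.5824933209573855,
     4: 1.7247050949099274, 5: 1.833704916848315, 6: 1.922358833819333}

def c0(ns):
    x = np.array([1.0/n for n in ns])
    y = np.array([S[n] - np.log2(n)/3 for n in ns])
    return np.polyfit(x, y, 2)[-1]       # constant term

print(c0([2, 4, 6]), c0([4, 5, 6]))      # ~1.062 and ~1.064
\end{verbatim}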
We observe that the smallest eigenvalue is the emptiness formation probability $P(n)$ and is $(n+1)$-fold degenerate. Of course, it would be even more desirable if we could describe the eigenvector corresponding to the largest eigenvalue. From these results we have computed the von Neumann entropy, which shows good agreement with the asymptotic formula derived via conformal field theory. \section*{Acknowledgement} The authors are grateful to H. Boos, V.E. Korepin, Y. Nishiyama and K. Sakai for fruitful collaborations at an early stage of this work. We especially thank V.E. Korepin for suggesting that we calculate the entanglement entropy. We also thank M. Bortz and M. Batchelor for valuable discussions. This work is in part supported by a Grant-in-Aid for Scientific Research (B) No. 18340112 from the Ministry of Education, Culture, Sports, Science and Technology, Japan. It is also supported by JSPS and the ARC under the Japan-Australia joint project in the cooperative science program. \begin{appendix} \section{Known results for up to $n=5$} In this appendix we present all the independent correlation functions for up to $n=5$, except for the two-point correlation functions $\zcor{1}{n}$ given in the Introduction. First we give the correlation functions on four lattice sites \cite{SSNT}: \begin{align} \zcorf{1}{2}{3}{4}{x}{x}{z}{z}=& \frac{1}{240}+\frac{1}{12}\za{1}-\frac{91}{180}\za{3}+\frac{2}{9}\za{1}\za{3}+\frac{1}{15}\za{3}^2+\frac{7}{18}\za{5}-\frac{2}{9}\za{1}\za{5}\nn\\ =&0.02750969925180030\cdots,\db \zcorf{1}{2}{3}{4}{x}{z}{x}{z}=& \frac{1}{240}-\frac{1}{6}\za{1}+\frac{77}{90}\za{3}-\frac{5}{9}\za{1}\za{3}-\frac{4}{15}\za{3}^2-\frac{13}{18}\za{5}+\frac{8}{9}\za{1}\za{5}\nn\\ =&-0.007080326815007911\cdots,\db \zcorf{1}{2}{3}{4}{x}{z}{z}{x}=& \frac{1}{240}-\frac{1}{4}\za{1}+\frac{169}{180}\za{3}-\frac{5}{9}\za{1}\za{3}-\frac{4}{15}\za{3}^2-\frac{13}{18}\za{5}+\frac{8}{9}\za{1}\za{5}\nn\\ =&0.01028596458580462\cdots,\db \zcorf{1}{2}{3}{4}{z}{z}{z}{z}=&\zcorf{1}{2}{3}{4}{x}{x}{z}{z} +\zcorf{1}{2}{3}{4}{x}{z}{x}{z}+\zcorf{1}{2}{3}{4}{x}{z}{z}{x}. \end{align} Note that on the antiferromagnetic ground state without a magnetic field, the correlation functions with an odd number of spin operators vanish. Considering the isotropy of the Hamiltonian (\ref{Ham}), we can see that the independent correlation functions on four lattice sites are restricted to the above ones and the two-point correlation function $\zcor{1}{4}$.
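As elsewhere, the quoted decimals, and the isotropy relation for the $zzzz$ component, are easily checked numerically with the \texttt{altzeta} helper:
\begin{verbatim}
# Sketch: four-site correlators from zeta values; zzzz via isotropy.
from mpmath import mp, altzeta, mpf

mp.dps = 20
za = altzeta
xxzz = (mpf(1)/240 + za(1)/12 - mpf(91)/180*za(3) + mpf(2)/9*za(1)*za(3)
        + za(3)**2/15 + mpf(7)/18*za(5) - mpf(2)/9*za(1)*za(5))
xzxz = (mpf(1)/240 - za(1)/6 + mpf(77)/90*za(3) - mpf(5)/9*za(1)*za(3)
        - mpf(4)/15*za(3)**2 - mpf(13)/18*za(5) + mpf(8)/9*za(1)*za(5))
xzzx = (mpf(1)/240 - za(1)/4 + mpf(169)/180*za(3) - mpf(5)/9*za(1)*za(3)
        - mpf(4)/15*za(3)**2 - mpf(13)/18*za(5) + mpf(8)/9*za(1)*za(5))
print(xxzz, xzxz, xzzx)      # 0.02750..., -0.00708..., 0.01028...
print(xxzz + xzxz + xzzx)    # the zzzz component
\end{verbatim}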
Correspondingly the independent correlation functions on five lattice sites are \cite{BST} \begin{align} \zcorf{1}{2}{3}{5}{x}{x}{z}{z}=& \frac{1}{240}+\frac{1}{12}\za{1}-\frac{517}{360}\za{3}+\frac{25}{9}\za{1}\za{3}+\frac{203}{45}\za{3}^2+\frac{305}{72}\za{5} \nn\\& -\frac{172}{9}\za{1}\za{5}+\frac{4}{9}\za{3}\za{5}+\frac{40}{9}\za{5}^2-\frac{35}{12}\za{7}+\frac{49}{3}\za{1}\za{7} \nn\\& -\frac{28}{3}\za{3}\za{7} = -0.009892435084700294 \cdots, \db \zcorf{1}{2}{3}{5}{x}{z}{x}{z}=& \frac{1}{240}-\frac{1}{4}\za{1}+\frac{301}{120}\za{3}-6\za{1}\za{3}-\frac{1079}{90}\za{3}^2-\frac{65}{8}\za{5} \nn\\& +\frac{146}{3}\za{1}\za{5}-\frac{37}{18}\za{3}\za{5}-\frac{110}{9}\za{5}^2+\frac{35}{6}\za{7}-42\za{1}\za{7} \nn\\& +\frac{77}{3}\za{3}\za{7} = 0.002788973399542967 \cdots, \db \zcorf{1}{2}{3}{5}{x}{z}{z}{x}=& \frac{1}{240}-\frac{5}{12}\za{1}+\frac{569}{180}\za{3}-\frac{61}{9}\za{1}\za{3}-\frac{1109}{90}\za{3}^2-\frac{155}{18}\za{5} \nn\\& +\frac{448}{9}\za{1}\za{5}-\frac{37}{18}\za{3}\za{5}-\frac{110}{9}\za{5}^2+\frac{35}{6}\za{7}-42\za{1}\za{7} \nn\\& +\frac{77}{3}\za{3}\za{7} = -0.005056660894819286 \cdots, \db \zcorf{1}{2}{3}{5}{z}{z}{z}{z}=& \zcorf{1}{2}{3}{5}{x}{x}{z}{z}+\zcorf{1}{2}{3}{5}{x}{z}{x}{z} +\zcorf{1}{2}{3}{5}{x}{z}{z}{x}, \db \zcorf{1}{2}{4}{5}{x}{x}{z}{z}=& \frac{1}{240}+\frac{1}{4}\za{1}-\frac{419}{120}\za{3}+\frac{70}{9}\za{1}\za{3} +\frac{221}{15}\za{3}^2+\frac{889}{72}\za{5} \nn\\& -\frac{184}{3}\za{1}\za{5} +\frac{17}{9}\za{3}\za{5}+\frac{130}{9}\za{5}^2-\frac{329}{36}\za{7} +\frac{476}{9}\za{1}\za{7} \nn\\& -\frac{91}{3}\za{3}\za{7} = 0.01857662093837095 \cdots, \db \zcorf{1}{2}{4}{5}{x}{z}{x}{z}=& \frac{1}{240}-\frac{5}{12}\za{1}+\frac{1883}{360}\za{3}-\frac{112}{9}\za{1}\za{3} -\frac{1102}{45}\za{3}^2-\frac{457}{24}\za{5} \nn\\& +\frac{908}{9}\za{1}\za{5} -\frac{11}{3}\za{3}\za{5}-\frac{220}{9}\za{5}^2+\frac{511}{36}\za{7} -\frac{784}{9}\za{1}\za{7} \nn\\& +\frac{154}{3}\za{3}\za{7} = -0.001089368972914637 \cdots, \db \zcorf{1}{2}{4}{5}{x}{z}{z}{x}=& \frac{1}{240}-\frac{1}{2}\za{1}+\frac{497}{90}\za{3}-\frac{112}{9}\za{1}\za{3} -\frac{1102}{45}\za{3}^2-\frac{77}{4}\za{5} \nn\\& +\frac{908}{9}\za{1}\za{5} -\frac{11}{3}\za{3}\za{5}-\frac{220}{9}\za{5}^2+\frac{511}{36}\za{7} -\frac{784}{9}\za{1}\za{7} \nn\\& +\frac{154}{3}\za{3}\za{7} = 0.001573361370145065 \cdots, \db \zcorf{1}{2}{4}{5}{z}{z}{z}{z}=& \zcorf{1}{2}{4}{5}{x}{x}{z}{z}+\zcorf{1}{2}{4}{5}{x}{z}{x}{z} +\zcorf{1}{2}{4}{5}{x}{z}{z}{x}. \end{align} \section{Explicit form for all the correlation functions on $6$ lattice sites} \setcounter{equation}{0} In this appendix the explicit results for all the correlation functions on $6$ lattice sites are shown. There are $24$ independent correlation functions among them. 
Any other correlation functions can be computed by the following relations: \begin{align} &\zcorf{1}{3}{4}{6}{z}{z}{z}{z}=\zcorf{1}{3}{4}{6}{x}{x}{z}{z}+\zcorf{1}{3}{4}{6}{x}{z}{x}{z} +\zcorf{1}{3}{4}{6}{x}{z}{z}{x}, \\[5pt]& \zcorf{1}{2}{5}{6}{z}{z}{z}{z}=\zcorf{1}{2}{5}{6}{x}{x}{z}{z}+\zcorf{1}{2}{5}{6}{x}{z}{x}{z} +\zcorf{1}{2}{5}{6}{x}{z}{z}{x},\\[5pt]& \zcorf{1}{4}{5}{6}{z}{z}{z}{z}=\zcorf{1}{4}{5}{6}{x}{x}{z}{z}+\zcorf{1}{4}{5}{6}{x}{z}{x}{z} +\zcorf{1}{4}{5}{6}{x}{z}{z}{x},\\[5pt]& \zcorf{1}{3}{5}{6}{z}{z}{z}{z}=\zcorf{1}{3}{5}{6}{x}{x}{z}{z}+\zcorf{1}{3}{5}{6}{x}{z}{x}{z} +\zcorf{1}{3}{5}{6}{x}{z}{z}{x}, \end{align} \begin{align} &\zcors{x}{x}{y}{y}{z}{z}=\zcors{x}{x}{x}{x}{y}{y} -\zcors{x}{x}{y}{z}{y}{z}-\zcors{x}{x}{y}{z}{z}{y},\\[5pt]& \zcors{x}{y}{x}{z}{y}{z}=\zcors{x}{x}{x}{y}{x}{y} -\zcors{x}{x}{y}{z}{y}{z}-\zcors{x}{y}{x}{z}{z}{y},\\[5pt]& \zcors{x}{y}{y}{z}{z}{x}=\zcors{x}{x}{x}{y}{y}{x} -\zcors{x}{x}{y}{z}{z}{y}-\zcors{x}{y}{x}{z}{z}{y},\\[5pt]& \zcors{x}{y}{z}{x}{y}{z}=\zcors{x}{x}{y}{x}{x}{y} -\zcors{x}{x}{y}{z}{z}{y}-\zcors{x}{y}{z}{x}{z}{y},\\[5pt]& \zcors{x}{y}{z}{y}{z}{x}=\zcors{x}{x}{y}{x}{y}{x} -\zcors{x}{x}{y}{z}{y}{z}-\zcors{x}{y}{z}{x}{z}{y},\\[5pt]& \zcors{x}{y}{z}{z}{x}{y}=\zcors{x}{y}{x}{x}{x}{y} -\zcors{x}{y}{x}{z}{z}{y}-\zcors{x}{y}{z}{x}{z}{y}, \end{align} \begin{align} &\zcors{x}{x}{y}{y}{x}{x}=\zcors{x}{y}{z}{z}{y}{x} +\zcors{x}{x}{x}{x}{y}{y}+\zcors{x}{y}{x}{x}{x}{y}\nn\\& -\zcors{x}{x}{y}{z}{y}{z}-\zcors{x}{x}{y}{z}{z}{y} -\zcors{x}{y}{x}{z}{z}{y}-\zcors{x}{y}{z}{x}{z}{y},\\[5pt]& \zcors{x}{y}{x}{x}{y}{x}=\zcors{x}{y}{z}{z}{y}{x} +\zcors{x}{x}{x}{y}{x}{y}+\zcors{x}{x}{y}{x}{x}{y}\nn\\& -\zcors{x}{x}{y}{z}{y}{z}-\zcors{x}{x}{y}{z}{z}{y} -\zcors{x}{y}{x}{z}{z}{y}-\zcors{x}{y}{z}{x}{z}{y},\\[5pt]& \zcors{x}{y}{y}{y}{y}{x}=\zcors{x}{y}{z}{z}{y}{x} +\zcors{x}{x}{x}{y}{y}{x}+\zcors{x}{x}{y}{x}{y}{x}\nn\\& -\zcors{x}{x}{y}{z}{y}{z}-\zcors{x}{x}{y}{z}{z}{y} -\zcors{x}{y}{x}{z}{z}{y}-\zcors{x}{y}{z}{x}{z}{y}, \end{align} \begin{align} &\zcors{x}{x}{x}{x}{x}{x}=\zcors{x}{y}{z}{z}{y}{x} +\zcors{x}{x}{x}{x}{y}{y}+\zcors{x}{x}{x}{y}{x}{y}\nn\\& +\zcors{x}{x}{x}{y}{y}{x}+\zcors{x}{x}{y}{x}{x}{y} +\zcors{x}{x}{y}{x}{y}{x}+\zcors{x}{y}{x}{x}{x}{y}\nn\\& -\zcors{x}{x}{y}{z}{y}{z}-\zcors{x}{x}{y}{z}{z}{y} -\zcors{x}{y}{x}{z}{z}{y}-\zcors{x}{y}{z}{x}{z}{y}. \end{align} Below we shall give the explicit results for the independent ones. 
\begin{align} \zcor{1}{6}&= \frac{1}{12}-\frac{25}{3}\za{1}+\frac{800}{9}\za{3}-\frac{1192}{3}\za{1}\za{3}-\frac{15368}{9}\za{3}^2\nn\\&-608\za{3}^3-\frac{4228}{9}\za{5}+\frac{64256}{9}\za{1}\za{5}-\frac{976}{9}\za{3}\za{5}\nn\\&+3648\za{1}\za{3}\za{5}-\frac{3328}{3}\za{3}^2\za{5}-\frac{76640}{3}\za{5}^2 \nn\\&+\frac{66560}{3}\za{1}\za{5}^2+\frac{12640}{3}\za{3}\za{5}^2+\frac{6400}{3}\za{5}^3+\frac{9674}{9}\za{7}\nn\\&+56952\za{3}\za{7}-\frac{225848}{9}\za{1}\za{7}-\frac{116480}{3}\za{1}\za{3}\za{7}\nn\\&-\frac{35392}{3}\za{3}^2\za{7}+7840\za{5}\za{7}-8960\za{3}\za{5}\za{7}\nn\\&-\frac{66640}{3}\za{7}^2+31360\za{1}\za{7}^2-686\za{9}\nn\\&+18368\za{1}\za{9}-53312\za{3}\za{9}+35392\za{1}\za{3}\za{9}\nn\\&+16128\za{3}^2\za{9}+38080\za{5}\za{9}-53760\za{1}\za{5}\za{9}\nn\\& =-0.03089036664760932\cdots \end{align} \begin{align} \zcorf{1}{3}{4}{6}{x}{x}{z}{z}&= \frac{1}{240}+\frac{1}{10}\za{1}-\frac{259}{90}\za{3}+\frac{463}{45}\za{1}\za{3}+\frac{1951}{45}\za{3}^2\nn\\&+\frac{2404}{135}\za{3}^3+\frac{1348}{75}\za{5}-\frac{8918}{45}\za{1}\za{5}-\frac{3127}{225}\za{3}\za{5}\nn\\&-\frac{4808}{45}\za{1}\za{3}\za{5}+\frac{7804}{225}\za{3}^2\za{5}+\frac{33598}{45}\za{5}^2\nn\\&-\frac{31216}{45}\za{1}\za{5}^2-\frac{1196}{9}\za{3}\za{5}^2-\frac{1808}{27}\za{5}^3-\frac{413}{9}\za{7}\nn\\&+\frac{36421}{45}\za{1}\za{7}-\frac{8393}{5}\za{3}\za{7}+\frac{54628}{45}\za{1}\za{3}\za{7}\nn\\&+\frac{16744}{45}\za{3}^2\za{7}-\frac{10864}{45}\za{5}\za{7}+\frac{12656}{45}\za{3}\za{5}\za{7}\nn\\&+\frac{29008}{45}\za{7}^2-\frac{44296}{45}\za{1}\za{7}^2+\frac{1533}{50}\za{9}\nn\\&-\frac{3108}{5}\za{1}\za{9}+\frac{120344}{75}\za{3}\za{9}-\frac{16744}{15}\za{1}\za{3}\za{9}\nn\\&-\frac{12656}{25}\za{3}^2\za{9}-\frac{16576}{15}\za{5}\za{9}+\frac{25312}{15}\za{1}\za{5}\za{9}\nn\\& =0.003681507672875026\cdots \end{align} \begin{align} \zcorf{1}{3}{4}{6}{x}{z}{x}{z}&= \frac{1}{240}-\frac{19}{60}\za{1}+\frac{1007}{180}\za{3}-\frac{1252}{45}\za{1}\za{3}-\frac{1978}{15}\za{3}^2\nn\\&-\frac{6256}{135}\za{3}^3-\frac{979}{25}\za{5}+\frac{25322}{45}\za{1}\za{5}+\frac{538}{225}\za{3}\za{5}\nn\\&+\frac{12512}{45}\za{1}\za{3}\za{5}-\frac{6292}{75}\za{3}^2\za{5}-\frac{85687}{45}\za{5}^2\nn\\&+\frac{25168}{15}\za{1}\za{5}^2+\frac{2864}{9}\za{3}\za{5}^2+\frac{4352}{27}\za{5}^3+\frac{1771}{18}\za{7}\nn\\&-\frac{89929}{45}\za{1}\za{7}+\frac{191513}{45}\za{3}\za{7}-\frac{44044}{15}\za{1}\za{3}\za{7}\nn\\&-\frac{40096}{45}\za{3}^2\za{7}+\frac{26866}{45}\za{5}\za{7}-\frac{30464}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{73402}{45}\za{7}^2+\frac{106624}{45}\za{1}\za{7}^2-\frac{3227}{50}\za{9}\nn\\&+\frac{7322}{5}\za{1}\za{9}-\frac{298886}{75}\za{3}\za{9}+\frac{40096}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{30464}{25}\za{3}^2\za{9}+\frac{41944}{15}\za{5}\za{9}-\frac{60928}{15}\za{1}\za{5}\za{9}\nn\\& =-0.001116347734065082\cdots \end{align} \begin{align} \zcorf{1}{3}{4}{6}{x}{z}{z}{x}&= 
\frac{1}{240}-\frac{13}{20}\za{1}+\frac{479}{60}\za{3}-\frac{1537}{45}\za{1}\za{3}-\frac{6709}{45}\za{3}^2\nn\\&-\frac{2452}{45}\za{3}^3-\frac{10201}{225}\za{5}+\frac{9524}{15}\za{1}\za{5}+\frac{37}{25}\za{3}\za{5}\nn\\&+\frac{4904}{15}\za{1}\za{3}\za{5}-\frac{22616}{225}\za{3}^2\za{5}-\frac{102592}{45}\za{5}^2\nn\\&+\frac{90464}{45}\za{1}\za{5}^2+\frac{1148}{3}\za{3}\za{5}^2+\frac{1744}{9}\za{5}^3+\frac{973}{9}\za{7}\nn\\&-\frac{103544}{45}\za{1}\za{7}+\frac{229138}{45}\za{3}\za{7}-\frac{158312}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{16072}{15}\za{3}^2\za{7}+\frac{31906}{45}\za{5}\za{7}-\frac{12208}{15}\za{3}\za{5}\za{7}\nn\\&-\frac{9898}{5}\za{7}^2+\frac{42728}{15}\za{1}\za{7}^2-\frac{3507}{50}\za{9}\nn\\&+\frac{8512}{5}\za{1}\za{9}-\frac{119742}{25}\za{3}\za{9}+\frac{16072}{5}\za{1}\za{3}\za{9}\nn\\&+\frac{36624}{25}\za{3}^2\za{9}+\frac{16968}{5}\za{5}\za{9}-\frac{24416}{5}\za{1}\za{5}\za{9}\nn\\& =0.003069653070471227\cdots \end{align} \begin{align} \zcorf{1}{2}{5}{6}{x}{x}{z}{z}&= \frac{1}{240}+\frac{29}{60}\za{1}-\frac{568}{45}\za{3}+\frac{1024}{15}\za{1}\za{3}+\frac{15863}{45}\za{3}^2\nn\\&+\frac{5872}{45}\za{3}^3+\frac{25486}{225}\za{5}-\frac{69922}{45}\za{1}\za{5}-\frac{13598}{225}\za{3}\za{5}\nn\\&-\frac{11744}{15}\za{1}\za{3}\za{5}+\frac{53816}{225}\za{3}^2\za{5}+\frac{246547}{45}\za{5}^2\nn\\&-\frac{215264}{45}\za{1}\za{5}^2-\frac{2728}{3}\za{3}\za{5}^2-\frac{4144}{9}\za{5}^3-\frac{61327}{180}\za{7}\nn\\&+\frac{271264}{45}\za{1}\za{7}-\frac{61474}{5}\za{3}\za{7}+\frac{376712}{45}\za{1}\za{3}\za{7}\nn\\&+\frac{38192}{15}\za{3}^2\za{7}-\frac{79436}{45}\za{5}\za{7}+\frac{29008}{15}\za{3}\za{5}\za{7}\nn\\&+\frac{23373}{5}\za{7}^2-\frac{101528}{15}\za{1}\za{7}^2+\frac{11977}{50}\za{9}\nn\\&-\frac{22722}{5}\za{1}\za{9}+\frac{290752}{25}\za{3}\za{9}-\frac{38192}{5}\za{1}\za{3}\za{9}\nn\\&-\frac{87024}{25}\za{3}^2\za{9}-\frac{40068}{5}\za{5}\za{9}+\frac{58016}{5}\za{1}\za{5}\za{9}\nn\\& =0.02384723373803033\cdots \end{align} \begin{align} \zcorf{1}{2}{5}{6}{x}{z}{x}{z}&= \frac{1}{240}-\frac{23}{30}\za{1}+\frac{563}{30}\za{3}-\frac{4678}{45}\za{1}\za{3}-\frac{8114}{15}\za{3}^2\nn\\&-\frac{27224}{135}\za{3}^3-\frac{38389}{225}\za{5}+\frac{35666}{15}\za{1}\za{5}+\frac{18902}{225}\za{3}\za{5}\nn\\&+\frac{54448}{45}\za{1}\za{3}\za{5}-\frac{82984}{225}\za{3}^2\za{5}-\frac{42242}{5}\za{5}^2\nn\\&+\frac{331936}{45}\za{1}\za{5}^2+\frac{12616}{9}\za{3}\za{5}^2+\frac{19168}{27}\za{5}^3+\frac{23177}{45}\za{7}\nn\\&-\frac{414911}{45}\za{1}\za{7}+\frac{284228}{15}\za{3}\za{7}-\frac{580888}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{176624}{45}\za{3}^2\za{7}+\frac{122164}{45}\za{5}\za{7}-\frac{134176}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{324968}{45}\za{7}^2+\frac{469616}{45}\za{1}\za{7}^2-\frac{18123}{50}\za{9}\nn\\&+\frac{34748}{5}\za{1}\za{9}-\frac{1343944}{75}\za{3}\za{9}+\frac{176624}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{134176}{25}\za{3}^2\za{9}+\frac{185696}{15}\za{5}\za{9}-\frac{268352}{15}\za{1}\za{5}\za{9}\nn\\& =-0.001365096861940014\cdots \end{align} \begin{align} \zcorf{1}{2}{5}{6}{x}{z}{z}{x}&=
\frac{1}{240}-\frac{17}{20}\za{1}+\frac{872}{45}\za{3}-\frac{4688}{45}\za{1}\za{3}-\frac{24352}{45}\za{3}^2\nn\\&-\frac{27224}{135}\za{3}^3-\frac{77453}{450}\za{5}+\frac{107078}{45}\za{1}\za{5}+\frac{18952}{225}\za{3}\za{5}\nn\\&+\frac{54448}{45}\za{1}\za{3}\za{5}-\frac{82984}{225}\za{3}^2\za{5}-\frac{42242}{5}\za{5}^2\nn\\&+\frac{331936}{45}\za{1}\za{5}^2+\frac{12616}{9}\za{3}\za{5}^2+\frac{19168}{27}\za{5}^3+\frac{30961}{60}\za{7}\nn\\&-\frac{46109}{5}\za{1}\za{7}+\frac{284228}{15}\za{3}\za{7}-\frac{580888}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{176624}{45}\za{3}^2\za{7}+\frac{122164}{45}\za{5}\za{7}-\frac{134176}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{324968}{45}\za{7}^2+\frac{469616}{45}\za{1}\za{7}^2-\frac{18123}{50}\za{9}\nn\\&+\frac{34748}{5}\za{1}\za{9}-\frac{1343944}{75}\za{3}\za{9}+\frac{176624}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{134176}{25}\za{3}^2\za{9}+\frac{185696}{15}\za{5}\za{9}-\frac{268352}{15}\za{1}\za{5}\za{9}\nn\\& =0.001592580594206881\cdots \end{align} \begin{align} \zcorf{1}{4}{5}{6}{x}{x}{z}{z}&= \frac{1}{240}+\frac{1}{20}\za{1}-\frac{1093}{360}\za{3}+\frac{659}{45}\za{1}\za{3}+\frac{5621}{90}\za{3}^2\nn\\&+\frac{2704}{135}\za{3}^3+\frac{38411}{1800}\za{5}-\frac{2452}{9}\za{1}\za{5}-\frac{3857}{450}\za{3}\za{5}\nn\\&-\frac{5408}{45}\za{1}\za{3}\za{5}+\frac{8272}{225}\za{3}^2\za{5}+\frac{39289}{45}\za{5}^2\nn\\&-\frac{33088}{45}\za{1}\za{5}^2-\frac{1256}{9}\za{3}\za{5}^2-\frac{1904}{27}\za{5}^3-\frac{1589}{30}\za{7}\nn\\&+\frac{14581}{15}\za{1}\za{7}-\frac{17633}{9}\za{3}\za{7}+\frac{57904}{45}\za{1}\za{3}\za{7}\nn\\&+\frac{17584}{45}\za{3}^2\za{7}-\frac{1393}{5}\za{5}\za{7}+\frac{13328}{45}\za{3}\za{5}\za{7}\nn\\&+\frac{33124}{45}\za{7}^2-\frac{46648}{45}\za{1}\za{7}^2+\frac{1729}{50}\za{9}\nn\\&-714\za{1}\za{9}+\frac{137732}{75}\za{3}\za{9}-\frac{17584}{15}\za{1}\za{3}\za{9}\nn\\&-\frac{13328}{25}\za{3}^2\za{9}-\frac{18928}{15}\za{5}\za{9}+\frac{26656}{15}\za{1}\za{5}\za{9}\nn\\& =0.009188091173609528\cdots \end{align} \begin{align} \zcorf{1}{4}{5}{6}{x}{z}{x}{z}&= \frac{1}{240}-\frac{11}{30}\za{1}+\frac{254}{45}\za{3}-\frac{144}{5}\za{1}\za{3}-\frac{6517}{45}\za{3}^2\nn\\&-\frac{7556}{135}\za{3}^3-\frac{18091}{450}\za{5}+\frac{5578}{9}\za{1}\za{5}+\frac{253}{75}\za{3}\za{5}\nn\\&+\frac{15112}{45}\za{1}\za{3}\za{5}-\frac{22928}{225}\za{3}^2\za{5}-\frac{105686}{45}\za{5}^2\nn\\&+\frac{91712}{45}\za{1}\za{5}^2+\frac{3484}{9}\za{3}\za{5}^2+\frac{5296}{27}\za{5}^3+\frac{9793}{90}\za{7}\nn\\&-\frac{106547}{45}\za{1}\za{7}+\frac{47236}{9}\za{3}\za{7}-\frac{160496}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{48776}{45}\za{3}^2\za{7}+\frac{3682}{5}\za{5}\za{7}-\frac{37072}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{91826}{45}\za{7}^2+\frac{129752}{45}\za{1}\za{7}^2-\frac{1848}{25}\za{9}\nn\\&+1778\za{1}\za{9}-\frac{371518}{75}\za{3}\za{9}+\frac{48776}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{37072}{25}\za{3}^2\za{9}+\frac{52472}{15}\za{5}\za{9}-\frac{74144}{15}\za{1}\za{5}\za{9}\nn\\& =-0.003158274321296133\cdots \end{align} \begin{align} \zcorf{1}{4}{5}{6}{x}{z}{z}{x}&= 
\frac{1}{240}-\frac{37}{60}\za{1}+\frac{2887}{360}\za{3}-\frac{1651}{45}\za{1}\za{3}-\frac{4813}{30}\za{3}^2\nn\\&-\frac{7556}{135}\za{3}^3-\frac{28313}{600}\za{5}+\frac{6124}{9}\za{1}\za{5}-\frac{7}{450}\za{3}\za{5}\nn\\&+\frac{15112}{45}\za{1}\za{3}\za{5}-\frac{22928}{225}\za{3}^2\za{5}-\frac{106436}{45}\za{5}^2\nn\\&+\frac{91712}{45}\za{1}\za{5}^2+\frac{3484}{9}\za{3}\za{5}^2+\frac{5296}{27}\za{5}^3+\frac{20461}{180}\za{7}\nn\\&-\frac{108892}{45}\za{1}\za{7}+\frac{47551}{9}\za{3}\za{7}-\frac{160496}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{48776}{45}\za{3}^2\za{7}+\frac{3682}{5}\za{5}\za{7}-\frac{37072}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{91826}{45}\za{7}^2+\frac{129752}{45}\za{1}\za{7}^2-\frac{1848}{25}\za{9}\nn\\&+1778\za{1}\za{9}-\frac{371518}{75}\za{3}\za{9}+\frac{48776}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{37072}{25}\za{3}^2\za{9}+\frac{52472}{15}\za{5}\za{9}-\frac{74144}{15}\za{1}\za{5}\za{9}\nn\\& =0.005680615367538651\cdots \end{align} \begin{align} \zcorf{1}{3}{5}{6}{x}{x}{z}{z}&= \frac{1}{240}+\frac{17}{60}\za{1}-\frac{2617}{360}\za{3}+\frac{1546}{45}\za{1}\za{3}+\frac{2935}{18}\za{3}^2\nn\\&+\frac{8008}{135}\za{3}^3+\frac{10811}{200}\za{5}-\frac{6430}{9}\za{1}\za{5}-\frac{3481}{150}\za{3}\za{5}\nn\\&-\frac{16016}{45}\za{1}\za{3}\za{5}+\frac{8216}{75}\za{3}^2\za{5}+\frac{111916}{45}\za{5}^2\nn\\&-\frac{32864}{15}\za{1}\za{5}^2-\frac{3752}{9}\za{3}\za{5}^2-\frac{5696}{27}\za{5}^3-\frac{13097}{90}\za{7}\nn\\&+\frac{121282}{45}\za{1}\za{7}-\frac{16730}{3}\za{3}\za{7}+\frac{57512}{15}\za{1}\za{3}\za{7}\nn\\&+\frac{52528}{45}\za{3}^2\za{7}-\frac{11921}{15}\za{5}\za{7}+\frac{39872}{45}\za{3}\za{5}\za{7}\nn\\&+\frac{95746}{45}\za{7}^2-\frac{139552}{45}\za{1}\za{7}^2+\frac{4921}{50}\za{9}\nn\\&-2016\za{1}\za{9}+\frac{394898}{75}\za{3}\za{9}-\frac{52528}{15}\za{1}\za{3}\za{9}\nn\\&-\frac{39872}{25}\za{3}^2\za{9}-\frac{54712}{15}\za{5}\za{9}+\frac{79744}{15}\za{1}\za{5}\za{9}\nn\\& =-0.008335007472438759\cdots \end{align} \begin{align} \zcorf{1}{3}{5}{6}{x}{z}{x}{z}&= \frac{1}{240}-\frac{11}{20}\za{1}+\frac{1979}{180}\za{3}-\frac{833}{15}\za{1}\za{3}-\frac{4919}{18}\za{3}^2\nn\\&-\frac{13612}{135}\za{3}^3-\frac{75923}{900}\za{5}+\frac{10660}{9}\za{1}\za{5}+\frac{10547}{450}\za{3}\za{5}\nn\\&+\frac{27224}{45}\za{1}\za{3}\za{5}-\frac{41492}{225}\za{3}^2\za{5}-\frac{189064}{45}\za{5}^2\nn\\&+\frac{165968}{45}\za{1}\za{5}^2+\frac{6308}{9}\za{3}\za{5}^2+\frac{9584}{27}\za{5}^3+\frac{41321}{180}\za{7}\nn\\&-\frac{200018}{45}\za{1}\za{7}+\frac{28217}{3}\za{3}\za{7}-\frac{290444}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{88312}{45}\za{3}^2\za{7}+\frac{59927}{45}\za{5}\za{7}-\frac{67088}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{162484}{45}\za{7}^2+\frac{234808}{45}\za{1}\za{7}^2-\frac{3892}{25}\za{9}\nn\\&+3318\za{1}\za{9}-\frac{665042}{75}\za{3}\za{9}+\frac{88312}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{67088}{25}\za{3}^2\za{9}+\frac{92848}{15}\za{5}\za{9}-\frac{134176}{15}\za{1}\za{5}\za{9}\nn\\& =0.0008303788046606665\cdots \end{align} \begin{align} \zcorf{1}{3}{5}{6}{x}{z}{z}{x}&= 
\frac{1}{240}-\frac{43}{60}\za{1}+\frac{487}{40}\za{3}-\frac{2599}{45}\za{1}\za{3}-\frac{2491}{9}\za{3}^2\nn\\&-\frac{13612}{135}\za{3}^3-\frac{156821}{1800}\za{5}+\frac{3596}{3}\za{1}\za{5}+\frac{5161}{225}\za{3}\za{5}\nn\\&+\frac{27224}{45}\za{1}\za{3}\za{5}-\frac{41492}{225}\za{3}^2\za{5}-\frac{189214}{45}\za{5}^2\nn\\&+\frac{165968}{45}\za{1}\za{5}^2+\frac{6308}{9}\za{3}\za{5}^2+\frac{9584}{27}\za{5}^3+\frac{10409}{45}\za{7}\nn\\&-\frac{200543}{45}\za{1}\za{7}+\frac{28238}{3}\za{3}\za{7}-\frac{290444}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{88312}{45}\za{3}^2\za{7}+\frac{59927}{45}\za{5}\za{7}-\frac{67088}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{162484}{45}\za{7}^2+\frac{234808}{45}\za{1}\za{7}^2-\frac{3892}{25}\za{9}\nn\\&+3318\za{1}\za{9}-\frac{665042}{75}\za{3}\za{9}+\frac{88312}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{67088}{25}\za{3}^2\za{9}+\frac{92848}{15}\za{5}\za{9}-\frac{134176}{15}\za{1}\za{5}\za{9}\nn\\& =-0.001340720075108033\cdots \end{align} \begin{align} \zcors{x}{x}{x}{x}{y}{y}&= \frac{1}{2240}+\frac{11}{240}\za{1}-\frac{23}{16}\za{3}+\frac{382}{45}\za{1}\za{3}+\frac{7961}{180}\za{3}^2\nn\\&+\frac{148}{9}\za{3}^3+\frac{90583}{6300}\za{5}-\frac{5887}{30}\za{1}\za{5}-\frac{4061}{450}\za{3}\za{5}\nn\\&-\frac{296}{3}\za{1}\za{3}\za{5}+\frac{6716}{225}\za{3}^2\za{5}+\frac{430573}{630}\za{5}^2\nn\\&-\frac{26864}{45}\za{1}\za{5}^2-\frac{340}{3}\za{3}\za{5}^2-\frac{402}{7}\za{5}^3-\frac{32909}{720}\za{7}\nn\\&+\frac{69223}{90}\za{1}\za{7}-\frac{138161}{90}\za{3}\za{7}+\frac{47012}{45}\za{1}\za{3}\za{7}\nn\\&+\frac{952}{3}\za{3}^2\za{7}-\frac{39919}{180}\za{5}\za{7}+\frac{1206}{5}\za{3}\za{5}\za{7}\nn\\&+\frac{34811}{60}\za{7}^2-\frac{4221}{5}\za{1}\za{7}^2+\frac{6543}{200}\za{9}\nn\\&-\frac{2908}{5}\za{1}\za{9}+\frac{36322}{25}\za{3}\za{9}-952\za{1}\za{3}\za{9}\nn\\&-\frac{10854}{25}\za{3}^2\za{9}-\frac{4973}{5}\za{5}\za{9}+\frac{7236}{5}\za{1}\za{5}\za{9}\nn\\& =-0.005996024922536831\cdots \end{align} \begin{align} \zcors{x}{x}{x}{y}{x}{y}&= \frac{1}{2240}-\frac{1}{240}\za{1}-\frac{643}{1440}\za{3}+\frac{179}{60}\za{1}\za{3}+\frac{1333}{90}\za{3}^2\nn\\&+\frac{223}{45}\za{3}^3+\frac{301507}{50400}\za{5}-\frac{6139}{90}\za{1}\za{5}-\frac{5221}{900}\za{3}\za{5}\nn\\&-\frac{446}{15}\za{1}\za{3}\za{5}+\frac{2018}{225}\za{3}^2\za{5}+\frac{251033}{1260}\za{5}^2\nn\\&-\frac{8072}{45}\za{1}\za{5}^2-34\za{3}\za{5}^2-\frac{1084}{63}\za{5}^3-\frac{14513}{720}\za{7}\nn\\&+\frac{11753}{45}\za{1}\za{7}-\frac{162149}{360}\za{3}\za{7}+\frac{14126}{45}\za{1}\za{3}\za{7}\nn\\&+\frac{476}{5}\za{3}^2\za{7}-\frac{24209}{360}\za{5}\za{7}+\frac{1084}{15}\za{3}\za{5}\za{7}\nn\\&+\frac{3241}{20}\za{7}^2-\frac{3794}{15}\za{1}\za{7}^2+\frac{731}{50}\za{9}\nn\\&-\frac{1959}{10}\za{1}\za{9}+\frac{21257}{50}\za{3}\za{9}-\frac{1428}{5}\za{1}\za{3}\za{9}\nn\\&-\frac{3252}{25}\za{3}^2\za{9}-\frac{1389}{5}\za{5}\za{9}+\frac{2168}{5}\za{1}\za{5}\za{9}\nn\\& =0.001715129839332883\cdots \end{align} \begin{align} \zcors{x}{x}{x}{y}{y}{x}&= 
\frac{1}{2240}-\frac{7}{240}\za{1}-\frac{239}{1440}\za{3}+\frac{86}{45}\za{1}\za{3}+\frac{1123}{90}\za{3}^2\nn\\&+\frac{223}{45}\za{3}^3+\frac{82489}{16800}\za{5}-\frac{5309}{90}\za{1}\za{5}-\frac{2843}{450}\za{3}\za{5}\nn\\&-\frac{446}{15}\za{1}\za{3}\za{5}+\frac{2018}{225}\za{3}^2\za{5}+\frac{247883}{1260}\za{5}^2\nn\\&-\frac{8072}{45}\za{1}\za{5}^2-34\za{3}\za{5}^2-\frac{1084}{63}\za{5}^3-\frac{2785}{144}\za{7}\nn\\&+\frac{45563}{180}\za{1}\za{7}-\frac{160259}{360}\za{3}\za{7}+\frac{14126}{45}\za{1}\za{3}\za{7}\nn\\&+\frac{476}{5}\za{3}^2\za{7}-\frac{24209}{360}\za{5}\za{7}+\frac{1084}{15}\za{3}\za{5}\za{7}\nn\\&+\frac{3241}{20}\za{7}^2-\frac{3794}{15}\za{1}\za{7}^2+\frac{731}{50}\za{9}\nn\\&-\frac{1959}{10}\za{1}\za{9}+\frac{21257}{50}\za{3}\za{9}-\frac{1428}{5}\za{1}\za{3}\za{9}\nn\\&-\frac{3252}{25}\za{3}^2\za{9}-\frac{1389}{5}\za{5}\za{9}+\frac{2168}{5}\za{1}\za{5}\za{9}\nn\\& =-0.002589603677721155\cdots \end{align} \begin{align} \zcors{x}{x}{y}{x}{x}{y}&= \frac{1}{2240}-\frac{11}{240}\za{1}+\frac{1537}{1440}\za{3}-\frac{421}{60}\za{1}\za{3}-\frac{317}{8}\za{3}^2\nn\\&-\frac{79}{5}\za{3}^3-\frac{623431}{50400}\za{5}+\frac{7949}{45}\za{1}\za{5}+\frac{15851}{1800}\za{3}\za{5}\nn\\&+\frac{474}{5}\za{1}\za{3}\za{5}-\frac{6404}{225}\za{3}^2\za{5}-\frac{808669}{1260}\za{5}^2\nn\\&+\frac{25616}{45}\za{1}\za{5}^2+108\za{3}\za{5}^2+\frac{3452}{63}\za{5}^3+\frac{15553}{360}\za{7}\nn\\&-\frac{64771}{90}\za{1}\za{7}+\frac{172993}{120}\za{3}\za{7}-\frac{44828}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{1512}{5}\za{3}^2\za{7}+\frac{75247}{360}\za{5}\za{7}-\frac{3452}{15}\za{3}\za{5}\za{7}\nn\\&-\frac{32809}{60}\za{7}^2+\frac{12082}{15}\za{1}\za{7}^2-\frac{1593}{50}\za{9}\nn\\&+\frac{5503}{10}\za{1}\za{9}-\frac{68441}{50}\za{3}\za{9}+\frac{4536}{5}\za{1}\za{3}\za{9}\nn\\&+\frac{10356}{25}\za{3}^2\za{9}+\frac{4687}{5}\za{5}\za{9}-\frac{6904}{5}\za{1}\za{5}\za{9}\nn\\& =-0.002089292530622660\cdots \end{align} \begin{align} \zcors{x}{y}{x}{x}{x}{y}&= \frac{1}{2240}-\frac{19}{240}\za{1}+\frac{95}{48}\za{3}-\frac{2087}{180}\za{1}\za{3}-\frac{22057}{360}\za{3}^2\nn\\&-\frac{3134}{135}\za{3}^3-\frac{12221}{630}\za{5}+\frac{2707}{10}\za{1}\za{5}+\frac{3971}{360}\za{3}\za{5}\nn\\&+\frac{6268}{45}\za{1}\za{3}\za{5}-\frac{1897}{45}\za{3}^2\za{5}-\frac{121259}{126}\za{5}^2\nn\\&+\frac{7588}{9}\za{1}\za{5}^2+\frac{1441}{9}\za{3}\za{5}^2+\frac{15340}{189}\za{5}^3+\frac{44329}{720}\za{7}\nn\\&-\frac{191611}{180}\za{1}\za{7}+\frac{194381}{90}\za{3}\za{7}-\frac{13279}{9}\za{1}\za{3}\za{7}\nn\\&-\frac{20174}{45}\za{3}^2\za{7}+\frac{2798}{9}\za{5}\za{7}-\frac{3068}{9}\za{3}\za{5}\za{7}\nn\\&-\frac{14777}{18}\za{7}^2+\frac{10738}{9}\za{1}\za{7}^2-\frac{1763}{40}\za{9}\nn\\&+\frac{4029}{5}\za{1}\za{9}-\frac{12269}{6}\za{3}\za{9}+\frac{20174}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{3068}{5}\za{3}^2\za{9}+\frac{4222}{3}\za{5}\za{9}-\frac{6136}{3}\za{1}\za{5}\za{9}\nn\\& =0.0008959339234083256\cdots \end{align} \begin{align} \zcors{x}{x}{y}{x}{y}{x}&= 
\frac{1}{2240}-\frac{13}{240}\za{1}+\frac{619}{480}\za{3}-\frac{229}{30}\za{1}\za{3}-\frac{14687}{360}\za{3}^2\nn\\&-\frac{79}{5}\za{3}^3-\frac{74279}{5600}\za{5}+\frac{2723}{15}\za{1}\za{5}+\frac{5197}{600}\za{3}\za{5}\nn\\&+\frac{474}{5}\za{1}\za{3}\za{5}-\frac{6404}{225}\za{3}^2\za{5}-\frac{810139}{1260}\za{5}^2\nn\\&+\frac{25616}{45}\za{1}\za{5}^2+108\za{3}\za{5}^2+\frac{3452}{63}\za{5}^3+\frac{2633}{60}\za{7}\nn\\&-\frac{8687}{12}\za{1}\za{7}+\frac{173287}{120}\za{3}\za{7}-\frac{44828}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{1512}{5}\za{3}^2\za{7}+\frac{75247}{360}\za{5}\za{7}-\frac{3452}{15}\za{3}\za{5}\za{7}\nn\\&-\frac{32809}{60}\za{7}^2+\frac{12082}{15}\za{1}\za{7}^2-\frac{1593}{50}\za{9}\nn\\&+\frac{5503}{10}\za{1}\za{9}-\frac{68441}{50}\za{3}\za{9}+\frac{4536}{5}\za{1}\za{3}\za{9}\nn\\&+\frac{10356}{25}\za{3}^2\za{9}+\frac{4687}{5}\za{5}\za{9}-\frac{6904}{5}\za{1}\za{5}\za{9}\nn\\& =0.001508610170462532\cdots \end{align} \begin{align} \zcors{x}{y}{x}{z}{z}{y}&= \frac{1}{6720}-\frac{1}{80}\za{1}+\frac{25}{288}\za{3}-\frac{16}{45}\za{1}\za{3}-\frac{121}{90}\za{3}^2\nn\\&-\frac{73}{135}\za{3}^3+\frac{1541}{50400}\za{5}+\frac{413}{90}\za{1}\za{5}-\frac{539}{450}\za{3}\za{5}\nn\\&+\frac{146}{45}\za{1}\za{3}\za{5}-\frac{211}{225}\za{3}^2\za{5}-\frac{32891}{1260}\za{5}^2\nn\\&+\frac{844}{45}\za{1}\za{5}^2+\frac{32}{9}\za{3}\za{5}^2+\frac{344}{189}\za{5}^3-\frac{137}{120}\za{7}\nn\\&-\frac{217}{15}\za{1}\za{7}+\frac{2578}{45}\za{3}\za{7}-\frac{1477}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{448}{45}\za{3}^2\za{7}+\frac{107}{15}\za{5}\za{7}-\frac{344}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{2219}{90}\za{7}^2+\frac{1204}{45}\za{1}\za{7}^2+\frac{207}{200}\za{9}\nn\\&+\frac{103}{10}\za{1}\za{9}-\frac{8017}{150}\za{3}\za{9}+\frac{448}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{344}{25}\za{3}^2\za{9}+\frac{634}{15}\za{5}\za{9}-\frac{688}{15}\za{1}\za{5}\za{9}\nn\\& =0.0007223711440396728\cdots \end{align} \begin{align} \zcors{x}{x}{y}{z}{y}{z}&= \frac{1}{6720}+\frac{1}{80}\za{1}-\frac{353}{720}\za{3}+\frac{569}{180}\za{1}\za{3}+\frac{327}{20}\za{3}^2\nn\\&+\frac{163}{27}\za{3}^3+\frac{26927}{5040}\za{5}-\frac{655}{9}\za{1}\za{5}-\frac{109}{30}\za{3}\za{5}\nn\\&-\frac{326}{9}\za{1}\za{3}\za{5}+\frac{488}{45}\za{3}^2\za{5}+\frac{63055}{252}\za{5}^2\nn\\&-\frac{1952}{9}\za{1}\za{5}^2-\frac{370}{9}\za{3}\za{5}^2-\frac{3940}{189}\za{5}^3-\frac{209}{12}\za{7}\nn\\&+\frac{4291}{15}\za{1}\za{7}-\frac{22497}{40}\za{3}\za{7}+\frac{3416}{9}\za{1}\za{3}\za{7}\nn\\&+\frac{1036}{9}\za{3}^2\za{7}-\frac{5869}{72}\za{5}\za{7}+\frac{788}{9}\za{3}\za{5}\za{7}\nn\\&+\frac{7609}{36}\za{7}^2-\frac{2758}{9}\za{1}\za{7}^2+\frac{251}{20}\za{9}\nn\\&-\frac{433}{2}\za{1}\za{9}+\frac{15961}{30}\za{3}\za{9}-\frac{1036}{3}\za{1}\za{3}\za{9}\nn\\&-\frac{788}{5}\za{3}^2\za{9}-\frac{1087}{3}\za{5}\za{9}+\frac{1576}{3}\za{1}\za{5}\za{9}\nn\\& =0.001419281470187951\cdots \end{align} \begin{align} \zcors{x}{x}{y}{z}{z}{y}&= 
\frac{1}{6720}+\frac{1}{120}\za{1}-\frac{11}{24}\za{3}+\frac{571}{180}\za{1}\za{3}+\frac{733}{45}\za{3}^2\nn\\&+\frac{163}{27}\za{3}^3+\frac{26003}{5040}\za{5}-\frac{145}{2}\za{1}\za{5}-\frac{73}{20}\za{3}\za{5}\nn\\&-\frac{326}{9}\za{1}\za{3}\za{5}+\frac{488}{45}\za{3}^2\za{5}+\frac{63041}{252}\za{5}^2\nn\\&-\frac{1952}{9}\za{1}\za{5}^2-\frac{370}{9}\za{3}\za{5}^2-\frac{3940}{189}\za{5}^3-\frac{3107}{180}\za{7}\nn\\&+\frac{51443}{180}\za{1}\za{7}-\frac{67477}{120}\za{3}\za{7}+\frac{3416}{9}\za{1}\za{3}\za{7}\nn\\&+\frac{1036}{9}\za{3}^2\za{7}-\frac{5869}{72}\za{5}\za{7}+\frac{788}{9}\za{3}\za{5}\za{7}\nn\\&+\frac{7609}{36}\za{7}^2-\frac{2758}{9}\za{1}\za{7}^2+\frac{251}{20}\za{9}\nn\\&-\frac{433}{2}\za{1}\za{9}+\frac{15961}{30}\za{3}\za{9}-\frac{1036}{3}\za{1}\za{3}\za{9}\nn\\&-\frac{788}{5}\za{3}^2\za{9}-\frac{1087}{3}\za{5}\za{9}+\frac{1576}{3}\za{1}\za{5}\za{9}\nn\\& =-0.002041037236271903\cdots \end{align} \begin{align} \zcors{x}{y}{z}{x}{z}{y}&= \frac{1}{6720}-\frac{7}{240}\za{1}+\frac{1169}{1440}\za{3}-\frac{941}{180}\za{1}\za{3}-\frac{3391}{120}\za{3}^2\nn\\&-\frac{1474}{135}\za{3}^3-\frac{150971}{16800}\za{5}+\frac{5662}{45}\za{1}\za{5}+\frac{1237}{200}\za{3}\za{5}\nn\\&+\frac{2948}{45}\za{1}\za{3}\za{5}-\frac{1474}{75}\za{3}^2\za{5}-\frac{281161}{630}\za{5}^2\nn\\&+\frac{5896}{15}\za{1}\za{5}^2+\frac{671}{9}\za{3}\za{5}^2+\frac{7148}{189}\za{5}^3+\frac{21893}{720}\za{7}\nn\\&-\frac{22673}{45}\za{1}\za{7}+\frac{5013}{5}\za{3}\za{7}-\frac{10318}{15}\za{1}\za{3}\za{7}\nn\\&-\frac{9394}{45}\za{3}^2\za{7}+\frac{2179}{15}\za{5}\za{7}-\frac{7148}{45}\za{3}\za{5}\za{7}\nn\\&-\frac{17059}{45}\za{7}^2+\frac{25018}{45}\za{1}\za{7}^2-\frac{4441}{200}\za{9}\nn\\&+\frac{1917}{5}\za{1}\za{9} -\frac{71282}{75}\za{3}\za{9}+\frac{9394}{15}\za{1}\za{3}\za{9}\nn\\&+\frac{7148}{25}\za{3}^2\za{9}+\frac{9748}{15}\za{5}\za{9}-\frac{14296}{15}\za{1}\za{5}\za{9}\nn\\& =-0.0001528594146694370\cdots \end{align} \begin{align} \zcors{x}{y}{z}{z}{y}{x}&= \frac{1}{6720}-\frac{1}{24}\za{1}+\frac{41}{36}\za{3}-\frac{109}{18}\za{1}\za{3}-\frac{571}{18}\za{3}^2\nn\\&-\frac{529}{45}\za{3}^3-\frac{134587}{12600}\za{5}+\frac{6328}{45}\za{1}\za{5}+\frac{2737}{450}\za{3}\za{5}\nn\\&+\frac{1058}{15}\za{1}\za{3}\za{5}-\frac{4852}{225}\za{3}^2\za{5}-\frac{617377}{1260}\za{5}^2\nn\\&+\frac{19408}{45}\za{1}\za{5}^2+82\za{3}\za{5}^2+\frac{872}{21}\za{5}^3+\frac{2599}{80}\za{7}\nn\\&-\frac{8197}{15}\za{1}\za{7}+\frac{10999}{10}\za{3}\za{7}-\frac{33964}{45}\za{1}\za{3}\za{7}\nn\\&-\frac{1148}{5}\za{3}^2\za{7}+\frac{7132}{45}\za{5}\za{7}-\frac{872}{5}\za{3}\za{5}\za{7}\nn\\&-\frac{2086}{5}\za{7}^2+\frac{3052}{5}\za{1}\za{7}^2-\frac{4581}{200}\za{9}\nn\\&+\frac{4121}{10}\za{1}\za{9}-\frac{26024}{25}\za{3}\za{9}+\frac{3444}{5}\za{1}\za{3}\za{9}\nn\\&+\frac{7848}{25}\za{3}^2\za{9}+\frac{3576}{5}\za{5}\za{9}-\frac{5232}{5}\za{1}\za{5}\za{9}\nn\\& =-0.0003767104281403536\cdots \end{align} \end{appendix}
\section*{Abstract} \textit{The evidence is growing that machine and deep learning methods can learn the subtle differences between the language produced by people with various forms of cognitive impairment such as dementia and cognitively healthy individuals. Valuable public data repositories such as TalkBank have made it possible for researchers in the computational community to join forces and learn from each other to make significant advances in this area. However, due to variability in approaches and data selection strategies used by various researchers, results obtained by different groups have been difficult to compare directly. In this paper, we present TRESTLE (\textbf{T}oolkit for \textbf{R}eproducible \textbf{E}xecution of \textbf{S}peech \textbf{T}ext and \textbf{L}anguage \textbf{E}xperiments), an open source platform that focuses on two datasets from the TalkBank repository with dementia detection as an illustrative domain. Successfully deployed in the hackallenge (Hackathon/Challenge) of the International Workshop on Health Intelligence at AAAI 2022, TRESTLE provides a precise digital blueprint of the data pre-processing and selection strategies that can be reused by other researchers seeking results comparable with those of their peers and with current state-of-the-art (SOTA) approaches. } \section*{Introduction} In the ``Last Words'' letter to ``Computational Linguistics'' in 2008, Pedersen pointed out that the computational linguistics community was experiencing a reproducibility crisis \cite{pedersen-2008-last}. In that letter, Pedersen provided strong arguments that all published computational linguistic research needs to be accompanied by working software to enable its replication in order to be credible, and that it was ``unreasonable to expect that reproducibility be possible based on the description provided in a publication.'' Ten years later, in 2018, another group of researchers decided to follow up on Pedersen's ``last words'' to investigate the extent to which researchers in computational linguistics were willing and able to share their code for the sake of reproducibility. Wieling et al.~\cite{wieling-etal-2018-squib} surveyed 395 publications and found that the code was available either immediately or upon request for only one third of these papers. Furthermore, when they tried to replicate the results for a selection of 10 papers, they were only able to do so for six papers and obtained the exact same results as had been published for only one. These results highlight the magnitude of this persistent problem, which is not unique to the computational linguistics community and has been noted in the machine learning (ML) \cite{Kapoor2022}, psychology \cite{Yong2013}, and biomedical natural language processing (NLP) \cite{cohen-etal-2018-three, Digan2020, mieskes:hal-02282794} research fields as well. The work presented in this paper addresses the broader problem of reproducibility by focusing on a specific subproblem of replicability as set forth by Cohen et al. \cite{cohen-etal-2018-three} in at least one narrowly defined interdisciplinary area of research - computational approaches to characterizing changes in speech and language characteristics caused by cognitive impairment resulting from neurodegenerative conditions such as Alzheimer's disease (AD). This is an important area to address because AD is a debilitating condition with no known cure that affects every aspect of cognition, including language use.
Over 50 million people have been diagnosed with AD dementia, and this number is anticipated to triple by 2050 \cite{patterson2018state, prince2016world, world2017global}. Previous studies \cite{lyu2018review, petti2020systematic} have demonstrated that machine learning methods can learn to distinguish between language from healthy controls and dementia patients, suggesting that automatic analysis of spoken language can potentially provide accurate, easy-to-use, safe, and cost-effective tools for monitoring AD-related cognitive markers. However, a persistent challenge in this work has been the difficulty involved in reproducing prior work and comparing results across studies on account of the use of different diagnosis-related subsets (i.e., probable vs. possible dementia), aggregation strategies (i.e., one vs. multiple transcripts per participant), performance metrics, and cross-validation protocols. This challenge is exacerbated by the fact that the space available for publication of results is typically highly limited, and even when a publication venue allows appendices, the description of the methods provided by authors can be highly variable and subject to misinterpretation and uncertainty when trying to reproduce the methods. Consistent with previous findings \cite{wieling-etal-2018-squib}, some researchers provide code while others do not, and the code that is provided typically includes only the implementation of core machine learning methods and does not include the scripts needed for data selection and exact execution of validation strategies. To address this challenge, we developed TRESTLE (\textbf{T}oolkit for \textbf{R}eproducible \textbf{E}xecution of \textbf{S}peech \textbf{T}ext and \textbf{L}anguage \textbf{E}xperiments) for DementiaBank (DB), one of the most popular repositories hosting data that the computational linguistics and machine learning communities use to build state-of-the-art (SOTA) models for identifying the subtle differences between the language used by dementia patients and healthy controls\footnote{To see a full list of publications that use data from DementiaBank, see \url{https://dementia.talkbank.org/publications/bib.pdf}}. In particular, TRESTLE supports data pre-processing for the Pitt corpus \cite{becker1994natural} and other corpora such as transcripts from the Wisconsin Longitudinal Study (WLS) \cite{herd2014cohort} - both are formatted using the CHAT \cite{10.1162/coli.2000.26.4.657} protocol.\footnote{For more details about the CHAT protocol, please check the manual here: \url{https://talkbank.org/manuals/CHAT.pdf}} TRESTLE provides an opportunity for researchers to submit a manifest that includes the precise pre-processing parameters, data selection, and user-defined criteria for ``dementia'' and ``control''. Therefore, individuals can freely design their own pre-processing parameters and use \textit{exactly the same} data that their peers have provided, allowing for comparable and reproducible evaluation outcomes for their analytical models.
To the best of our knowledge, this is the first toolkit that provides the infrastructure to enable direct comparisons between experiments conducted on DementiaBank datasets. While it is currently designed and tested with data contained in the DementiaBank portion of the TalkBank\cite{macwhinney2007talkbank} repository\footnote{\url{https://www.talkbank.org/}}, it can be easily extended to other public datasets following the CHAT protocol to facilitate reproducibility and comparability of experimental results and the ease of establishing and improving the SOTA in the ML research community. The code for the toolkit is publicly available on GitHub\footnote{\url{https://github.com/LinguisticAnomalies/harmonized-toolkit}}. \section*{TRESTLE Design Overview} In theory, if researcher B intends to reproduce the results of methods developed by researcher A, all that researcher B would need to do is ask researcher A for a copy of the data used to obtain the results. In practice, there are many barriers to executing this scenario, including the fact that the owners of even publicly available datasets typically do not allow individual researchers to redistribute their data. Therefore, if researcher A makes any modifications to the original data for the purposes of experimentation, these modifications remain with researcher A, as they are not typically propagated back to the original dataset. Researcher B wishing to replicate and improve upon A's results has to obtain the original data from the owner of the data and figure out how to make the same modifications to the original data as were made by researcher A. While researcher A typically does provide in a publication the information describing the data selection and modification decisions, researcher B still has to essentially reconstruct these modifications. Clearly, this situation is error-prone and not conducive to making rapid scientific progress. The main motivation for creating TRESTLE stems from the need for a convenient and error-resistant way of communicating the details of a researcher's experimental design to other researchers so they can replicate the experimental conditions in order to test their own methods and compare results to those obtained by previous researchers. Motivated by this need for replicability, the key design feature of TRESTLE is the generation of a machine-readable manifest that captures all of the data selection and pre-processing decisions that were made while running an experiment on the supported datasets. The manifest is intended to be disseminated along with publishing the results of experiments and used as a blueprint to replicate the exact same experimental setup and conditions by others. The objective is to avoid the situation in which a group of researchers develops a new machine learning algorithm for discrimination between dementia cases and controls based on speech and language features, experimentally validates the algorithm on a dataset, and publishes the results, but another group is not able to reproduce their results because of either insufficient information provided in the publication or misinterpretation of the information or both. An even worse situation may arise where the results are replicated (e.g. same or similar metrics are obtained) but the experimental conditions differ in some subtle ways. Both of these scenarios may lead to meaningless comparisons or significant difficulty in conducting meaningful comparisons, thereby hindering the research community's ability to build on each other's work.
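To make the blueprint idea concrete, the following minimal sketch (in Python) shows how a second researcher might consume a manifest of the kind shown later in Block~\ref{alg:baseline} to reconstruct the exact data selection and train/test split of an earlier study; the file name is hypothetical and error handling is omitted.
\begin{verbatim}
import json

def load_manifest(path):
    """Reconstruct an experimental design from a TRESTLE manifest."""
    with open(path) as f:
        manifest = json.load(f)
    positives = set(manifest["positive_uids"])
    # Rebuild the train/test split with the original binary labels:
    # 1 = "dementia", 0 = "control", exactly as defined by the authors.
    train = [(uid, int(uid in positives)) for uid in manifest["training_uids"]]
    test = [(uid, int(uid in positives)) for uid in manifest["test_uids"]]
    return manifest, train, test

manifest, train, test = load_manifest("baseline_manifest.json")
print(manifest["method"], manifest["evaluation"])
\end{verbatim}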
TRESTLE is also designed to make pre-processing decisions as explicit as possible while providing the flexibility for researchers to add their own pre-processing scripts needed to replicate their results. The motivation for providing this functionality is secondary to the main motivation of replicating the experimental conditions, because pre-processing could be considered a part of one's methodology. For example, including pause fillers (um's and ah's) in training a neural model of speech affected by dementia may be viewed as a novel methodological decision that would contribute to better classification accuracy. As such, pre-processing in general may not lend itself well to standardization. However, in more complex scenarios in which pre-processing itself involves using statistical models or other tools with multiple user-controlled parameters (e.g., target audio sampling rate, noise reduction techniques, etc.), it is also important to capture these parameters precisely and explicitly and provide them together with any software code to subsequent researchers so as to enable them to reproduce these methods. The parameters used during sound and text pre-processing/conversion are also stored in the manifest file. In addition to the generation of the manifest, TRESTLE comes with a set of standard pre-processing utilities for text and audio, as demonstrated in Figure~\ref{fig:trestle_overview}. TRESTLE is divided into two sub-modules: a) pre-processing text data (Figure~\ref{fig:text}), and b) pre-processing audio data (Figure~\ref{fig:audio}) that is fully aligned with the corresponding text transcript. Each sub-module contains a number of parameters that users can define in their own pre-processing manifest. Block~\ref{alg:text_module} and Block~\ref{alg:audio_module} show the general flow of using TRESTLE for pre-processing text and audio samples, respectively. \begin{figure}[htbp] \begin{subfigure}{0.5\textwidth} \centering \includegraphics[width=\linewidth]{TRESTLE-text.drawio.png} \caption{The overview of the text pre-processing submodule} \label{fig:text} \end{subfigure} \begin{subfigure}{0.5\textwidth} \centering \includegraphics[width=\linewidth]{TRESTLE-audio.drawio.png} \caption{The overview of the audio pre-processing submodule} \label{fig:audio} \end{subfigure} \caption{TRESTLE design overview} \label{fig:trestle_overview} \end{figure} \begin{algorithm} \caption{General flow of TRESTLE \textbf{text} pre-processing sub-module. \textit{Italic} indicates inputs from users} \label{alg:text_module} Which dataset you are pre-processing? wls or db?: \textit{db} Where are the .cha files located?: \textit{file-locations} Remove 'clear throat'? (Y/N): \textit{y} Remove open parentheses e.g. (be)coming? (Y/N): \textit{y} Remove open square brackets e.g. [: overflowing]? (Y/N): \textit{y} Remove disfluencies prefixed with '\&'? (Y/N): \textit{y} Remove unintelligible words? (Y/N): \textit{y} Remove pauses e.g. (.) or (..)? (Y/N): \textit{y} Remove forward slashes in square brackets? (Y/N): \textit{y} Remove noise indicators e.g. \&=breath? (Y/N): \textit{y} Remove square brackets indicating an error code? (Y/N): \textit{y} Remove all non-alphanumeric characters? (Y/N): \textit{y} Replace multiple spaces with a single space? (Y/N): \textit{y} Capitalize the first character? (Y/N): \textit{y} Add period at the end of every sentence? (Y/N): \textit{y} Add newline at the end of every sentence? (Y/N): \textit{n} Your data will be stored as a .tsv file. Please enter the output path and file name for your pre-processed transcripts: \textit{output.tsv} Please stand by, your pre-processing script will be generated shortly... Your text pre-processing json file has been generated! Running text pre-processing script now... Your dataset is now pre-processed! \end{algorithm} Block~\ref{alg:text_module} demonstrates the pre-processing features currently supported by TRESTLE. As illustrated in Block~\ref{alg:trans}, the raw input CHAT (\texttt{.cha}) file contains several tags indicating participants' behavior during the interview. TRESTLE allows users to remove tags/indicators such as the clear throat indicator, open parentheses or brackets, noise, disfluencies, non-words, or pauses from the verbatim transcript, if desired. Furthermore, users can choose whether or not to capitalize the first character of each sentence, or to add a newline at the end of each sentence. Depending on the type of analysis the user intends to do, some or all of these extra-linguistic or para-linguistic elements may need to be either removed or used in the analysis, as demonstrated in several previous studies \cite{orimaye2017predicting, cohen-pakhomov-2020-tale, li-etal-2022-gpt}. These binary user-controlled parameters are stored in the manifest file in JSON format. Other TRESTLE users can apply the same pre-processing parameters to raw transcripts by using this manifest file, or modify the manifest if comparability to previous work is not desired, giving authors the option to choose their own criteria while ensuring that the criteria are explicit and can subsequently be precisely replicated by others.
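To illustrate what a few of these removal options do, the sketch below (illustrative only, not TRESTLE's actual implementation) applies simplified versions of them to a raw utterance in the style of the Pitt transcript shown later in Block~\ref{alg:trans}:
\begin{verbatim}
import re

def clean_utterance(text):
    # Simplified versions of a few manifest options; TRESTLE's own
    # rules are more complete than these illustrative regexes.
    text = re.sub(r"&\S+", "", text)          # disfluencies such as &um
    text = re.sub(r"\[[^\]]*\]", "", text)    # square-bracket codes, e.g. [//]
    text = re.sub(r"\((\w+)\)", r"\1", text)  # open parentheses: fallin(g) -> falling
    text = re.sub(r"\d+_\d+", "", text)       # utterance start/end timestamps
    text = re.sub(r"\s+", " ", text).strip()  # collapse multiple spaces
    return text

print(clean_utterance(
    "there's &um a young boy that's getting a cookie jar . 6897_12218"))
# -> "there's a young boy that's getting a cookie jar ."
\end{verbatim}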
\begin{algorithm} \caption{General flow of TRESTLE \textbf{audio} pre-processing sub-module. \textit{Italic} indicates inputs from users} \label{alg:audio_module} Which dataset you are pre-processing? wls or db?: \textit{db} Where are the .mp3 files located?: \textit{file-locations} Where do you want to store the trimmed audio segments? \textit{audio-segments-locations} Enter sample rate: \textit{16000} Feature extraction methods, selecting from FTT or MFCC or NONE: \textit{ftt} Enter number of FTT windows size or MFCC, 0 for NONE: \textit{2} Scaling MFCC? y/n: \textit{n} Your audio pre-processing json file has been generated! Running audio pre-processing script now... Starting to convert .mp3 to .wav Finished! Starting to resample audio to target sample rate... Finished! Your dataset is now pre-processed! \end{algorithm} Some other barriers to replicability stem from the variability in how raw audio data is pre-processed and prepared for ML. For example, the Pitt corpus audio data is in 16 bit, 16 kHz sampling rate (i.e., 256 kilobits/second bit rate) uncompressed WAVE format, whereas the WLS data is in compressed MP3 format encoded at 44.1 kHz sampling rate but 124 kilobits/second bit rate - about half the bit rate of the Pitt corpus. It may be important for studies that use the audio from these datasets to convert the audio to a single specific format needed for analysis, with an understanding of the implications of any such conversion for the resulting audio quality. In order to enable these conversions in TRESTLE, we included the Sound eXchange\footnote{\url{http://sox.sourceforge.net/}} library for resampling audio samples.
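A conversion of this kind can be expressed as a single SoX invocation; the sketch below (with hypothetical file names, wrapping the command-line tool from Python) downsamples a WLS-style MP3 to the single-channel, 16-bit, 16 kHz WAV format of the Pitt corpus. TRESTLE's own invocation may differ in detail.
\begin{verbatim}
import subprocess

def resample(src, dst, rate=16000):
    # SoX output format options precede the output file: sample rate,
    # channel count, and bits per sample.
    subprocess.run(
        ["sox", src, "-r", str(rate), "-c", "1", "-b", "16", dst],
        check=True)

resample("wls_interview.mp3", "wls_interview.wav")
\end{verbatim}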
TRESTLE additionally supports feature extraction algorithms such as the Fourier transform (FT) and Mel-frequency cepstral coefficients (MFCC). These user-controlled parameters are then applied to each text or audio file from the Pitt or WLS datasets. The text sub-module merges all pre-processed utterance-level \texttt{.cha} transcripts into a single \texttt{.tsv} file and saves it to the user-specified destination. Furthermore, when pre-processing a dataset in which the text and audio are fully aligned with each other (i.e., the Pitt corpus and part of the WLS dataset), the text sub-module writes a list of timestamps indicating the intervals of test administrator speech to the corresponding \texttt{.json} file for further pre-processing in the audio sub-module. The audio sub-module converts audio files to a user-defined format (e.g. single-channel PCM waveform, sampled at 16 kHz). The audio sub-module also generates utterance-level audio segments. Used together with the corresponding utterance-level transcripts from the text sub-module, these segments enable follow-up applications such as automatic speech recognition (ASR) modeling.
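As a sketch of the feature-extraction step (illustrative only; TRESTLE's own implementation and parameter names may differ), MFCCs for a resampled utterance-level segment can be computed with a library such as librosa:
\begin{verbatim}
import librosa

def mfcc_features(wav_path, sr=16000, n_mfcc=13):
    # Load the utterance-level segment at the target sample rate and
    # compute an (n_mfcc x frames) matrix of cepstral coefficients.
    y, _ = librosa.load(wav_path, sr=sr)
    return librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
\end{verbatim}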
\section*{Datasets} \begin{figure}[htbp] \centering \small \includegraphics[scale=0.25]{cookie_theft.jpg} \caption{Cookie Theft stimulus} \label{fig:cookie_theft} \end{figure} TRESTLE currently directly supports data pre-processing for two publicly available datasets from TalkBank: a) the Pitt corpus\footnote{\url{https://dementia.talkbank.org/access/English/Pitt.html}}, and b) the WLS corpus\footnote{\url{https://dementia.talkbank.org/access/English/WLS.html}}. The details of the two datasets are included in Table~\ref{tab:data}, and a sample transcript formatted in the CHAT protocol from the Pitt corpus is shown in Block~\ref{alg:trans}. These datasets, including raw audio, manual transcripts, linguistic annotations, and metadata with demographic, clinical, and neuropsychological test characteristics, are publicly available from TalkBank. The Pitt corpus contains audio recordings and manually transcribed transcripts of neuropsychological tests including the ``Cookie Theft'' picture description task from the Boston Diagnostic Aphasia Examination \cite{goodglass1983boston}. In this task, participants were asked to describe everything they see occurring in Figure~\ref{fig:cookie_theft}. Participant responses were audio recorded and subsequently transcribed verbatim. Participants were tested multiple times, resulting in multiple transcripts per participant. In total, there are 242 recordings/transcripts from the 99 healthy controls and 257 recordings/transcripts from the 169 participants with AD-related diagnoses. Neurological examination results, including results of the Mini-Mental State Exam (MMSE) and Clinical Dementia Rating (CDR), are also included in the Pitt corpus. \begin{table}[htbp] \centering \small \caption{Dataset description.} \begin{tabular}{|ll|l|l|} \hline \multicolumn{2}{|l|}{\textbf{Characteristics}} & \textbf{Pitt} & \textbf{WLS} \\ \hline \multicolumn{2}{|l|}{Age, mean (SD)} & 69.2 (8.9) & 70.4 (4.4) \\ \hline \multicolumn{1}{|l|}{\multirow{2}{*}{Gender, \textit{n} (\%)}} & Male & 200 (39.2) & 694 (50.7)\\ \cline{2-4} \multicolumn{1}{|l|}{} & Female & 310 (60.8) & 675 (49.3) \\ \hline \multicolumn{2}{|l|}{Education, mean (SD)} & 12.5 (3.1)& 13.5 (3.1)\\ \hline \multicolumn{2}{|l|}{MMSE, mean (SD)} & 20.7 (7.4) & NA (NA) \\ \hline \end{tabular} \label{tab:data} \end{table} \begin{algorithm} \caption{The first few lines of a sample transcript from the Pitt corpus in the CHAT protocol. The morphology and grammar tiers have been omitted for readability. The integers at the end of each line represent start and end times of the utterance in that line. Please check the CHAT protocol for more details about tagging in the transcript.} \label{alg:trans} @PID: 11312/t-00002420-1\\ @Begin\\ @Languages: eng\\ @Participants: PAR Participant, INV Investigator\\ @ID: eng$\vert$Pitt$\vert$PAR$\vert$57;$\vert$male$\vert$ProbableAD$\vert\vert$Participant$\vert$18$\vert\vert$\\ @ID: eng$\vert$Pitt$\vert$INV$\vert\vert\vert\vert\vert$Investigator$\vert\vert\vert$\\ @Media: 001-0, audio\\ @Comment: another audio testing file overlaps in background\\ *INV: this is the picture . 0\_2581 \newline *PAR: mhm . {[+ exc]} 2581\_3426\newline *INV: just tell me everything that you see happening in that picture . 3426\_6661\newline *PAR: +$<$ alright . {[+ exc]} 6000\_6897\newline *PAR: there's \&um a young boy that's getting a cookie jar . 6897\_12218\newline *PAR: and it [//] he's \&uh in bad shape because \&uh the thing is fallin(g) over . 12218\_18718\newline *PAR: and in the picture the mother is washin(g) dishes and doesn't see it . 18718\_24822\newline ...\newline @End \end{algorithm} The WLS is a longitudinal study of 694 men and 675 women who graduated from Wisconsin high schools in 1957, in which the participants were interviewed up to six times between 1957 and 2011. Cognitive evaluations and the ``Cookie Theft'' picture description task were introduced in later rounds of the WLS interviews and are presented in CHAT-formatted (\texttt{.cha}) files. All of the participants in the WLS were considered to be cognitively healthy upon entry into the study. Some may have developed dementia in later years; however, the neurological diagnostic information is not currently publicly available. Defining the ``dementia'' and ``control'' categories is not entirely straightforward and creates a barrier to reproducibility even if the criteria are described. For example, typical studies involving the Pitt corpus focus on 169 participants classified as having \textit{possible} or \textit{probable} AD based on clinical or pathological examination, as well as 99 healthy controls. However, 10 of the 99 healthy controls later acquired a dementia-related diagnosis - 7 of the 10 being diagnosed with probable AD and the remaining 3 having an indeterminate diagnostic status at baseline. This complicates data analysis since individuals' diagnostic statuses may change over time, and how this change is treated in a given study may significantly affect the results. The paucity of neurological diagnoses in the WLS further complicates data analysis.
One way to categorize WLS participants into those with potential cognitive impairment and those without is to use the available verbal fluency neuropsychological test scores \cite{10.3389/fcomp.2021.642517}, as verbal fluency (the ability to name words belonging to a semantic category) is significantly impaired in dementia and has been recommended for clinical use as a screening instrument for dementia \cite{Canning556}. However, various verbal fluency score cutoffs for dementia have been proposed in the literature, and different authors may follow the literature that they trust in selecting the cutoffs. It would not be reasonable to try to impose a single specific cutoff on all studies using the WLS data.
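To illustrate how such a cutoff turns a test score into a label, the sketch below implements the age-dependent verbal fluency rule used by one of the hackallenge teams described in the next section (see Table~\ref{tab:data-selection}); the thresholds are theirs, not a clinical recommendation, and we assume that scores at or below the threshold indicate the ``dementia'' group and that the boundary age of 79 falls into the oldest band.
\begin{verbatim}
def wls_label(age, fluency_score):
    # Age-dependent thresholds following Team 1's rule in the
    # data-selection table; boundary interpretations are our assumption.
    if age < 60:
        threshold = 16
    elif age < 79:
        threshold = 14
    else:
        threshold = 12
    return "dementia" if fluency_score <= threshold else "control"
\end{verbatim}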
\section*{Results} We deployed TRESTLE at the Data Hackallenge\footnote{\url{https://w3phiai2022.w3phi.com/hackathon.html}} of the International Workshop on Health Intelligence\footnote{\url{https://w3phiai2022.w3phi.com/index.html}}, which was co-hosted at AAAI 2022. During the hackallenge, each group of participants used TRESTLE to generate specific subsets of the data along with the pre-processing TRESTLE manifest. Each group was instructed to select data samples from the Pitt or WLS set (or both) using the criteria provided in the corresponding metadata. Each group was also asked to label each selected data sample as ``positive'' (``dementia'') or ``negative'' (``controls'') based on their preferred criteria. Each group was also asked to develop an analytical method (pipeline) of their choosing for discriminating between those categories. Finally, each group was asked to select an evaluation strategy of their choosing for their analytical method. In the second phase of the hackallenge, each team was asked to evaluate the other group's pre-processing manifests and run their analysis pipeline using the data selection, category definition, and evaluation strategy information provided in the other group's manifest to replicate the other group's experimental design so that the results could be directly compared. We provided a baseline manifest, following the current SOTA on such tasks with text transcripts \cite{Balagopalan2020ToBO}. For the baseline system, we fine-tuned BERT \cite{Devlin2019BERTPO} on the Alzheimer's Dementia Recognition through Spontaneous Speech (ADReSS) \cite{Luz2020AlzheimersDR} dataset, which is a subset of the Pitt corpus that is matched for age and gender. The baseline manifest with evaluation metrics is shown in Block~\ref{alg:baseline}. Our baseline accuracy and AUC were both 0.77. \begin{algorithm*}[htbp] \caption{A sample of TRESTLE's baseline manifest in JSON. The full baseline manifest is available in TRESTLE's GitHub repository.} \label{alg:baseline} \{ ``pre\_process'': ``scripts/text\_process.json'', \hfill \textit{\# pointing to the user-defined text pre-processing parameters} ``data\_uids'': [``001-2'', ``005-2'', ``006-4'', ...], \hfill \textit{\# the sample list of the ADReSS dataset, where $-n$ represents the $n$-th visit} ``positive\_uids'': [``001-2'', ``005-2'', ``010-3'', ``018-0'', ...], \hfill \textit{\# the sample list of ``dementia'' cases from the ADReSS training and test sets, where $-n$ represents the $n$-th visit} ``training\_uids'': [``001-2'', ``005-2'', ``006-4'', ...], \hfill \textit{\# the sample list of the ADReSS training set, where $-n$ represents the $n$-th visit} ``test\_uids'': [``035-1'', ``045-0'', ``049-1'', ...], \hfill \textit{\# the sample list of the ADReSS test set, where $-n$ represents the $n$-th visit} ``method'': ``fine-tune BERT'', \hfill \textit{\# very short description of the method} ``evaluation'': \{``ACC'': 0.77, ``AUC'': 0.77\} \hfill \textit{\# evaluation metrics used for the reported method} \} \end{algorithm*} In addition to our group, two other groups participated in the hackallenge. Both of these teams decided to use both text transcripts and audio recordings from the Pitt corpus and WLS; however, as we anticipated, the two groups had significantly different approaches to selecting data subsets, criteria for classification, and evaluation metrics and strategies. Table~\ref{tab:data-selection} shows the differences in the data selection strategies between the two teams that made the final submission. Both teams successfully evaluated their methods on our manifest and on each other's manifests, and both outperformed our baseline model with their own models, as seen in Table~\ref{tab:performance}. \begin{table}[htbp] \centering \small \caption{Criteria defined by the data hackallenge participants for data selection.} \begin{tabular}{|l|l|p{11cm}|} \hline \textbf{Team} & \textbf{Dataset} & \textbf{Cutoff} \\ \hline Baseline & Pitt & ADReSS subset of Pitt corpus \\ \hline \multirow{2}{*}{Team 1} & Pitt & MMSE $\le$ 24 as dementia, otherwise healthy controls \\ \cline{2-3} & WLS & Verbal fluency score 16 for individuals aged $<$ 60, 14 for age between 60 and 79, 12 for age $\ge$ 79 as dementia group, otherwise healthy controls \\ \hline \multirow{2}{*}{Team 2} & Pitt & Diagnosis code 100 as dementia group, diagnosis code 800 as healthy controls \\ \cline{2-3} & WLS & Category fluency test score of 21 as cutoff \\ \hline \end{tabular} \label{tab:data-selection} \end{table} \begin{table}[htbp] \centering \small \caption{Best model performances from participants of the data hackallenge. Note that these results should be compared in light of the differences in the cutoffs used to define categories, as shown in Table~\ref{tab:data-selection}, and differences in the analytical model design.
Please refer to the workshop proceedings \cite{workshop} for more details.} \begin{tabular}{|l|lll|} \hline \multirow{2}{*}{\textbf{Team}} & \multicolumn{3}{l|}{\textbf{Performance}} \\ \cline{2-4} & \multicolumn{1}{l|}{Accuracy} & \multicolumn{1}{l|}{AUC} & F1\\ \hline Baseline & \multicolumn{1}{l|}{0.77} & \multicolumn{1}{l|}{0.77} & NA \\ \hline Team 1 & \multicolumn{1}{l|}{0.94} & \multicolumn{1}{l|}{0.92} & 0.84 \\ \hline Team 2 & \multicolumn{1}{l|}{0.84} & \multicolumn{1}{l|}{0.92} & 0.77 \\ \hline \end{tabular} \label{tab:performance} \end{table} \section*{Discussion} The results of the hackallenge were encouraging, as the teams were able to use TRESTLE to achieve directly comparable results without having to request any additional information or code from each other. One of the key advantages this hackallenge experiment demonstrated is the elimination of uncertainty in comparing results. TRESTLE facilitates the comparison of results across multiple studies by providing all the necessary context for doing so, including the cutoffs used to define diagnostic categories. If the categories are not defined the same way in two studies, then by definition, the results of these studies cannot be directly compared. TRESTLE provides the information necessary to make this determination unambiguously. The main purpose of the toolkit, however, is to enable researchers to replicate the experimental setup, especially the data selection, exactly as performed by another team, so that the only difference between the studies is the classification algorithm. The version of TRESTLE presented here has several limitations. First, it only supports data pre-processing for the Pitt corpus and the WLS set and does not support pre-processing of the remaining data in DementiaBank. Second, the Pitt corpus and the WLS data are in American English, and many participants of these two studies are representative of White, non-Hispanic American men and women with an average of 12 years of education. As a result, TRESTLE currently has limited applicability to other ethnic groups and languages, though this may change as data from more diverse samples become available. Third, TRESTLE runs two sub-modules using bash scripts, which may make it more difficult to use for researchers with less programming experience. Finally, while the current iteration of TRESTLE only supports pre-processing of text and audio samples from the ``Cookie Theft'' picture description task of the Pitt and WLS datasets, its design offers the flexibility to generalize to any CHAT-formatted corpus. We believe it can be further iterated and improved for broader pre-processing of corpora hosted on TalkBank for various downstream linguistic or Natural Language Processing (NLP) tasks, including those involving conversational, childhood language, multi-language and clinical datasets. Given our access to DementiaBank, we chose to focus our initial implementation of TRESTLE on this dementia-related corpus. Having shown the feasibility of our approach with these data, we plan to further develop TRESTLE to support more datasets and data formats.
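To illustrate how a manifest makes an experimental design machine-actionable, consider the following minimal Python sketch (illustrative only, not part of TRESTLE's released code). It reads a manifest structured like the baseline in Block~\ref{alg:baseline} and reconstructs the data selection, category assignments, and train/test split; the binary labeling convention is our assumption.
\begin{verbatim}
# Minimal sketch of a manifest consumer (illustrative only).
# Field names follow the baseline manifest in Block 1.
import json

def load_experiment(manifest_path):
    """Reconstruct data selection, labels, and split from a manifest."""
    with open(manifest_path) as f:
        manifest = json.load(f)
    positives = set(manifest["positive_uids"])
    # Assumption: 1 = "dementia", 0 = "controls" for every selected uid.
    labels = {uid: int(uid in positives) for uid in manifest["data_uids"]}
    return manifest["training_uids"], manifest["test_uids"], labels

train_uids, test_uids, labels = load_experiment("baseline_manifest.json")
\end{verbatim}
Because every parameter needed for this reconstruction lives in the manifest itself, two groups exchanging manifests can re-run each other's experimental designs without further correspondence.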
\section*{Conclusion} To address, at least in part, the pervasive challenge of reproducibility, we created TRESTLE, an application that provides researchers working on the detection of speech and language characteristics of dementia with a way to replicate each other's experimental setup and to communicate explicitly, in a machine-readable fashion, the parameters used in data pre-processing. TRESTLE was successfully used for its intended purpose in a hackallenge but clearly needs further development to enable wider adoption by the biomedical NLP and computational linguistics communities. Despite its limitations, TRESTLE also has a number of strengths. It provides the researcher with the ability to convey the details of their experiments (e.g., sample selection, category definition, exceptions) in a very transparent and reproducible fashion. This part of TRESTLE is not limited to the Pitt and WLS datasets and can be easily extended to any collection of text and/or audio data. While using the pre-processing modules requires some programming experience and these modules currently support only the Pitt and WLS datasets, they do encapsulate some of the standard practices, tools, and methods for text and audio pre-processing. Last but not least, TRESTLE is an open-source package freely available on GitHub for the machine learning and all other communities to use and contribute to. \section*{Acknowledgement} This research was supported by grants from the National Institute on Aging (AG069792). \bibliographystyle{vancouver}
\section{Introduction} The salient object detection (SOD) task aims to locate the most attractive and interesting objects or regions in an image, which is consistent with the human visual attention mechanism, and it has been applied to image segmentation \cite{sun2019saliency, DBLP:journals/tcsv/ShiXZZWLZ22, DBLP:journals/tcsv/JiSLCM21,crmACMMM20-1,BCNet,crmcovid1,crmcovid2}, object tracking \cite{li2019siamrpn++, DBLP:journals/tcsv/LinFHGT21}, image enhancement \cite{crmbrain,crmJEI,crmCVPR21,crmSPIC,crmsrijcai,crmSRInpaintor,crmdsr2019tip,crmblindSR22}, and other vision tasks. In recent years, fully-supervised SOD models based on deep learning have made great breakthroughs in performance \cite{chen2020global,crmCoADNet,crmDPANet,crmDBLP:journals/tip/WenYZCSZZBD21,crm2020tc,crmDBLP:journals/tmm/MaoJCGSK22,crmglnet,crmMM21}, but these models usually require a large number of pixel-level labels for training, and such labeling costs are obviously very expensive. Therefore, weakly-supervised and unsupervised SOD methods, which aim to reduce or eliminate the reliance on elaborately labeled data, have received increasing attention from both academia and industry. Some related areas such as weakly supervised semantic segmentation \cite{9440699,DBLP:journals/pr/ZhangXWHLZ22,DBLP:journals/corr/abs-2108-01296,DBLP:conf/aaai/ZhangXWSH20,zhangijcai2022}, light field SOD \cite{DBLP:journals/corr/abs-2204-13456,crm-acmmm}, remote sensing SOD \cite{DBLP:journals/corr/abs-2202-03501,crmRRNet,crm2019tgrs,crm-nc,dafnet,crm2022rsi}, and visual grounding \cite{9667277,DBLP:journals/pami/SunXLLG21} have also been developed. \begin{figure}[!t] \subfloat[RGB Image]{\includegraphics[width=0.3\linewidth]{Image/original_rgb.jpg}} \hfill \subfloat[Scribble Label]{\includegraphics[width=0.3\linewidth]{Image/scribble-people.jpg}} \hfill \subfloat[Point Label]{\includegraphics[width=0.3\linewidth]{Image/point.png}} \newline \subfloat[Pixel-level Label]{\includegraphics[width=0.3\linewidth]{Image/pixel-people.jpg}} \hfill \subfloat[Image-level Label]{\includegraphics[width=0.3\linewidth]{Image/image-level-people.png}} \hfill \subfloat[Coarse Label]{\includegraphics[width=0.3\linewidth]{Image/coarse-people.png}} \caption{Several types of saliency supervision. (a) Original RGB image; (b) Weakly-supervised scribble label; (c) Weakly-supervised point label; (d) Pixel-level label; (e) Weakly-supervised image-level label; (f) Unsupervised coarse label.} \label{level} \end{figure} According to the given labeled data, weakly-supervised/unsupervised SOD methods can be roughly divided into the following categories: (1) Weakly-supervised scribble label supervision, as shown in Fig. \ref{level}(b): parts of the foreground and background regions of each training sample are outlined with scribbles. \begin{figure}[!t] \centering \includegraphics[scale=0.28]{Image/General-Framework.pdf} \caption{(a) A simple solution for training the SOD model with coarse and real labels. (b) The proposed alternate learning framework for the weakly-supervised SOD task under hybrid labels, consisting of a Refinement Network (R-Net) and a Saliency Network (S-Net). These two networks cooperate with each other and are trained alternately. During training, both networks employ a group-wise incremental mechanism to address the imbalance between real-labeled data and pseudo-labeled data, and use a credibility verification mechanism to ensure that the two networks can provide credible labels.
} \label{fig:general-framework} \end{figure} (2) Weakly-supervised point label supervision, as shown in Fig. \ref{level}(c): the foreground and background regions of each training sample are each marked with only a single point. (3) Weakly-supervised image-level label supervision, as shown in Fig. \ref{level}(e): only the category of the salient object is known during training. (4) Unsupervised coarse label supervision, as shown in Fig. \ref{level}(f): the saliency maps generated by existing unsupervised traditional methods are used as the labels of the training samples. Generally speaking, the weaker the supervision information, the more limited the detection performance. Compared with image-level labels and sparse labels (\textit{i}.\textit{e}., scribble and point labels), the information given by coarse labels is uncontrollable and may introduce some inevitable distracting noise, so coarse labels provide the weakest supervision among these forms. Different from the above-mentioned forms of supervision, we construct a new supervision form, called hybrid labels, for the first time to solve the weakly-supervised SOD task by releasing a portion of real labels on top of the unsupervised labels. The hybrid label supervision consists of two parts: a small number of pixel-level real labels (\textit{e}.\textit{g}., one tenth of the fully-supervised pixel-wise annotations) and a large number of coarse labels generated by existing unsupervised SOD models. Such a supervision form is expected to achieve better detection performance at a smaller annotation cost. But with this kind of supervision, the weakly-supervised SOD task becomes more challenging due to the unreliability of coarse labels and the imbalance between real-labeled and coarse-labeled data. Specifically, on the one hand, the coarse labels are generated by traditional unsupervised methods and necessarily contain a lot of noise and mislabeling. If the network is trained with such labels all the time, it will gradually become confused and ineffective, seriously affecting the final performance. On the other hand, the proportion of real labels to coarse labels in the hybrid label setting is severely imbalanced (\textit{e}.\textit{g}., 1:9), and network learning will collapse if mixed training is performed directly. Therefore, in order to address these issues, we propose a new weakly-supervised SOD framework with hybrid labels from the perspectives of pipeline structure and training strategy. For this new weakly-supervised learning task, the key problems we need to solve differ from those of existing methods, and thus our model framework and technical implementation also differ. To address the problems of unreliable coarse labels and imbalanced sample sizes under this new hybrid supervision, and different from the previous single-stage SOD frameworks \cite{wang2017learning,zhang2018deep,DBLP:journals/tcsv/ZhengTZML21}, we decouple the weakly-supervised SOD task into two sub-tasks, coarse label refinement and salient object detection, and construct a joint learning framework as shown in Fig. \ref{fig:general-framework}(b), consisting of a Refinement Network (R-Net) and a Saliency Network (S-Net). These two networks cooperate with each other and are trained alternately.
To achieve the R-Net, a two-stream encoder-decoder model equipped with a Blender with Guidance and Aggregation mechanisms (BGA) is designed for coarse label refinement, including a saliency-refinement mainstream branch and an RGB-image guidance branch. On the one hand, considering the uncertainty and noise of coarse labels, a separate RGB-image guidance branch is introduced to form the two-stream structure and provide effective guidance information from the raw RGB data. On the other hand, we propose the BGA to achieve two-stage feature decoding, where the guidance stage aims to gain relatively robust baseline performance for the mainstream branch via the guidance branch information, and the aggregation stage integrates the encoder features, previous decoder features, and global features by considering the roles of different features. The S-Net is a replaceable salient object detection network supervised by the pseudo labels\footnote{For clarity, the coarse labels in our paper specifically refer to labels generated by an unsupervised SOD model and used as input to the R-Net. The pseudo labels are the saliency maps obtained by testing with the trained R-Net or S-Net.} generated by the current R-Net; it takes the original RGB image as input and outputs pseudo labels for the subsequent round of R-Net learning. By using such a decoupled architecture, not only can the negative impact of coarse labels on the S-Net be effectively reduced, but the number of training samples can also be expanded with the refined coarse labels generated by the trained R-Net, thereby enhancing the learning ability of the network. Besides, in the face of imbalanced training on the hybrid-labeled data, well-designed training strategies are crucial to guarantee the effectiveness and efficiency of network training. Specifically, we design three ingenious training strategies: (1) Alternate iteration mechanism. In order to ensure that sufficient and effective samples participate in training, we alternately perform iterative training of the R-Net and S-Net, and the two networks provide better labels for each other. (2) Group-wise incremental mechanism. In order to avoid the imbalance caused by inputting a large number of pseudo-labeled samples and a small number of real-labeled samples at the same time, we group the training set and gradually increase the amount of pseudo-labeled data in each training iteration, thereby learning effective feature representations step by step. (3) Credibility verification mechanism. In order to ensure that the two networks can provide credible labels to each other during the iteration process, starting from the second iteration, we design a validation phase on a validation set containing 100 images, and only the best model that satisfies the validation conditions can be used to generate pseudo labels for the corresponding data to participate in the next step of training. In general, the three training strategies constrain the training process from three aspects, \textit{i}.\textit{e}., quantity allocation, training method, and reliability judgment, so as to achieve effective training of the network. The main contributions of this paper lie in three aspects, covering task setting, technical framework, and training strategy: \begin{itemize} \item For the first time, we launch a new weakly-supervised SOD task based on hybrid labels, with a large number of coarse labels and a small number of real labels as supervision.
To this end, we decouple this task into two sub-tasks, coarse label refinement and salient object detection, and design the corresponding R-Net and S-Net. Moreover, our method achieves competitive performance on five widely used benchmark datasets using only one-tenth of the real labels required in the fully-supervised setting. \item We design the BGA in the R-Net to achieve two-stage feature decoding, where the guidance stage introduces guidance information from the RGB-image guidance branch to guarantee a relatively robust performance baseline, and the aggregation stage dynamically integrates different levels of features according to their modification or supplementation roles. \item In order to guarantee the effectiveness and efficiency of network training, from the perspectives of quantity allocation, training method, and reliability judgment, we design the alternate iteration mechanism, group-wise incremental mechanism, and credibility verification mechanism. \end{itemize} \section{RELATED WORK} \subsection{Fully supervised salient object detection} Inspired by image semantic segmentation, Zhao \textit{et al}. ~\cite{DBLP:conf/cvpr/ZhaoOLW15} proposed a fully supervised CNN-based model to integrate local and global features to predict the saliency map. Wang \textit{et al}. ~\cite{DBLP:conf/eccv/WangWLZR16} adopted a recurrent CNN to refine the predicted saliency map step by step. Most of these methods follow an encoder-decoder architecture similar to FCN \cite{long2015fully}; on this basis, in order to obtain more accurate and convincing detection results, researchers have carried out a series of elaborate network designs. Several recent works \cite{DBLP:conf/iccv/ZhangWLWR17,hou2017deeply,DBLP:conf/ijcai/DengHZXQHH18,DBLP:conf/aaai/HuZQFH18,DBLP:conf/cvpr/ZhangDLH018,DBLP:conf/cvpr/ZhangWQLW18,wei2020f3net,DBLP:conf/aaai/WangCZZ0G20,DBLP:conf/cvpr/PangZZL20,DBLP:conf/cvpr/WeiWWSH020,DBLP:conf/eccv/ZhaoPZLZ20} integrate features from multiple layers of a CNN to exploit context information at different semantic levels. Among them, Hou \textit{et al}. ~\cite{hou2017deeply} introduced short connections to the skip-layer structure for capturing fine details. Deng \textit{et al}. ~\cite{DBLP:conf/ijcai/DengHZXQHH18} proposed an iterative method to optimize the saliency map, leveraging features generated by deep and shallow layers. Zhang \textit{et al}. ~\cite{DBLP:conf/cvpr/ZhangWQLW18} designed an attention-guided network that selectively integrates multi-level contextual information in a progressive manner. Wei \textit{et al}. ~\cite{wei2020f3net} focused on feature fusion strategies and proposed a SOD network equipped with a cross feature module and a cascaded feedback decoder, trained with a new pixel position aware loss. Pang \textit{et al}. ~\cite{DBLP:conf/cvpr/PangZZL20} investigated the multi-scale issue in salient object detection and proposed an effective and efficient network with a transformation-interaction-fusion strategy. In the SOD task, to obtain results with elaborate boundaries, edge-guided or boundary-guided methods have been proposed. Qin \textit{et al}. ~\cite{qin2019basnet} proposed a boundary-aware model to segment salient object regions and predict the boundaries simultaneously. Zhao \textit{et al}. ~\cite{zhao2019egnet} proposed an edge-guided SOD network to learn the complementarity between salient edge information and salient object information in a single network. Feng \textit{et al}.
~\cite{feng2019attentive} designed an attentive feedback network that integrates feedback modules to better explore the structure of objects, and proposed a new boundary-enhanced loss for learning exquisite boundaries. Wang \textit{et al}. ~\cite{wang2019salient} introduced a salient edge detection module into an essential pyramid attention structure for salient object detection and achieved superior performance. Although superior performance has been obtained, a fundamental problem remains: all these methods require a mass of pixel-level labeled training data. Accordingly, how to obtain satisfactory detection results with fewer annotations has become a topic worth exploring, which also motivates our work. \begin{figure*}[!t] \centering \includegraphics[scale=0.35]{Image/R-Net.pdf} \caption{The overall framework of the proposed Refinement Network (R-Net).} \label{fig:r-net} \end{figure*} \subsection{Weakly supervised salient object detection} Unlike fully-supervised SOD, which needs a complete pixel-level label for each training sample, weakly-supervised SOD models may utilize simpler labels, \textit{e}.\textit{g}., scribble/point/image-level labels, as supervision signals to achieve comparable performance. Due to the low cost of such labels and their considerable prospects, this direction has received more and more attention. The WSS model \cite{wang2017learning} is the first weakly-supervised SOD method using image-level labels; it employs a global smooth pooling layer and a foreground inference scheme to make the network generate good predictions even for unseen categories. Zeng \textit{et al}. ~\cite{zeng2019multi} utilized multiple labels (\textit{i}.\textit{e}., image-level labels and captions) to train a SOD model, in which a classification network and a caption generation network were designed to predict the object class and generate captions, respectively. Zhang \textit{et al}. ~\cite{zhang2020weakly} used scribble labels as supervision to train the network, including an auxiliary edge detection task to locate object edges explicitly and a gated structure-aware loss to place constraints on the scope of the structure to be recovered. Apart from that, Zhang \textit{et al}. ~\cite{zhang2018deep} first applied an unsupervised method to generate coarse labels, then obtained refined saliency maps by modelling the noise in the coarse labels. Zheng \textit{et al}. ~\cite{DBLP:journals/tcsv/ZhengTZML21} first introduced saliency subitizing as the weak supervision and proposed a SOD model with a saliency subitizing module and a saliency updating module. Yu \textit{et al}. \cite{DBLP:conf/aaai/YuZXL21} proposed a local coherence loss to propagate labels to unlabeled regions based on image features and pixel distances. Piao \textit{et al}. \cite{DBLP:conf/iccv/PiaoWZL21} introduced a new multiple-pseudo-label framework to integrate more comprehensive and accurate saliency cues from multiple labels, avoiding the situation where a generated single label is inevitably affected by the adopted refinement algorithms. Gao \textit{et al}. \cite{DBLP:conf/aaai/Gao00GZHZ22} proposed a point-supervised saliency detection model, where an adaptive masked flood filling algorithm is designed to generate pseudo labels, and a transformer-based point-supervised SOD model and a Non-Salient Suppression (NSS) method are used to achieve two-stage saliency map generation and optimization.
Yan \textit{et al}. \cite{DBLP:conf/aaai/YanWLZLL22} made the first attempt to achieve SOD by exploiting unsupervised domain adaptation from synthetic data, and constructed a synthetic SOD dataset named UDASOD. Besides, weakly supervised video object segmentation/SOD methods can also provide us with some enlightenment. Zhao \textit{et al}. \cite{DBLP:conf/cvpr/Zhao0LBLH21} proposed the first weakly supervised video salient object detection model based on ``fixation guided scribble annotations''. Some methods applied weakly-supervised approaches to video object segmentation by fusing information between different frames \cite{DBLP:journals/tcsv/LinXLZ22,DBLP:journals/tist/WeiLLFWC22,DBLP:conf/aaai/LinX0021}. In contrast, Zhou \textit{et al}. \cite{DBLP:conf/aaai/ZhouWZYL020} relied only on the current frame and the corresponding optical flow data to achieve zero-shot video object segmentation. En \textit{et al}. \cite{DBLP:journals/tip/EnDZ21} performed video object segmentation with the help of saliency information. In this paper, we construct a new label form, namely hybrid labels, for the first time to solve the weakly-supervised SOD task, in which only one-tenth of the samples carry real pixel-wise labels. With the help of the proposed learning framework and training strategies, our method achieves encouraging performance. \section{PROPOSED METHOD} \subsection{Overview} First, the hybrid labels used in this paper can be divided into two parts, \textit{i}.\textit{e}., a small number of pixel-level real labels and a large number of coarse labels, where the coarse labels are generated by a traditional unsupervised method (\textit{e}.\textit{g}., MB \cite{zhang2015minimum}). The overall framework is shown in Fig. \ref{fig:general-framework}, consisting of a Refinement Network (R-Net) and a Saliency Network (S-Net). The R-Net is designed as a two-stream encoder-decoder architecture that takes the original RGB image and coarse label as inputs and outputs updated pseudo labels (more details will be introduced in Section \ref{R-Net}). The S-Net is a replaceable SOD network that takes the original RGB image as input and uses the pseudo labels generated by the R-Net as the supervision signal. The proposed framework is trained in alternating iterations. For the training process, we first divide the training samples into ten groups (note that only the samples in GROUP 1 include real labels) and incrementally load them into the training pool. On the one hand, the alternating iteration strategy allows the quality of pseudo labels to be continuously optimized through the cooperation of the two networks. On the other hand, incremental loading of training samples enhances the guiding ability of real labels. As a result, the imbalance between real labels and pseudo labels can be alleviated effectively. Specifically, taking the first iteration as an example, we first use the original RGB images and coarse labels of GROUP 1 as inputs to train the R-Net, and then predict the corresponding pseudo labels of GROUP 2. Next, we input GROUP 1 and GROUP 2 into the S-Net for network training, and then predict the corresponding pseudo labels of GROUP 3. At this point, this round of training is over. The samples in GROUP 1 and GROUP 3 will be used for the next iteration of R-Net training. The iterations terminate when all the data has been used. Ultimately, we only use the trained S-Net for testing, so coarse label inputs are no longer needed.
More details will be introduced in Section \ref{training}. \subsection{Refinement Network (R-Net)}\label{R-Net} The R-Net is designed to refine the coarse labels and produce better pseudo labels that can be used for S-Net training. Intuitively, we only need to input coarse labels and the corresponding RGB images into the network to achieve label refinement. But in our setting, the coarse labels are generated by a traditional unsupervised method and are very noisy for complex scenes, sometimes even inferior to the results obtained by weakly-supervised deep learning methods. Thus, directly refining the labels with only the Saliency-Refinement Mainstream Branch (mainstream branch for short) would be extremely difficult. Considering the uncertainty and noise of coarse labels, a separate RGB-Image Guidance Branch (referred to as the guidance branch) is introduced into the R-Net to form a two-stream encoding structure, which provides guidance information, such as object localization and integrity, to the mainstream branch, thereby guaranteeing a relatively robust performance baseline. The whole framework of the R-Net is shown in Fig. \ref{fig:r-net}. The encoders of both streams are based on ResNet-50 \cite{he2016deep} to extract the corresponding multi-level features. Then, we propose a Blender with Guidance and Aggregation mechanisms (BGA) to achieve two-stage feature decoding, as shown on the right side of Fig. \ref{fig:r-net}. The role of the first stage is guidance, that is, to supplement the mainstream branch with the information of the guidance branch, ensuring that the mainstream branch has a relatively robust baseline performance. The role of the second stage is aggregation, that is, to integrate the encoder features, previous decoder features, and global features by considering the roles of different features. \subsubsection{The Guidance Stage} We hope that in the first stage, the RGB branch can provide guidance information (\textit{e}.\textit{g}., object localization and integrity) for the mainstream branch, guaranteeing its effective learning and a robust performance baseline. The detailed architecture is shown in the top right corner of Fig. \ref{fig:r-net}. First, in order to ensure that enough saliency information can be transferred to the mainstream branch while mitigating unreliable noise from the coarse label input, we supplement and filter the features in the channel dimension. Specifically, the encoder features of the corresponding layers in the two branches are first concatenated for complementation, and then channel attention is used to highlight essential channel features for filtering. This process can be formulated as: \begin{align} F_{com}^{i}=CA([f_{srm}^{i},f_{rgb}^{i}])\circledcirc[f_{srm}^{i},f_{rgb}^{i}], \end{align} where $F_{com}^{i}$ denotes the complemented features after channel attention, $CA$ is the channel attention operation \cite{ca}, $f_{srm}^{i}$ and $f_{rgb}^{i}$ denote the encoder features of the ${{i}^{th}}$ layer in the mainstream branch and guidance branch, respectively, $\left[ \cdot ,\cdot \right]$ represents the concatenation operation along the channel dimension, and $\circledcirc$ means element-wise multiplication with channel-wise broadcasting. Second, in addition to the direct complementation in the channel dimension, the RGB branch can also provide pixel-level spatial guidance information, which can both reinforce important regions and suppress irrelevant noise interference.
Specifically, we use spatial attention \cite{sa} to generate the spatial location mask that needs to be emphasized from the perspective of RGB information, and use it to update the features of the mainstream branch: \begin{align} F_{En}^{i}=Con{{v}_{1\times 1}}(SA(f_{rgb}^{i})\odot F_{com}^{i} + F_{com}^{i}), \end{align} where $F_{En}^i$ are the final output encoder features of the guidance stage after spatial attention, $SA$ is the spatial attention operation \cite{DBLP:conf/eccv/WooPLK18}, $\odot$ is element-wise multiplication, and $Con{{v}_{1\times 1}}$ denotes a convolutional layer with a kernel size of $1\times 1$. \subsubsection{The Aggregation Stage} As mentioned earlier, the second stage is mainly used to fuse multi-level features, including the encoder features of the corresponding layer generated in the first stage, the global features from the top encoder layer, and the decoder features of the previous layer. In order to implement the aggregation more effectively, we need to analyze the roles of the various features. In general, both the encoder features and the global features should play an auxiliary role in obtaining better decoder features in the feature decoding stage. The auxiliary functions can be divided into two aspects: one is to refine the decoder features under the guidance of global information; the other is to supplement the decoder features under the guidance of encoder features. First, the high-level semantic features from the top encoder layer are crucial for distinguishing salient objects, but as the decoding process proceeds, the semantic constraint is gradually diluted. Therefore, in order to enforce semantic information throughout the decoding process, we generate the corresponding semantic guidance mask to refine the decoder features of each level. Specifically, we first combine the semantic features from the two branches and the encoder features generated in the first stage through an importance weighting strategy \cite{CoADNet}: \begin{align} f_{s}^i=P^i\odot f_{g}+(1-P^i)\odot f_{En}^{i}, \end{align} where $f_{g}=conv([{f_{srm}^{5}},{f_{rgb}^{5}}])$ denotes the fused semantic features from the two branches, and $P^i$ is the learned importance weight that controls the fusion rate of $f_{g}$ and $f_{En}^{i}$ (more details can be found in \cite{CoADNet}). Then, the fused features $f_{s}^i$ containing global semantic information are activated as a semantic mask, which is used to modify the upsampled decoder features: \begin{align} f_{DeR}^{i}=Up(f_{De}^{i+1})\odot \sigma (f_{s}^{i}), \end{align} where $f_{DeR}^{i}$ are the modified decoder features of the $i^{th}$ level, $f_{De}^{i+1}$ represents the original decoder features of the $(i+1)^{th}$ level, $Up$ represents the up-sampling operation by bilinear interpolation, and $\sigma$ is the sigmoid activation function. Second, as demonstrated in \cite{hou2017deeply}, the encoder features contain much valuable information that can complement decoder feature learning; for example, the shallower features include rich spatial information that helps recover details. Therefore, we further supplement the modified decoder features with the features filtered via the spatial attention mechanism \cite{sa} to obtain more comprehensive saliency-related decoder features. This process can be formulated as: \begin{align} f_{De}^{i}=Up(f_{De}^{i+1})+f_{DeR}^{i}+SA(f_{En}^{i})\odot f_{En}^{i}, \end{align} where $f_{De}^{i}$ are the final decoder features of the $i^{th}$ level, and $SA$ is the spatial attention operation \cite{sa}.
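For concreteness, the following condensed PyTorch sketch re-implements one BGA decoding step under simplifying assumptions: the channel attention, spatial attention, and importance weight $P^i$ are replaced by minimal stand-ins for the modules of \cite{ca}, \cite{sa}, and \cite{CoADNet}, and all input features are assumed to share the same channel width, with the global feature $f_g$ already projected and upsampled to the current resolution. It is an illustrative sketch, not our exact implementation.
\begin{verbatim}
# Illustrative sketch of one BGA decoding step (simplified stand-ins
# for CA, SA, and the importance weight P; not the exact implementation).
import torch
import torch.nn as nn
import torch.nn.functional as F

class CA(nn.Module):                       # channel attention (SE-style)
    def __init__(self, c, r=16):
        super().__init__()
        self.fc = nn.Sequential(nn.Conv2d(c, c // r, 1), nn.ReLU(),
                                nn.Conv2d(c // r, c, 1), nn.Sigmoid())
    def forward(self, x):                  # (B,C,H,W) -> (B,C,1,1) weights
        return self.fc(F.adaptive_avg_pool2d(x, 1))

class SA(nn.Module):                       # spatial attention (CBAM-style)
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(2, 1, 7, padding=3)
    def forward(self, x):                  # (B,C,H,W) -> (B,1,H,W) mask
        m = torch.cat([x.mean(1, keepdim=True),
                       x.max(1, keepdim=True)[0]], dim=1)
        return torch.sigmoid(self.conv(m))

class BGA(nn.Module):
    def __init__(self, c):
        super().__init__()
        self.ca, self.sa_rgb, self.sa_en = CA(2 * c), SA(), SA()
        self.reduce = nn.Conv2d(2 * c, c, 1)               # Conv_{1x1}
        self.p = nn.Sequential(nn.Conv2d(2 * c, c, 1), nn.Sigmoid())

    def forward(self, f_srm, f_rgb, f_g, f_de_next):
        # ---- guidance stage: channel complementation + spatial mask ----
        cat = torch.cat([f_srm, f_rgb], dim=1)
        f_com = self.ca(cat) * cat                         # channel filtering
        f_en = self.reduce(self.sa_rgb(f_rgb) * f_com + f_com)
        # ---- aggregation stage: semantic mask + encoder supplement ----
        p = self.p(torch.cat([f_g, f_en], dim=1))          # importance weight
        f_s = p * f_g + (1 - p) * f_en
        up = F.interpolate(f_de_next, size=f_en.shape[2:],
                           mode="bilinear", align_corners=False)
        f_der = up * torch.sigmoid(f_s)                    # semantic refinement
        return up + f_der + self.sa_en(f_en) * f_en        # final decoder feature
\end{verbatim}
The two stages of this block map one-to-one onto the guidance and aggregation operations formulated above.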
\subsection{Training Strategy with Hybrid Labels}\label{training} \textbf{Training settings.} The pixel-level real labels and coarse labels are given in our training set, in which the coarse labels are only used as the input to the R-Net rather than as supervision. Meanwhile, pseudo labels are generated as supervision information during the network training process. In the implementation, we randomly select 1,000 samples from the DUTS-TR dataset \cite{wang2017learning} as the real-labeled training subset, and use the MB method \cite{zhang2015minimum} to generate the corresponding coarse labels for all samples in the DUTS-TR dataset (including the 1,000 samples mentioned earlier). Thus, these 1,000 samples (including RGB images, coarse labels, and real labels) can support the first iteration of R-Net training. In order to guarantee the effectiveness and efficiency of network training, we propose three key training mechanisms, including the alternate iteration mechanism, group-wise incremental mechanism, and credibility verification mechanism, as illustrated in Fig. \ref{fig:my_label}. \begin{figure}[!t] \centering \includegraphics[scale=0.45]{Image/train_stage.pdf} \caption{Training strategy for group update based on real labels and pseudo labels.} \label{fig:my_label} \end{figure} \textbf{Alternate iteration mechanism.} As mentioned earlier, considering that coarse labels may contain a lot of noise, directly training the network under such supervision will inevitably lead to poor performance. Therefore, we design an R-Net for label correction and an S-Net for salient object detection. In terms of network training, we train these two networks alternately and iteratively, so that they provide better pseudo labels for each other. In detail, the S-Net of the current iteration is trained using the pseudo labels generated by the trained R-Net of the current iteration, and the pseudo labels generated by the trained S-Net are further used for the next iteration of R-Net training. The two networks are trained in this alternating manner until all training samples are traversed, which we call the alternate iteration mechanism. \textbf{Group-wise incremental mechanism.} Another important problem in the weakly-supervised SOD framework with hybrid labels is the sample imbalance caused by the difference in the number of real-labeled and coarse-labeled samples. If the unbalanced training samples are directly used for network training, network learning becomes ambiguous and unstable. Therefore, we propose a group-wise incremental mechanism to avoid network collapse caused by importing a large amount of pseudo-labeled data at once. Specifically, we divide all training samples (\textit{i}.\textit{e}., 1,000 real-labeled samples and 9,000 coarse-labeled samples) into ten equal groups, of which the 1,000 samples with real labels form GROUP 1. In the first iteration, we only load the samples of GROUP 1 to train the R-Net, and then we use the trained R-Net to test the samples in GROUP 2 and obtain the corresponding pseudo labels. Subsequently, all the samples in GROUP 1 with real labels and GROUP 2 with pseudo labels are used for S-Net training.
Finally, the trained S-Net is used to test the samples in GROUP 3, and the generated saliency maps are used as the pseudo labels for the next iteration of R-Net training. At this point, the first iteration is completed. Notably, in order to prevent the model from overfitting to the real-labeled data and to improve the robustness of the model, we do not load all real-labeled samples into training in the first iteration, but also adopt an incremental strategy. To be specific, in the first iteration, we select 500 samples from GROUP 1 as the real-labeled sample batch, and the remaining 500 samples are degraded into a contaminated-label sample batch through rotation, cropping, and occlusion operations. In subsequent training iterations, we gradually reduce the amount of contaminated-labeled data and increase the amount of real-labeled data. In the second iteration, we still train the R-Net first, followed by the S-Net. For the R-Net training, the 2,000 samples in GROUP 1 and GROUP 3 are used, where the number of real-labeled samples is increased to 600 and the number of contaminated-labeled samples is decreased to 400. Note that the corresponding pseudo labels of GROUP 3 are obtained by testing its samples with the S-Net trained in the previous iteration. Then, the newly trained R-Net is utilized to test GROUP 4, and the samples in GROUPs 1, 2, and 4 are used to train the S-Net again. In order to traverse the entire training dataset (\textit{i}.\textit{e}., 10,000 samples), 5 iterations need to be performed, and the training process of the other iterations proceeds analogously. In addition, since both the R-Net and S-Net are trained with hybrid labels, in order to ensure the effectiveness and stability of training, we first train the network on the pseudo-labeled samples and then fine-tune it on the real-labeled samples within a training epoch. \textbf{Credibility verification mechanism.} The purpose of alternately training the two networks is to provide better pseudo labels for each other, so we introduce a credibility verification mechanism from the second iteration onward to ensure the validity of the provided labels. Only when the current model outperforms the previous best model on the validation set of 100 images do we use it to generate pseudo labels for the corresponding group to participate in the next training step. Taking the validation process of the R-Net as an example, in the second iteration, if the MAE score of the newly trained R-Net model (trained with the GROUP 1 and GROUP 3 data) on the validation set is smaller than the MAE score\footnote{MAE refers to the mean absolute error, which represents the error between the prediction and the ground truth; a smaller value indicates better performance.} of the previous best R-Net model (at this point, the R-Net model trained in the first iteration), then we use the current R-Net model to test GROUP 4 and generate the corresponding pseudo labels; otherwise, we use the R-Net model trained in the first iteration to generate the pseudo labels. The validation in other iterations is similar to this process. \begin{figure*}[!t] \centering \includegraphics[scale=0.68]{Image/pr.pdf} \caption{PR curves on five common saliency datasets. Solid lines are fully-supervised methods; dashed lines are weakly-supervised and unsupervised methods.} \label{pr_fm} \end{figure*}
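The overall schedule can be summarized by the schematic Python sketch below. The helpers \texttt{train}, \texttt{predict\_labels}, and \texttt{mae} are hypothetical placeholders, each group is assumed to be an object carrying its own labels, and the contaminated-label sub-batching within GROUP 1 is omitted for brevity.
\begin{verbatim}
# Schematic sketch of the alternating schedule (illustrative only).
# train(), predict_labels(), and mae() are hypothetical placeholders;
# groups[0] holds the 1,000 real-labeled samples, groups[1..9] the
# coarse-labeled ones, and each group object carries its own labels.
import copy

def alternate_training(r_net, s_net, groups, val_set,
                       train, predict_labels, mae, n_iters=5):
    best_r = best_s = None
    r_pool, s_pool = [groups[0]], [groups[0]]
    for it in range(n_iters):
        # R-Net phase: pseudo/coarse labels first, then fine-tune on
        # the real-labeled samples within each training epoch.
        train(r_net, r_pool, order=("pseudo", "real"))
        # Credibility verification: only a model that improves the
        # validation MAE may emit pseudo labels (trivially true at it=0).
        if best_r is None or mae(r_net, val_set) < mae(best_r, val_set):
            best_r = copy.deepcopy(r_net)
        nxt = groups[2 * it + 1]            # GROUPs 2, 4, 6, 8, 10
        nxt.labels = predict_labels(best_r, nxt)
        s_pool.append(nxt)
        # S-Net phase: real labels plus the refined pseudo labels.
        train(s_net, s_pool, order=("pseudo", "real"))
        if best_s is None or mae(s_net, val_set) < mae(best_s, val_set):
            best_s = copy.deepcopy(s_net)
        if 2 * it + 2 < len(groups):        # GROUPs 3, 5, 7, 9 for R-Net
            nxt = groups[2 * it + 2]
            nxt.labels = predict_labels(best_s, nxt)
            r_pool.append(nxt)
    return best_s                  # only the S-Net is used at test time
\end{verbatim}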
\subsection{Loss Function} Following traditional SOD methods, we also use the binary cross-entropy (BCE) loss as the loss function for R-Net training. As mentioned earlier, in order to reduce noise pollution from pseudo labels and enhance the guidance of real labels, we first train the network on the pseudo-labeled samples and then fine-tune it on the real-labeled samples within a training epoch. Specifically, we treat these two types of labels separately and write the binary cross-entropy loss in the following form: \begin{align} \footnotesize \ell_{r}=-\sum\limits_{j\in {{D}^{r}}}[Y_{j}^{r}\log R(X_{j}|\Phi )+(1-Y_{j}^{r})\log (1-R(X_{j}|\Phi ))], \end{align} \begin{align} \footnotesize \ell_{p}=-\sum\limits_{k\in {{D}^{p}}}[Y_{k}^{p}\log R(X_{k}|\Phi )+(1-Y_{k}^{p})\log (1-R(X_{k}|\Phi ))], \end{align} where $\ell_{r}$ and $\ell_{p}$ are both standard BCE losses, but the samples used in the calculation of the two losses are different: $\ell_{r}$ is computed over samples with real labels, while $\ell_{p}$ is computed over samples with pseudo labels. $D^r$ and $D^p$ correspond to the training sets with real labels and pseudo labels, respectively. $\{X,Y\}$ denotes a training sample in the corresponding set, where $X$ is the input of the R-Net, including the RGB image and the corresponding coarse label, and $Y$ is the real or pseudo label of the sample. $R(\cdot|\Phi )$ denotes the R-Net, and $\Phi$ represents its network parameters. \begin{table*}[!t] \setstretch{1} \renewcommand{\arraystretch}{1.4} \caption{Quantitative results of different methods on five SOD benchmark datasets, where $\uparrow$ and $\downarrow$ indicate that larger and smaller scores are better, respectively. `F' denotes full supervision, `I' image-level weak supervision, `S' scribble-level weak supervision, `Sub' subitizing supervision, `M' multi-source weak supervision, `Un' unsupervised, and `H' hybrid supervision.
The best performance is marked in \textbf{BOLD}, and the second best performance is marked in \underline{UNDERLINE}.} \resizebox{\textwidth}{45mm}{ \begin{tabular}{ccc|ccc|ccc|ccc|ccc|ccc}\hline & & & \multicolumn{3}{c|}{DUTS-TE} & \multicolumn{3}{c|}{ECSSD} & \multicolumn{3}{c|}{HKU-IS} & \multicolumn{3}{c|}{PASCAL-S} & \multicolumn{3}{c}{THUR} \\ \hline & SUP & YEAR & ${{F}_{\beta }^{max}}\uparrow $ & ${{S}_{m}}\uparrow $ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow $ & ${{S}_{m}}\uparrow $ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow $ & ${{S}_{m}}\uparrow $ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow $ & ${{S}_{m}}\uparrow $ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow $ & ${{S}_{m}}\uparrow $ & $MAE\downarrow $ \\ \hline DGRL & F & 2018 & 0.805 & 0.842 & 0.050 & 0.913 & 0.903 & 0.041 & 0.900 & 0.894 & 0.036 & 0.837 & 0.836 & 0.072 & 0.746 & 0.813 & 0.076 \\ PiCANet & F & 2018 & 0.840 & 0.863 & 0.040 & 0.928 & 0.916 & \textbf{0.035} & 0.913 & 0.905 & \underline{0.031} & 0.848 & 0.846 & \underline{0.065} & - & - & - \\ PAGR & F & 2018 & 0.816 & 0.838 & 0.056 & 0.904 & 0.889 & 0.061 & 0.897 & 0.887 & 0.048 & 0.822 & 0.819 & 0.092 & 0.769 & 0.830 & 0.070 \\ MLMSNet & F & 2019 & 0.825 & 0.861 & 0.049 & 0.917 & 0.911 & 0.045 & 0.910 & 0.906 & 0.039 & 0.841 & 0.845 & 0.074 & 0.752 & 0.819 & 0.079 \\ CPD & F & 2019 & 0.840 & 0.869 & 0.043 & 0.926 & 0.918 & 0.037 & 0.911 & 0.905 & 0.034 & 0.842 & 0.847 & 0.072 & 0.774 & 0.834 & \underline{0.068} \\ AFNet & F & 2019 & 0.836 & 0.867 & 0.046 & 0.924 & 0.913 & 0.042 & 0.909 & 0.905 & 0.036 & 0.848 & 0.849 & 0.071 & - & - & - \\ BASNet & F & 2019 & 0.838 & 0.866 & 0.048 & 0.931 & 0.916 & 0.037 & 0.919 & 0.909 & 0.032 & 0.842 & 0.836 & 0.077 & - & - & - \\ PFAN & F & 2019 & 0.850 & 0.874 & 0.041 & 0.914 & 0.904 & 0.045 & 0.918 & \underline{0.914} & 0.032 & \textbf{0.866} & \underline{0.862} & \underline{0.065} & 0.722 & 0.781 & 0.104 \\ GCPANet & F & 2020 & \textbf{0.866} & \textbf{0.891} & \textbf{0.038} & \underline{0.936} & \textbf{0.927} & \textbf{0.035} & \textbf{0.926} & \textbf{0.920} & \underline{0.031} & \underline{0.859} & \textbf{0.866} & \textbf{0.062} & \textbf{0.784} & \textbf{0.840} & 0.070 \\ MINet & F & 2020 & \underline{0.863} & \underline{0.881} & \underline{0.039} & \textbf{0.937} & \underline{0.923} & \underline{0.036} & \underline{0.922} & \underline{0.914} & \textbf{0.030} & 0.856 & 0.855 & \textbf{0.062} & \underline{0.778} & \underline{0.836} & \textbf{0.066} \\ \hline SVF & Un & 2017 & - & - & - & 0.832 & 0.832 & 0.091 & - & - & - & 0.734 & 0.757 & 0.134 & - & - & - \\ MNL & Un & 2018 & 0.725 & - & 0.075 & 0.810 & - & 0.091 & 0.820 & - & 0.065 & 0.747 & - & 0.157 & - & - & - \\ WSS & I & 2017 & 0.633 & - & 0.100 & 0.767 & - & 0.108 & 0.773 & - & 0.078 & 0.697 & - & 0.184 & - & - & - \\ ASMO & I & 2018 & 0.568 & - & 0.115 & 0.762 & - & 0.068 & 0.762 & - & 0.088 & 0.653 & - & 0.205 & - & - & - \\ MSW & M & 2019 & 0.705 & 0.752 & 0.091 & 0.851 & 0.820 & 0.099 & 0.828 & 0.812 & 0.086 & 0.759 & 0.762 & 0.136 & - & - & - \\ MFNet & I & 2021 & 0.733 & 0.775 & 0.076 & 0.858 & 0.835 & 0.084 & 0.859 & 0.847 & 0.058 & 0.764 &0.768 & 0.117 & 0.731 & 0.795 & \underline{0.075} \\ WSSD & Sub & 2021 & - & - & - &\underline{0.873} & 0.827 & 0.119 & \underline{0.884} & \underline{0.870} & 0.082 & \underline{0.820} &\underline{0.814} & 0.128 & 0.703 & 0.768 & 0.114 \\ WSSA & S & 2020 & \underline{0.755} & \underline{0.803} & \underline{0.062} & 0.871 & \underline{0.865} & \underline{0.059} & 0.864 & 0.865 & 
\underline{0.047} & 0.788 & 0.796 & \underline{0.094} & \underline{0.736} & \underline{0.800} & 0.077 \\ Ours & H & & \textbf{0.803} & \textbf{0.837} & \textbf{0.050} & \textbf{0.899} & \textbf{0.886} & \textbf{0.051} & \textbf{0.892} & \textbf{0.887} & \textbf{0.038} & \textbf{0.827} & \textbf{0.828} & \textbf{0.076} & \textbf{0.755} & \textbf{0.813} & \textbf{0.069} \\ \hline \end{tabular}} \label{table-compare_value} \end{table*} In general, the whole loss of the R-Net consists of the dominant loss $\ell_{dom}$ on the final prediction and three auxiliary losses $\ell_{aux}^i$ on the side outputs generated by the middle three layers of the decoder, which is formulated as: \begin{equation} {{L}_{R}}={{\ell}_{k,dom}}+\sum\limits_{i=1}^{3}{{{\lambda }_{i}}\ell_{k,aux}^{i}}, \end{equation} where $k=\{r,p\}$ indexes the real-labeled or pseudo-labeled data, and ${\lambda}_{i}$ are the hyper-parameters that control the weight of each auxiliary loss, which are set to $(0.2, 0.4, 0.8)$ in the experiments. Since the S-Net in this paper is replaceable, we still follow the loss function of its original paper, with the training order of `first pseudo labels, then real labels'. \section{EXPERIMENT} \subsection{Implementation Details and Setup} \subsubsection{Datasets} Five widely-used salient object detection benchmark datasets are employed to evaluate the overall performance, including: \begin{itemize} \item DUTS \cite{wang2017learning} dataset contains 10,553 training images (DUTS-TR) and 5,019 testing images (DUTS-TE), with pixel-wise saliency ground truths. \item ECSSD \cite{yan2013hierarchical} dataset consists of real images of complex scenes, containing 1,000 complex images with the corresponding pixel-wise saliency ground truths. \item HKU-IS \cite{li2015visual} dataset includes 4,447 challenging images, most of which are low-contrast or contain multiple salient objects. \item PASCAL-S \cite{li2014secrets} dataset consists of 850 images from the PASCAL VOC 2010 validation set, with multiple salient objects in the scene. \item THUR \cite{DBLP:journals/vc/ChengMHH14} dataset collects 15,000 images from the Internet and annotates each image with the corresponding pixel-level saliency ground truth. \end{itemize} \subsubsection{Evaluation Metrics} We adopt the Precision-Recall (PR) curve \cite{crm2019tip,crm2020going}, max F-measure score \cite{crm2018tip,crm2019tc}, S-measure score \cite{fan2017structure}, and MAE score \cite{crmICME,crm2019tmm} as the evaluation metrics. The closer the PR curve is to the upper right corner, the better the model performance. The larger the F-measure and S-measure values, the better the performance, while for the MAE score it is the opposite. \subsubsection{Implementation Details} We select the first 1,000 samples in the DUTS-TR dataset \cite{wang2017learning} as the real-labeled data, providing the pixel-wise real ground truths. Then, we use the MB method \cite{zhang2015minimum} to generate the saliency maps for all the images in the DUTS-TR dataset \cite{wang2017learning}, thereby forming the coarse-labeled set. The validation set includes a total of 100 images from the SOD dataset \cite{DBLP:conf/cvpr/MovahediE10}. We use the PyTorch toolbox to implement the proposed network and accelerate training with an NVIDIA GeForce RTX 3090 GPU. We also implement our network using the MindSpore Lite tool\footnote{\url{https://www.mindspore.cn/}}.
The ResNet-50 is used as the backbone of the R-Net to extract encoder features, with the initial parameters loaded from the model pre-trained on ImageNet \cite{deng2009imagenet}. MINet \cite{DBLP:conf/cvpr/PangZZL20} is used as the S-Net in our implementation. For the R-Net, the training images are first resized to $288\times 288$ by uniform resizing and random cropping. All training samples are then augmented using random horizontal flips and rotations. For the S-Net, we directly resize all images to $320\times 320$ during training and inference, and then apply the same augmentation strategy as for the R-Net. During training, the R-Net and S-Net are optimized by the Adam optimizer with a batch size of 8, a momentum of 0.9, and a weight decay of $5e^{-4}$. The initial learning rate is set to $1e^{-4}$ and divided by ten every ten epochs. The overall network is trained for a total of five iterations, and we use the same number of epochs (\textit{i}.\textit{e}., 30 epochs) for each iteration of the training process, for both the R-Net and the S-Net. Note that we only use the warm-up strategy in the first iteration. In the R-Net, we need to concatenate the RGB image and the coarse label together as the backbone input, for a total of 4 channels. Following the setting in \cite{DBLP:conf/cvpr/FuFJZ20}, we duplicate the original three input channels of the ResNet model and their parameters once to form 6 channels, and then take the first four channels as the input layer of the new model. \begin{figure*}[!t] \centering \includegraphics[scale=0.82]{Image/compare_image.pdf} \caption{Visual comparisons with other state-of-the-art methods in various representative scenes.} \label{visualization_compare} \end{figure*}
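A minimal PyTorch sketch of this input-layer adaptation is given below; it assumes a recent torchvision and illustrates the duplication scheme rather than reproducing our exact code.
\begin{verbatim}
# Sketch of the 4-channel input adaptation (illustrative; assumes
# torchvision >= 0.13 for the weights API).
import torch
import torch.nn as nn
from torchvision.models import resnet50, ResNet50_Weights

def make_four_channel_resnet50():
    net = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
    w = net.conv1.weight.data               # (64, 3, 7, 7) pretrained kernel
    w6 = torch.cat([w, w], dim=1)           # duplicate channels: 3 -> 6
    conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
    conv1.weight.data = w6[:, :4].clone()   # keep the first 4 channels
    net.conv1 = conv1
    return net                              # input: RGB (3) + coarse label (1)
\end{verbatim}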
\subsection{Comparison with State-of-the-arts} We compare the proposed method with other state-of-the-art models, including fully-supervised methods (\textit{i}.\textit{e}., PAGR \cite{zhang2018progressive}, MLMSNet \cite{wu2019mutual}, CPD \cite{wu2019cascaded}, AFNet \cite{feng2019attentive}, BASNet \cite{qin2019basnet}, GCPANet \cite{chen2020global}, DGRL \cite{DBLP:conf/cvpr/WangZWL0RB18}, PiCANet \cite{DBLP:journals/tip/LiuHY20}, PFAN \cite{DBLP:conf/cvpr/ZhaoW19}, and MINet \cite{DBLP:conf/cvpr/PangZZL20}), weakly-supervised methods (\textit{i}.\textit{e}., MSW \cite{zeng2019multi}, WSSA \cite{zhang2020weakly}, ASMO \cite{li2018weakly}, WSS \cite{wang2017learning}, MFNet \cite{DBLP:conf/iccv/PiaoWZL21}, and WSSD \cite{DBLP:journals/tcsv/ZhengTZML21}), and unsupervised methods (\textit{i}.\textit{e}., SVF \cite{zhang2017supervision} and MNL \cite{zhang2018deep}). For a fair comparison, the saliency maps of the different methods are provided by the authors or obtained by running the released code with default parameters. \subsubsection{Quantitative Evaluation} First, the PR curves are shown in Fig. \ref{pr_fm}. Our method (red dashed line) achieves the best performance in most cases compared with other weakly-supervised and unsupervised methods on the five common datasets, which is consistent with the quantitative scores reported in Table \ref{table-compare_value}. Compared with the unsupervised SVF method \cite{zhang2017supervision} on the PASCAL-S dataset, the percentage gain of our method reaches $12.7\%$ for max F-measure, $9.4\%$ for S-measure, and $43.3\%$ for MAE score. Compared with the WSSD method with subitizing supervision \cite{DBLP:journals/tcsv/ZhengTZML21} and the MFNet method with image-level supervision \cite{DBLP:conf/iccv/PiaoWZL21} on the HKU-IS dataset, the percentage gains reach $53.7\%$ and $34.5\%$ for MAE score, respectively. In addition, our method also achieves competitive performance against weakly-supervised SOD models with stronger supervision. For example, compared with the MSW method \cite{zeng2019multi} with a variety of combined supervision labels, the percentage gain of S-measure reaches $11.3\%$ on the DUTS-TE dataset, and the percentage gain of max F-measure reaches $13.9\%$. For the scribble-based weakly-supervised SOD method (\textit{e}.\textit{g}., WSSA \cite{zhang2020weakly}), although the proportion of annotations is relatively small, each sample clearly defines the foreground and background regions; that is, the supervision information given by the scribbles is perfectly accurate. By contrast, the hybrid labels we use contain $90\%$ coarse labels with a lot of uncertain noise, yet our model outperforms the WSSA method overall on all metrics across all datasets. For example, compared with WSSA \cite{zhang2020weakly} on the DUTS-TE dataset, the percentage gains of max F-measure, S-measure, and MAE score reach $6.4\%$, $4.2\%$, and $7.1\%$, respectively. It is worth mentioning that our method catches up with or even surpasses some fully-supervised methods on certain datasets (\textit{e}.\textit{g}., the THUR dataset). In our proposed framework, we choose MINet \cite{DBLP:conf/cvpr/PangZZL20} as our S-Net for training, and achieve $80\%\sim90\%$ of its original performance using only $1/10$ of the original training set. Of course, there is still a lot of room for improvement in performance. \subsubsection{Qualitative Comparison} Some visual comparisons are shown in Fig. \ref{visualization_compare}. It can be seen that our method surpasses the unsupervised and weakly-supervised methods in terms of structural integrity and accuracy, and achieves results comparable to fully-supervised methods. Our advantages are reflected in the following aspects: \begin{itemize} \item \emph{Advantages in background suppression}: Our model can effectively suppress noise and accurately locate salient objects. For example, in the second image, the unsupervised SVF method \cite{zhang2017supervision} and the scribble-based WSSA method \cite{zhang2020weakly} fail to accurately locate the boundary between the snow and the sheep in the complex background. Also, in the fourth image, some weakly-supervised methods (\textit{e}.\textit{g}., WSSD \cite{DBLP:journals/tcsv/ZhengTZML21}, WSSA \cite{zhang2020weakly}), as well as several fully-supervised methods (\textit{e}.\textit{g}., CPD \cite{wu2019cascaded}, PAGR \cite{zhang2018progressive}, PFAN \cite{DBLP:conf/cvpr/ZhaoW19}), wrongly detect the hand as the salient object. In contrast, our model achieves better results in terms of localization accuracy and background suppression. \item \emph{Advantages in detail depiction}: Our model has a better ability to capture detailed information such as sharp boundaries and complete structures. In the third image, other weakly-supervised methods either fail to detect the dog's limbs completely, or fail to distinguish the boundary between the limbs and the grass background. Similarly, neither the cow in the fifth image nor the person in the sixth image can be detected completely.
In contrast, our method is not only able to detect the relatively complete structure of these salient objects, but also produces clearer and sharper boundaries. In the last image, our method has a clear advantage in characterizing the horns and limbs of the cow compared with other weakly-supervised methods; in particular, our method can accurately detect the distant cow's feet, producing a more complete result. \item \emph{Advantages in low-contrast scenes}: Our model can identify the salient object even in low-contrast scenes. For example, the color of the eagle's wings is so close to that of the mountain behind it that it is difficult even for fully-supervised methods, such as CPD \cite{wu2019cascaded} and PAGR \cite{zhang2018progressive}, to fully detect this region. Unsurprisingly, all other weakly-supervised methods also fail in this region. Fortunately, thanks to the entire network architecture and the multi-dimensional feature fusion, the method proposed in this paper successfully detects the left wing of the eagle. Furthermore, in the eighth image, not only is the color of the leopard very close to that of the tree trunk, but part of its legs is covered by the trunk, which increases the difficulty of detection. However, our model can still detect the leopard's entire legs based on the relationship between objects, which even exceeds the detection ability of some fully-supervised models. \end{itemize} \subsection{Ablation Study} To validate the effectiveness of our proposed network, we conduct comprehensive ablation experiments on the HKU-IS, DUTS-TE, and PASCAL-S datasets, covering the overall framework, the design of the R-Net, and the training strategy. \begin{table}[!t] \centering \renewcommand\arraystretch{1.2} \begin{spacing}{1} \caption{The effectiveness analyses of the overall framework on the PASCAL-S, DUTS-TE and HKU-IS datasets.} \label{as-1} \setlength{\tabcolsep}{0.6mm}{ \begin{tabular}{c|cc|cc|cc} \hline & \multicolumn{2}{c|}{PASCAL-S} & \multicolumn{2}{c|}{DUTS-TE} & \multicolumn{2}{c}{HKU-IS}\\ \cline{2-7} & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ \\ \hline M1 & 0.690 & 0.161 & 0.622 & 0.136 & 0.775 & 0.101 \\ M2 & 0.783 & 0.120 & 0.741 & 0.089 & 0.855 & 0.070 \\ M3 & 0.801 & 0.093 & 0.755 & 0.065 & 0.868 & 0.048 \\ Ours & 0.827 & 0.076 & 0.803 & 0.050 & 0.892 & 0.038 \\ \hline \end{tabular}} \end{spacing} \end{table} \renewcommand\arraystretch{2} \begin{figure}[!t] \centering \includegraphics[scale=0.315]{Image/ab1new.pdf} \caption{Visualization results for the effectiveness of the overall framework.} \label{visualization-as-1} \end{figure} \subsubsection{Effectiveness of the overall framework} In the face of hybrid labels, Fig. \ref{fig:general-framework} shows two SOD framework pipelines: one is direct hybrid training, as shown in Fig. \ref{fig:general-framework}(a), or even training with only real labels, and the other is our proposed framework, in which label refinement and SOD are alternately trained under hybrid labels. To verify the effectiveness of our overall framework, we design three ablation experiments. (1) M1: we first train the S-Net with 9,000 coarse-labeled samples and then fine-tune it with 1,000 real-labeled samples. (2) M2: we only train the S-Net with 1,000 real-labeled samples.
(3) M3: we first train the S-Net using the 1,000 real-labeled samples, then use the trained S-Net to predict and update the original coarse labels of the remaining 9,000 samples, and finally retrain the S-Net using the updated coarse-labeled samples together with the real-labeled samples. The quantitative results on the PASCAL-S, DUTS-TE and HKU-IS datasets are reported in Table \ref{as-1}, and some visual comparisons are shown in Fig. \ref{visualization-as-1}. Comparing M1 and M2, we can see that directly introducing coarse labels leads to a significant drop in performance, mainly due to the unreliable noise in the coarse labels. Compared with these two schemes, our proposed framework ensures that better results can still be achieved when training with coarse labels. For example, on the DUTS-TE dataset, the percentage gain of max F-measure against the M2 model is 8.4\%, and the percentage gain of max F-measure against the M1 model reaches 29.1\%. As can be seen from Fig. \ref{visualization-as-1}, experiment M1 can only detect the main part of the object, with obvious omissions (such as the chicken on the far right in the first image and the pomegranate on the left in the second image). Furthermore, the results of experiment M1 are inferior to those of experiment M2 trained with only 1,000 real-labeled samples. However, the results of M2 still contain a lot of noise and show a very limited ability to describe details (such as the duck's paws in the first image). In addition, experiment M3 outperforms experiment M2, but our full model still has obvious advantages in performance. For example, on the DUTS-TE dataset, compared with experiment M3, the max F-measure score of the full model is improved from 0.755 to 0.803 with a percentage gain of 6.3\%, and the MAE score is improved from 0.065 to 0.050 with a percentage gain of 23.1\%. On the HKU-IS dataset, the max F-measure score of the full model is improved from 0.868 to 0.892 with a percentage gain of 2.8\% compared with experiment M3, and the MAE score is improved from 0.048 to 0.038 with a percentage gain of 20.8\%. For experiment M3, although the noise is significantly reduced, the performance is still inferior to our framework in terms of details, such as the chicken's feet. Together, these quantitative and visual results verify the effectiveness of our overall framework.
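To make the contrast with M1--M3 concrete, the control flow of our alternating scheme can be summarized in a short sketch. The following illustrative Python pseudocode is a minimal sketch, not our released implementation; the helpers \texttt{train}, \texttt{predict}, \texttt{is\_credible}, and \texttt{contaminate} are hypothetical placeholders for the actual training, inference, credibility-verification, and label-contamination routines.
\begin{verbatim}
# Minimal sketch of the alternating R-Net/S-Net training loop.
# All helpers are hypothetical placeholders, not the paper's code.
import random

def train(net, samples):          # placeholder: would run SGD epochs
    net["seen"] += len(samples)

def predict(net, image):          # placeholder: would return a saliency map
    return "refined_label_for_" + str(image)

def is_credible(sample):          # placeholder credibility check, e.g.
    return random.random() > 0.1  # agreement with the coarse label

def contaminate(samples):         # placeholder: perturb the real labels
    return list(samples)

def alternating_training(real_set, coarse_groups, r_net, s_net):
    pseudo_pool = []
    for it, group in enumerate(coarse_groups):
        # label-refinement step: R-Net learns from trusted data
        train(r_net, real_set + pseudo_pool)
        refined = [(x, predict(r_net, x)) for (x, _) in group]
        if it > 0:                # credibility check from 2nd iteration
            refined = [s for s in refined if is_credible(s)]
        pseudo_pool += refined
        # SOD step: S-Net learns from contaminated real labels plus
        # the growing pool of verified pseudo labels
        train(s_net, contaminate(real_set) + pseudo_pool)
    return s_net

real = [("img%d" % i, "gt%d" % i) for i in range(4)]
groups = [[("c%d_%d" % (g, i), None) for i in range(6)] for g in range(3)]
r_net, s_net = {"seen": 0}, {"seen": 0}
alternating_training(real, groups, r_net, s_net)
\end{verbatim}
Unlike the one-shot scheme of M3, both networks are retrained in every iteration, so each newly imported group is refined by an R-Net that has already absorbed all previously verified pseudo labels.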
\renewcommand\arraystretch{1.4} \begin{table}[!t] \centering \small \caption{Ablation study of BGA on the PASCAL-S, DUTS-TE and HKU-IS datasets, where `B' denotes the baseline model, `G' the guidance stage, and `A' the aggregation stage.} \label{as-2} \setlength{\tabcolsep}{0.6mm}{ \begin{tabular}{ccc|cc|cc|cc} \hline \multirow{2}{*}{B} & \multirow{2}{*}{\makecell[c]{G}} & \multirow{2}{*}{\makecell[c]{A}} & \multicolumn{2}{c|}{PASCAL-S}& \multicolumn{2}{c|}{DUTS-TE} & \multicolumn{2}{c}{HKU-IS} \\ \cline{4-9} & & &${{F}_{\beta}^{max}}\uparrow$ &${{MAE}}\downarrow$ &${{F}_{\beta}^{max}}\uparrow$ &${{MAE}}\downarrow$ &${{F}_{\beta}^{max}}\uparrow$ &${{MAE}}\downarrow$ \\ \hline \checkmark & & & 0.792 & 0.098 & 0.766 & 0.069 & 0.865 & 0.053 \\ \checkmark &\checkmark & & 0.803 & 0.086 & 0.786 & 0.056 & 0.880 & 0.045 \\ \checkmark & &\checkmark & 0.809 & 0.083 & 0.791 & 0.053 & 0.882 & 0.043 \\ \checkmark &\checkmark & \checkmark & 0.827 & 0.076 & 0.803 & 0.050 & 0.892 & 0.038 \\ \hline \end{tabular}} \end{table} \begin{figure}[!t] \centering \includegraphics[scale=0.35]{Image/ab2.pdf} \caption{Visual comparisons showing the benefits of the proposed modules.} \label{visualization-ab2} \end{figure} \subsubsection{Effectiveness of the R-Net} In the design of R-Net, the BGA module is the crucial core component, comprising a guidance stage and an aggregation stage. To demonstrate the effectiveness of the BGA module designed in R-Net, we conduct ablation experiments on the PASCAL-S, DUTS-TE and HKU-IS datasets, and the quantitative and qualitative results are reported in Table \ref{as-2} and Fig. \ref{visualization-ab2}. First, we replace the BGA module with a simple concatenation-convolution fusion, thereby forming the baseline model. Based on the baseline model, we separately add the guidance and aggregation stages in the verification experiments. In the guidance stage, the saliency-refinement mainstream branch is supplemented and enriched with the guidance information (\textit{e}.\textit{g}., object localization and completeness) of the RGB-image guidance branch. As shown in Table \ref{as-2}, compared with the baseline model on the DUTS-TE dataset, the max F-measure is improved from 0.766 to 0.786 by introducing only the guidance stage, with a percentage gain of 2.6\%. The visualization results in Fig. \ref{visualization-ab2} show that some irrelevant backgrounds are effectively suppressed (such as the left region in the first image), and some salient regions can also be recovered (such as the lower wing region in the third image), but there are also some cases where the detection is incomplete (such as the top wing in the third image). In addition, the aggregation stage aims to more comprehensively integrate the corresponding encoder features, previous decoder features, and global context features. When only the aggregation stage is introduced, we achieve better performance than the baseline model, and even slightly better than the model with only the guidance stage, which also illustrates the importance of effective multi-level fusion. For example, with only the aggregation stage on the DUTS-TE dataset, the max F-measure is improved from 0.766 to 0.791, with a percentage gain of 3.3\%. From the visualization results, it can be seen that the aggregation stage can better complete the object structure (such as the upper wing in the third image), but still introduces some additional noise and interference.
By contrast, the model that includes both stages achieves the best performance. On the PASCAL-S, DUTS-TE and HKU-IS datasets, the percentage gain of the max F-measure against the baseline model reaches 4.4\%, 4.8\% and 3.1\%, respectively. Also, the structure of the final result is more complete, and irrelevant background regions are suppressed more thoroughly. Besides, to verify the effectiveness of introducing the RGB-image guidance branch in the R-Net, we add an ablation experiment. As a comparison, we remove the RGB-image guidance branch from the full model, denoted as w/o RGB. From Table \ref{rm-rgb_branch}, we can see that the performance of the network degrades after removing the RGB branch on all three testing datasets. For example, on the DUTS-TE dataset, compared with the model without the RGB branch, the max F-measure score is improved from 0.761 to 0.803 with a percentage gain of 5.5\%, and the MAE score is improved from 0.077 to 0.050 with a percentage gain of 35.1\%. \begin{table}[!t] \centering \renewcommand\arraystretch{1.4} \begin{spacing}{1} \caption{Ablation study of the RGB branch in R-Net on the PASCAL-S, DUTS-TE and HKU-IS datasets.} \label{rm-rgb_branch} \setlength{\tabcolsep}{0.15mm}{ \begin{tabular}{c|cc|cc|cc} \hline & \multicolumn{2}{c|}{PASCAL-S} & \multicolumn{2}{c|}{DUTS-TE} & \multicolumn{2}{c}{HKU-IS}\\ \cline{2-7} & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ \\ \hline \makecell[c]{w/o RGB } & 0.791 & 0.109 & 0.761 & 0.077 & 0.873 & 0.054 \\ Full model & 0.827 & 0.076 & 0.803 & 0.050 & 0.892 & 0.038 \\ \hline \end{tabular}} \end{spacing} \end{table} \subsubsection{Effectiveness of Training Strategy} To validate the effectiveness of our proposed training strategy, we conduct four ablation experiments: (1) No.1: we simplify the designed training strategy: the two networks are no longer trained alternately; instead, we directly train R-Net on the 1,000 real-labeled samples, then run it on the 9,000 coarse-labeled samples to obtain the corresponding pseudo labels, and finally use the pseudo-labeled and real-labeled samples to train the S-Net. This experiment is designed to verify the effectiveness of the overall training strategy. (2) No.2: we remove the credibility verification mechanism in each iteration to verify its effectiveness. (3) No.3: we use all refined coarse labels and real labels as supervision in the fifth iteration of S-Net training. (4) No.4: we remove the contaminated data and instead let the unmodified real-labeled samples participate in each iteration, which is used to verify the effectiveness of the contamination mechanism on the real-labeled data. As can be found in Table \ref{as-3}, even with our designed pipeline of label refinement and SOD, the network cannot realize its full advantage without the proposed training strategy. For example, on the DUTS-TE dataset, the max F-measure of experiment No.1 drops from 0.803 to 0.763 compared to the model with the full training strategy, a drop of 0.040. In addition, we introduce a credibility verification mechanism from the second iteration to ensure the validity of the pseudo labels. It can also be seen from the table that after removing this mechanism in experiment No.2, the metrics on all datasets decrease.
From experiment No.3, we can see that although the amount of data for training S-Net is increased, the data balance is disrupted, resulting in a slight decrease in performance rather than an improvement. For example, compared with the original training strategy, the max F-measure score of experiment No.3 drops from 0.827 to 0.824 on the PASCAL-S dataset, and from 0.803 to 0.800 on the DUTS-TE dataset. Moreover, the training time of experiment No.3 is much longer than that of the full model. In addition, the role of the contaminated labels is to prevent the network from overfitting the 1,000 real-labeled samples, since these samples are involved in each round of training. From Table \ref{as-3}, it can be seen that the participation of contaminated real-labeled data in training improves the robustness and performance of the network. Compared to the version without the contamination mechanism on the real-labeled data, the MAE score is improved from 0.080 to 0.076 with a percentage gain of 5.2\% on the PASCAL-S dataset, and from 0.042 to 0.038 with a percentage gain of 10.5\% on the HKU-IS dataset. In summary, our model framework equipped with the designed training strategy achieves clear advantages in detection performance on the PASCAL-S, DUTS-TE and HKU-IS datasets. \renewcommand\arraystretch{1.1} \begin{table}[!t] \centering \caption{Ablation study of the training strategy with hybrid labels on the PASCAL-S, DUTS-TE and HKU-IS datasets.} \label{as-3} \setlength{\tabcolsep}{0.6mm}{ \begin{tabular}{c|cc|cc|cc} \hline & \multicolumn{2}{c|}{PASCAL-S} & \multicolumn{2}{c|}{DUTS-TE} & \multicolumn{2}{c}{HKU-IS}\\ \cline{2-7} & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ \\ \hline No.1 & 0.786 & 0.095 & 0.763 & 0.071 & 0.863 & 0.059 \\ No.2 & 0.819 & 0.083 & 0.792 & 0.054 & 0.880 & 0.043 \\ No.3 & 0.824 & 0.075 & 0.800 & 0.050 & 0.889 & 0.039 \\\hline \makecell[c]{No.4} & 0.812 & 0.080 & 0.796 & 0.053 & 0.878 & 0.042 \\\hline Ours & 0.827 & 0.076 & 0.803 & 0.050 & 0.892 & 0.038 \\ \hline \end{tabular}} \end{table} \subsubsection{Impact of the Group Settings} Considering the sample imbalance caused by the difference in the number of real-labeled and coarse-labeled samples, we propose a group-wise incremental mechanism to avoid network collapse caused by importing a large amount of pseudo-labeled data at once. In implementation, we divide all training samples into several groups, and gradually increase the amount of pseudo-labeled data in each training iteration. The number of groups simply reflects the number of samples imported in each iteration and, in theory, does not have a significant impact on performance; however, the larger the number of groups, the more iterations are required and the longer the training time. To this end, we design ablation experiments with different numbers of groups (\textit{i}.\textit{e}., 5 and 15), as reported in Table \ref{group-settings}. When the number of groups is set to 5, the 1,000 real-labeled samples are still grouped into GROUP 1, and the remaining 9,000 coarse-labeled samples are divided into four groups for training, each containing 2,250 samples. Since the number of training iterations is related to the number of groups, we add one group of data per iteration, and training over these five groups takes three iterations in total.
Similarly, when the number of groups is set to 15, the remaining 9,000 coarse-labeled samples are divided into 14 groups of 642 or 643 images each, and the whole training process requires 8 iterations. It can be seen from Table \ref{group-settings} that the performance for different numbers of groups differs only slightly, which is consistent with our theoretical analysis. \begin{table}[!t] \centering \small \renewcommand\arraystretch{1.5} \begin{spacing}{1} \caption{Ablation study of different group settings.} \label{group-settings} \setlength{\tabcolsep}{0.1mm}{ \begin{tabular}{c|cc|cc|cc} \hline & \multicolumn{2}{c|}{PASCAL-S} & \multicolumn{2}{c|}{DUTS-TE} & \multicolumn{2}{c}{HKU-IS}\\ \cline{2-7} & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ & ${{F}_{\beta }^{max}}\uparrow$ & $MAE\downarrow $ \\ \hline 5 Groups & 0.824 & 0.074 & 0.798 & 0.049 & 0.889 & 0.039 \\ 15 Groups & 0.824 & 0.075 & 0.801 & 0.049 & 0.890 & 0.039 \\ \makecell[c]{10 Groups (Ours) } & 0.827 & 0.076 & 0.803 & 0.050 & 0.892 & 0.038 \\ \hline \end{tabular}} \end{spacing} \end{table} \section{Conclusion} In this paper, we propose a weakly-supervised learning framework for SOD tasks with hybrid labels, which is decoupled into an R-Net and an S-Net. In order to make full use of the limited annotation information, the R-Net, equipped with the Blender with Guidance and Aggregation (BGA) mechanism, is designed to refine the coarse labels and generate pseudo labels for S-Net training. In addition, we design three training mechanisms to guarantee the effectiveness and efficiency of network training: the alternate iteration mechanism, the group-wise incremental mechanism, and the credibility verification mechanism. Evaluations on five benchmark datasets demonstrate the effectiveness of our approach. \ifCLASSOPTIONcaptionsoff \newpage \fi \bibliographystyle{IEEEtran}
\section*{Supplementary Information} \section{Design and simulations} \subsection{Subwavelength element simulations} \PillarCalc Comparisons of our rigorous coupled-wave analysis (RCWA) MATLAB code to open source planewave expansion \cite{Johnson_OE_01} and RCWA software \cite{Liu_CPC_12} are shown in Fig.~\ref{Fig:PillarCalc} to verify the accuracy of our calculations. As described in the methods section of the main text, the Bloch-mode effective index calculated by solving for the eigenvalues of Maxwell's equations in a truncated planewave basis with implicit periodic boundary conditions is shown in Fig.~\ref{Fig:PillarCalc}\textbf{a}. The effective index of the lowest-order HE$_{11}$ mode supported by an isolated pillar is also shown for comparison. The corresponding normal-incidence phase shift for \SI{1.0}{\micro\meter}-high pillars on a homogeneous diamond substrate, calculated by $\phi(d) = \angle t(d)$, is shown in Fig.~\ref{Fig:PillarCalc}\textbf{b}. Among the advantages of our metasurface design is its high transmission efficiency. Since the effective index of each pillar lies naturally between the refractive index of air and that of diamond (Fig.~1\textbf{b} of the main text), the pillars are inherently anti-reflective with an average transmission efficiency of $88.6\%$, which is higher than the $83\%$ transmission efficiency predicted for an air/diamond interface by normal incidence Fresnel coefficients (Fig.~\ref{Fig:PillarCalc}\textbf{c}). \subsection{Image reconstruction} \label{Sec:ImageRecon} The image formed by our microscope can be described by an electric-field amplitude vector, $\vec{E}_{\text{image}}(\vec{r}_{\text{image}})$, that is a function of the FSM and $\hat{z}$-piezo stage positions described by $\vec{r}_{\text{image}} = x_{\text{FSM}}\cdot\hat{x} + y_{\text{FSM}}\cdot\hat{y} + z_{\text{piezo}}\cdot\hat{z}$. This field vector can be expressed as a volume integral over the tensor Green's function (see ref. \cite{Novotny_12}), $\mathbf{G}(\vec{r}_{\text{image}},\vec{r})$, that describes the impulse response of our imaging system with a current distribution $\vec{J}(\vec{r})$ in the object space, $\vec{r}$, \begin{equation} \vec{E}_{\text{image}}(\vec{r}_{\text{image}}) = j\omega\mu_0 \int_V \mathbf{G}(\vec{r}_{\text{image}},\vec{r})\cdot\vec{J}(\vec{r}) \text{d}V. \label{Eqn:GreenField} \end{equation} \noindent Calculation of the tensor Green's function is described in the methods section. For the measurements described in Fig.~3 of the main text, the current distribution being imaged can be described by a displacement current, $\vec{J} = -j\omega\epsilon_{\text{D}}\epsilon_0\vec{E}$, caused by the focused fields of the metalens. Substituting this into Eqn.~(\ref{Eqn:GreenField}) and normalizing to remove scaling factors, we find an expression for the image field: \begin{align} \vec{E}_{\text{image}}(\vec{r}_{\text{image}}) &= \nonumber \\ \int_z \int_y \int_x &\mathbf{G}(x-x_{\text{FSM}},y-y_{\text{FSM}},z-z_{\text{piezo}})\cdot\vec{E}(x,y,z) \mathop \text{d}x \mathop \text{d}y \mathop \text{d}z \\ &= (G_{xx}*E_x + G_{xy}*E_y + G_{xz}*E_z)\cdot\hat{x} \nonumber \\ &+ (G_{yx}*E_x + G_{yy}*E_y + G_{yz}*E_z)\cdot\hat{y}, \label{Eqn:FieldFig3} \end{align} \noindent where $*$ denotes a three-dimensional spatial convolution. 
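As an illustration of Eqn.~(\ref{Eqn:FieldFig3}), the image intensity can be assembled from the six Green's-tensor components by three-dimensional convolutions. The following Python/NumPy sketch uses random placeholder arrays in place of the FDTD fields and the numerically evaluated tensor components; it shows the structure of the computation only and is not our analysis code.
\begin{verbatim}
# Sketch of Eqn. (FieldFig3): coherent 3-D convolutions of the Green's
# tensor components with the field components, then |E_image|^2.
import numpy as np
from scipy.signal import fftconvolve

def image_intensity(G, E):
    conv = lambda g, e: fftconvolve(g, e, mode="same")
    Ex = conv(G["xx"], E["x"]) + conv(G["xy"], E["y"]) + conv(G["xz"], E["z"])
    Ey = conv(G["yx"], E["x"]) + conv(G["yy"], E["y"]) + conv(G["yz"], E["z"])
    return np.abs(Ex)**2 + np.abs(Ey)**2

shape = (32, 32, 16)              # placeholder grid
rng = np.random.default_rng(0)
cplx = lambda: rng.standard_normal(shape) + 1j*rng.standard_normal(shape)
G = {k: cplx() for k in ("xx", "xy", "xz", "yx", "yy", "yz")}
E = {k: cplx() for k in ("x", "y", "z")}
I = image_intensity(G, E)         # image on the (x_FSM, y_FSM, z_piezo) grid
\end{verbatim}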
The intensity calculated by the squared magnitude of Eqn.~(\ref{Eqn:FieldFig3}), $I = |\vec{E}_{\text{image}}|^2$ (Eqn.~(3) of the methods section), defines the image formed by our microscope for the measurements presented in Fig.~3 of the main text. For confocal PL measurements the current distribution in Eqn.~(\ref{Eqn:GreenField}) can be replaced by a dipole emitter excited by the \SI{532}{\nano\meter} pump laser, $\vec{J} = -j\omega \mathbf{\alpha}\cdot\vec{E}_{\text{pump}}(\vec{r}_{\text{image}},\vec{r},\lambda_{\text{pump}})\cdot\delta(\vec{r}=\vec{r}_0)$, where $\mathbf{\alpha}$ is the emitter polarizability tensor and $\delta(\vec{r}=\vec{r}_0)$ is the Dirac delta function representing a dipole located at $\vec{r}=\vec{r}_0$. Following the analysis of ref. \cite{Novotny_12}, the integrals in Eqn.~(\ref{Eqn:GreenField}) can be normalized and approximated as the incoherent product of the PSF at pump and PL wavelengths: \begin{equation} I \approx |I_0(\lambda_{\text{pump}})|^2\cdot|I_0(\lambda_{\text{PL}})|^2, \label{Eqn:NVPSF} \end{equation} \noindent where $I_0$ is the lowest-order diffraction integral. In the paraxial limit $I_0$ takes the form of an Airy disk in the transverse plane, \begin{equation} I_0 = \frac{2J_1\left(\text{NA}_{\text{obj}} k_0(r_{\text{image}}-r_0) \right)}{\text{NA}_{\text{obj}} k_0(r_{\text{image}}-r_0)} \label{Eqn:paraxialPSF} \end{equation} \noindent with $(r_{\text{image}}-r_0) = \sqrt{(x_{\text{FSM}}-x_0)^2 + (y_{\text{FSM}}-y_0)^2}$, $k_0 = 2\pi/\lambda$, and NA$_{\text{obj}}$ the numerical aperture of the imaging objective. Equations (\ref{Eqn:NVPSF}) and (\ref{Eqn:paraxialPSF}) are used to characterize the transverse response of our microscope in Sec.~\ref{Sec:Calibration}, while the axial response formed by scanning the piezo stage is described by evaluating Eqn.~(\ref{Eqn:NVPSF}) as a function of $z_{\text{piezo}}$: \begin{equation} I(x_{\text{FSM}} = 0,y_{\text{FSM}} = 0, z_{\text{piezo}}) = \left|\text{sinc}\left(\frac{\text{NA}_{\text{obj}}^2z_{\text{piezo}}}{2n_{\text{oil}}^2\lambda} \right)\right|^4, \label{Eqn:AxialRes} \end{equation} \noindent where $n_{\text{oil}} = 1.518$ is the refractive index of the immersion oil used with our objective. \section{Electron Beam Lithography Methods: Process Characterization, Data Preparation and Proximity Effect Correction} An Elionix ELS-7500EX 50 keV electron beam lithography (EBL) tool was used to generate the metalens pattern in hydrogen silsesquioxane (HSQ), a common negative-tone EBL resist, atop diamond. Using a \SI{300}{\micro\meter} field size and a beam current of \SI{1}{\nano\ampere} on a \SI{5}{\nano\meter} beam step size (shot pitch), the final pattern was exposed as a direct result of careful process characterization and modeling. In this section, we describe the patterns and methods used to generate the proximity effect correction (PEC) parameters for the metalens. \TowerPattern \subsection{Calibration Pattern} To calibrate the resist process, a tower pattern of lines and spaces was exposed in a dose matrix. Illustrated in Fig.~\ref{Fig:TowerPattern} is the line-and-space tower pattern of various pitches representing 0\%, 25\%, 50\%, 75\% and 100\% pattern densities. According to Monte Carlo simulations performed using TRACER\cite{TRACER} by GenISys, exposing with a 50 keV tool atop Si yields a backscatter length ($\beta$) of \SI{10}{\micro\meter}.
Therefore, each pattern density region is 4$\beta$ by 4$\beta$ or greater in size such that the center of the pattern, when exposed, has a total absorbed energy that is saturated from backscattered electrons. \TowerPatternOM A specific pattern density is achieved by applying a specific pitch to the line-space pattern. For example, a 25\% pattern density consists of \SI{300}{\nano\meter} lines on a \SI{1200}{\nano\meter} pitch, where the line occupies 25\% of the full pitch. The line-width and pitch dimensions are provided in Fig.~\ref{Fig:TowerPattern}. After exposure and development, the final pattern seen in Fig.~\ref{Fig:TowerPatternOM} is imaged using a scanning electron microscope (SEM). The SEM images are post-processed to extract the pattern-density-dependent exposure latitudes. \subsection{Process modeling and Correction} PEC is an edge-correction technology in which the absorbed energy of the resist in the pattern is analyzed and dose assignments are made such that the absorbed energy at threshold lands at the edge of the intended design. This threshold is associated with the resist sensitivity and development chemistry. Densely written patterns build up additional absorbed energy \textit{via} electron backscatter, requiring a local dose reduction; conversely, sparsely written (low density) patterns require an increase in local dose. The amount of background energy at these dense and sparse pattern densities directly impacts the exposure latitude, which is the critical-dimension response to a change in dose. \ExposureLatitude HSQ has been shown to exhibit non-ideal behavior in its response to proximity effect correction methods due to microloading effects during resist development \cite{Bickford_JVSTB_14}. Using BEAMER\cite{BEAMER} by GenISys, a genetic algorithm was employed to model the empirical exposure latitude data. For this simulation, only the 0\%, 25\% and 50\% pattern density data were of interest, since the metalens pattern density falls within this range; reducing the input data reduces the convergence time. The parameters used to obtain the model fit were the effective process blur, development bias, and base dose. These values are determined \textit{via} simulation in the genetic algorithm by matching the simulated resist edge contours to the experimental exposure latitude data obtained from the tower pattern in Fig.~\ref{Fig:TowerPattern} that was exposed in a dose matrix as shown in Fig.~\ref{Fig:TowerPatternOM}. The resulting effective process blur is then convolved into the electron point spread function to perform the simulation. The slope of the experimental exposure latitude data is matched in simulation by changing the effective process blur accordingly (Fig.~\ref{Fig:ExposureLatitude}\textbf{a}). By adding two extra degrees of freedom, development bias and base dose, the algorithm can converge properly. \begin{table}[h] \centering \begin{tabular}{|c|c|} \hline $\alpha$ & \SI{5}{\nano\meter} \\ \hline $\beta$ & \SI{10}{\micro\meter} \\ \hline Effective Blur & \SI{67}{\nano\meter} \\ \hline Bias & -\SI{5}{\nano\meter}\\ \hline \end{tabular} \caption{Proximity Effect Correction Parameters\label{Tab:PEC}} \end{table} The final pattern was proximity effect corrected using the parameters found in Tab.~\ref{Tab:PEC}. As a result, the metalens is fractured such that the shapes receive the appropriate dose to print the features to size (Fig.~\ref{Fig:ExposureLatitude}\textbf{b}).
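For illustration, the double-Gaussian electron point-spread function commonly used for this type of PEC can be written down directly from the parameters in Tab.~\ref{Tab:PEC}. In the Python sketch below the backscatter ratio $\eta$ is a hypothetical placeholder (it is not part of Tab.~\ref{Tab:PEC}; BEAMER determines the actual dose assignments internally):
\begin{verbatim}
# Double-Gaussian electron PSF with alpha and beta from Tab. (PEC).
# ETA is a hypothetical placeholder backscatter ratio.
import numpy as np

ALPHA = 0.005   # forward-scatter range in um (5 nm)
BETA = 10.0     # backscatter range in um
ETA = 0.7       # placeholder backscatter-to-forward energy ratio

def psf(r_um):
    """Radial double-Gaussian PSF, normalized to unit plane integral."""
    fwd = np.exp(-(r_um / ALPHA)**2) / ALPHA**2
    back = ETA * np.exp(-(r_um / BETA)**2) / BETA**2
    return (fwd + back) / (np.pi * (1.0 + ETA))

# Integrating the backscatter term over a uniform pattern of density p
# gives a background proportional to ETA*p/(1+ETA); a region must be
# >= 4*beta across for this background to saturate at its center.
for p in (0.0, 0.25, 0.5):
    print("density %.2f -> background %.3f" % (p, ETA * p / (1 + ETA)))
\end{verbatim}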
\section{Imaging with the metalens} \MLQELimage The HPHT diamond hosting the metalens is placed in a conventional upright microscope (Olympus, BX41) for bright-field transmission and reflection imaging (Fig.~2 of the main text). The bright-field transmission microscope image in Fig.~2\textbf{d} of the main text was created by placing a chromium shadow mask between a lamp and a focusing objective, which was focused through the metalens and imaged on a CCD using a second objective as described in Fig.~\ref{Fig:MLQELimage}. The shadow mask was fabricated by e-beam deposition of chromium on a glass microscope slide (Fig.~\ref{Fig:MLQELimage}\textbf{a}) and by creating the pattern shown in Fig.~\ref{Fig:MLQELimage}\textbf{b} with a combination of photolithography and chemical etching. The resulting CCD image shown in Fig.~\ref{Fig:MLQELimage}\textbf{c} was created using the transmission microscope shown in Fig.~\ref{Fig:MLQELimage}\textbf{d}. \section{Metalens characterization} \subsection{Measurement setup} \MLExpSetup The diamond is mounted on a glass cover slip, which is attached to the stage of a custom-built laser-scanning confocal microscope (Fig.~\ref{Fig:MLExpSetup}) for characterization and NV center imaging (Figs.~3,4 of the main text). The laser-scanning confocal microscope has two optical paths for simultaneously probing the metalens from air and through the diamond substrate: a fiber-coupled path and an objective path. The objective path consists of a $4f$ relay-lens system with achromatic doublet lenses (L3 and L4, Newport, $\SI{25.4}{\milli\meter}\times\SI{150}{\milli\meter}$ focal length, PAC058AR.14), which is used to align the back aperture of the objective to a fast-steering mirror (FSM, Optics in Motion, OIM101). This is followed by a \SI{560}{\nano\meter} long-pass dichroic mirror (Semrock, BrightLine FF560-FDi01), which directs the \SI{532}{\nano\meter} excitation laser (Coherent, Compass 315M-150) into the objective (OL, Nikon, Plan Fluor x100/0.5-1.3), while wavelengths above \SI{560}{\nano\meter} are passed through a \SI{532}{\nano\meter} and a \SI{568}{\nano\meter} long-pass filter (Semrock, EdgeBasic BLP01-532R, EdgeBasic BLP01-568R) before being focused down to a \SI{25}{\micro\meter}-core, 0.1~NA, multimode fiber (Thorlabs M67L01) \textit{via} an achromatic doublet lens (L5, Newport, $\SI{25.4}{\milli\meter}\times\SI{50}{\milli\meter}$ focal length, PAC049AR.14). The multimode fiber is then connected to a single-photon counting module (SPCM, Excelitas, SPCM-AQRH-14-FC) or a spectrometer (Princeton Instruments, IsoPlane-160, \SI{750}{\nano\meter} blaze wavelength with 1200 G/mm) with a thermoelectrically-cooled CCD (Princeton Instruments PIXIS 100BX). The electrical output of the single-photon counting module is routed via BNC cables to either a data acquisition card (DAQ, National Instruments PCIe-6323) or a time-correlated single-photon counting card (PicoQuant, PicoHarp 300). The fiber-coupled path is modified to enable the different experiments conducted on the metalens. For characterization, a broadband supercontinuum source (Fianium WhiteLase SC400) was coupled into a single-mode fiber (Thorlabs P1-630AR-2). A $f = \SI{2.0}{\milli\meter}$ collimating lens (L6, Thorlabs CFC-2X-A) was used to create a \SI{380}{\micro\meter} diameter Gaussian beam that emulates the planewave source used in our FDTD simulations.
The excitation wavelength is set by passing the supercontinuum beam through a set of linear variable short-pass (Delta Optical Thin Film, LF102474) and long-pass filters (Delta Optical Thin Film, LF102475) prior to fiber-coupling, which can be adjusted to select a single wavelength with $<\SI{8}{\nano\meter}$ bandwidth or removed completely for broadband excitation. For reflectance measurements, a $f = \SI{15}{\milli\meter}$ achromatic doublet lens (L2, Thorlabs, AC064-015-B) is added to focus the collimated excitation beam to a $\sim\SI{30}{\micro\meter}$-diameter spot at the top surface of the diamond. A beamsplitter cube (Thorlabs, BS014) was added between the collimating and focusing lenses so that reflected light could be focused into a \SI{200}{\micro\meter}-core MMF (Thorlabs, M25L01) that is coupled to a spectrometer (Thorlabs CCS100) using a $f = \SI{100}{\milli\meter}$ achromatic doublet lens (L7, Newport, PAC052AR.14). To modify this setup for imaging an NV center, a \SI{532}{\nano\meter} and a \SI{568}{\nano\meter} long-pass filter (Semrock, EdgeBasic BLP01-532R, EdgeBasic BLP01-568R) are placed after L2, and the filtered light is focused down to a \SI{25}{\micro\meter}-core, 0.1~NA, multimode fiber (Thorlabs M67L02) with a $f = \SI{13}{\milli\meter}$ achromatic doublet lens (L1, Thorlabs, AC064-013-B). The multimode fiber can then be connected to a single-photon counting module or a spectrometer as described in the previous paragraph. \subsection{Calibration} \label{Sec:Calibration} Calibration of the fast-steering mirror (FSM) is critical for characterization of the metalens's point-spread function at focus. To perform this calibration, a PL scan of the metalens surface was taken with the \SI{532}{\nano\meter} pump beam (Fig.~\ref{Fig:SetupCal}\textbf{a}), and the image was compared to the CAD layout of the metalens pattern (Fig.~\ref{Fig:SetupCal}\textbf{b}) to determine the differential voltage required to move the FSM by a known distance in $x$ and $y$. \FSMCal The relative shift in axial position of the confocal collection volume caused by piezo stage movements is scaled by a factor ranging from $\frac{n_{\text{D}}}{n_{\text{oil}}}$ to $\frac{n_{\text{D}} \cos\theta_{\text{D}}}{n_{\text{oil}}\cos\theta_{\text{oil}}}$, where $\theta_{\text{D,oil}} = \sin^{-1}\left(\frac{\text{NA}}{n_{\text{D,oil}}}\right)$ are the maximum focusing angles in diamond and oil, respectively\cite{Visser_Scanning_94}. We calculate this scaling factor using our numerical PSF model described in the methods section, and find that it is $\approx \frac{n_{\text{D}}}{n_{\text{oil}}}$ (Fig.~\ref{Fig:SetupCal}\textbf{c}), which is applied to the measured piezo stage position, $z'_{\text{piezo}}$, to find the physical displacement of the confocal volume within the sample, $z_{\text{piezo}} \approx \frac{n_{\text{D}}}{n_{\text{oil}}} z'_{\text{piezo}}$. The dispersive refractive index of diamond, $n_{\text{D}} (\lambda)$, used for the calculations in Fig.~\ref{Fig:SetupCal}\textbf{c} was modeled using the Sellmeier equation with coefficients from ref. \cite{Mildren_OED_Ch1_13}. The sample thickness was checked by focusing the \SI{532}{\nano\meter} beam on both the bottom and top surfaces of the diamond and measuring the relative positions of the piezo stage. The piezo stage displacement was $\SI{92}{\micro\meter}$, and the iris of the objective was set to NA$_{\text{obj}} = 0.5$. The numerically calculated scaling factor is 1.6, giving a sample thickness of \SI{147}{\micro\meter}.
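As a worked check of the scaling-factor bounds quoted above, the short Python snippet below evaluates both limits for the thickness measurement (NA$_{\text{obj}} = 0.5$, $n_{\text{oil}} = 1.518$, $n_{\text{D}} = 2.4$). The factor of 1.6 used in the text lies between these bounds and comes from the full numerical PSF model:
\begin{verbatim}
import numpy as np

n_oil, n_D, NA = 1.518, 2.4, 0.5
theta_oil = np.arcsin(NA / n_oil)
theta_D = np.arcsin(NA / n_D)

lower = n_D / n_oil                                          # ~1.58
upper = n_D * np.cos(theta_D) / (n_oil * np.cos(theta_oil))  # ~1.64
print(lower, upper)

z_piezo = 92.0          # measured piezo travel, um
print(1.6 * z_piezo)    # ~147 um sample thickness
\end{verbatim}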
The objective lens used has an adjustable iris, which effectively reduces the NA to reduce spherical aberration. The iris was set to NA$_{\text{obj}}\approx 0.75$, which was confirmed by measuring PL from an NV center (Fig.~\ref{Fig:SetupCal}\textbf{d}) and fitting the PL scan as an incoherent convolution of two Airy disks using Eqns.~(\ref{Eqn:NVPSF},\ref{Eqn:paraxialPSF}). We found that the PSF of our microscope was not limited by the spot size of the excitation beam (either due to operating at saturation or the pump beam not being diffraction-limited), and thus modified Eqn.~(\ref{Eqn:NVPSF}) to fit $I = |I_0(\lambda_{\text{PL}})|^2 \cdot |I_0(\lambda_{\text{PL}})|^2$, with the results shown in Fig.~\ref{Fig:SetupCal}\textbf{e}. Fits were performed using both $\lambda_{\text{PL}} = \SI{700}{\nano\meter}$ and a weighted fit over the NV PL spectrum, resulting in fit values of NA$_{\text{obj}} = 0.76 \pm 0.03$ and NA$_{\text{obj}} = 0.73 \pm 0.03$, respectively. Using the fit value of NA$_{\text{obj}}\approx 0.75$, the unaberrated axial PSF corresponding to Eqn.~(\ref{Eqn:AxialRes}) (purple curve) and the numerically evaluated aberrated axial PSF (red curve) are compared to measurements (black circles) in Fig.~\ref{Fig:SetupCal}\textbf{f}. \subsection{Focal length} \AxialFocusFits The focal length of the metalens shown in Fig.~3\textbf{e} of the main text (right axis) was measured at five wavelengths by setting the FSM position to the peak of the transverse focused spot and scanning the piezo sample stage in the $\hat{z}$ (axial) direction in \SI{200}{\nano\meter} steps, corresponding to shifts of $\frac{n_{\text{D}}}{n_{\text{oil}}}\cdot\SI{200}{\nano\meter} \approx \SI{315}{\nano\meter}$ inside the diamond. The position of focus was determined by fitting the peak signal of the piezo scan at each wavelength to a Gaussian (Fig.~\ref{Fig:AxialScanFits}\textbf{a}). The bright PL of the metalens surface was also fit with a Gaussian (Fig.~\ref{Fig:AxialScanFits}\textbf{b}) and used to calibrate the relative distance between the sample surface and the metalens focus. The chromatic aberration of our imaging system was checked by feeding the supercontinuum through the collection line and measuring the location of the metalens's surface \textit{via} a CCD camera in the collection path. Since the supercontinuum is coupled to an SMF, we can achieve this by simply coupling the SMF to the MMF in the objective collection path with an FC-to-FC fiber connector (Thorlabs, ADAFC1). By verifying that the surface location is the same when the excitation source is band-passed to \SI{600}{\nano\meter} as when it is band-passed to \SI{800}{\nano\meter}, the chromatic aberration of the system was found to be negligible. \subsection{Field profiles} A comparison of the simulated metalens focus, $|\vec{E}_{\text{ML}}|^2$, microscope PSF, $|\mathbf{G}\cdot\vec{p}|^2$, image formed by convolving the focus and PSF, $I_{\text{image}}$, and two sets of measured data at five wavelengths from \SIrange{600}{800}{\nano\meter} are shown in Fig.~\ref{Fig:FocusFits}. The microscope PSF is represented by the product of three dipole moments oriented along the three Cartesian axes, $\vec{p} = (\hat{x} + \hat{y} + \hat{z})\cdot\delta(\vec{r}_{\text{image}})$, and the tensor Green's function, $\mathbf{G}$, described in the methods section of the main text. The image intensity, $I_{\text{image}}$, has been calculated by a coherent convolution as described in Sec.~\ref{Sec:ImageRecon}.
The measurements were taken with two different tube lenses (L5 in Fig.~\ref{Fig:MLExpSetup}), $f = \SI{50}{\milli\meter}$ with a $6\times$ reducing telescope (Fig.~\ref{Fig:FocusFits}\textbf{d}), and $f = \SI{100}{\milli\meter}$ (Fig.~\ref{Fig:FocusFits}\textbf{e}). Changing the tube lens effectively changes the size of the collection aperture relative to the image size. For the measurements taken in Fig.~\ref{Fig:FocusFits}\textbf{d}, the aperture can be considered infinitesimal (i.e., far below the confocal condition) and does not affect the imaging resolution\cite{Corle_96}, whereas in Fig.~\ref{Fig:FocusFits}\textbf{e} the aperture is finite and decreases the resolution with which the spot is measured. \TransverseFocusFits Comparisons of $x$ and $y$ cross-sections of the convolved simulations (Fig.~\ref{Fig:FocusFits}\textbf{c}) and measurements with an infinitesimal pinhole (Fig.~\ref{Fig:FocusFits}\textbf{d}) are shown in Fig.~\ref{Fig:FocusCuts}. The agreement between model and measurement seen in Fig.~\ref{Fig:FocusCuts} is remarkable, given that there are no free parameters. In other words, the plots in Fig.~\ref{Fig:FocusCuts} represent an agreement between theory and experiment, rather than a fit to experimental data. \TransverseFocusCuts \subsection{Focusing in air} To measure the focus spot formed in air when the metalens is illuminated by a collimated beam from inside of the diamond (Fig.~\ref{Fig:FocusInAir}\textbf{a}), the diamond substrate is mounted upside-down on the inverted microscope shown in Fig.~\ref{Fig:MLExpSetup} with the metalens facing downwards towards a 100x air objective (Olympus, UMPlanFl $100\times$/0.90) in the objective path. A \SI{633}{\nano\meter} He-Ne laser source (Melles Griot 05-LHP-153) is SMF-coupled and collimated \textit{via} a $f = \SI{2.0}{\milli\meter}$ collimating lens (Thorlabs CFC-2X-A) to illuminate the back side of the diamond substrate from the fiber-coupled path. The methods for measuring the focus spots and focal length of the metalens are described in the Experimental section of Methods, as the collection path after the objective is identical to the objective path shown in Fig.~\ref{Fig:MLExpSetup}. An axial scan of the metalens focus in air is plotted in Fig.~\ref{Fig:FocusInAir}\textbf{b}, showing excellent agreement with the FDTD simulation. The measured transverse focus spot, shown in Fig.~\ref{Fig:FocusInAir}\textbf{c}, is deconvolved using blind deconvolution with MATLAB's \textbf{deconvblind} command. The cross-sections of the deconvolved focus spot, plotted in Fig.~\ref{Fig:FocusInAir}\textbf{d} in black, again demonstrate excellent agreement with the FDTD simulation (red). \FocusInAir \section{Background subtraction for NV measurements} \subsection{Spectra and Saturation curves} The experimental setup for spectral and saturation-curve measurements is described in Fig.~\ref{Fig:MLExpSetup}. Background spectra and saturation curves are measured at a transverse scan position that is away from the NV center but still within the field-of-view of the metalens. Signal (on-NV) and background (off-NV) spectra for both the metalens and objective paths were collected with a 5-min acquisition time. The background spectra for both paths, plotted in Fig.~\ref{Fig:MLwithNVBkgd}\textbf{a}, are subtracted from the signal spectra to yield the points plotted in Fig.~4\textbf{d} of the main text.
For saturation curves, the \SI{532}{\nano\meter} pump beam is passed through a variable optical-density filter (Thorlabs, NDC-50C-4) before going through a beamsplitter cube (Thorlabs BS014), which enables the pump beam's power to be measured by a power meter (Thorlabs PM100D). For each power increment, signal and background photon counts were measured for \SI{500}{\milli\second} for both the metalens and objective paths. The background countrates, plotted in Fig.~\ref{Fig:MLwithNVBkgd}\textbf{b}, are subtracted from the signal countrates to yield the points plotted in Fig.~4\textbf{e} of the main text. \MLwithNVBkgd \subsection{Autocorrelation} When the FSM is centered on the NV center to record photons for cross-correlation, we collect both the photons emitted by the NV center and photons from the background. To account for this background and correct for it, we need to examine the $g^{(2)}(\tau)$ function and its boundary conditions. Given an arbitrary correlation function mixed with a Poissonian background, the measured correlation function, $g^{(2)}_{\text{measured}}(\tau)$, is related to the ideal, background-free correlation function, $g^{(2)}_{\text{ideal}}(\tau)$, in the following way: \begin{equation} g^{(2)}_{\text{measured}}(\tau) = 1 - \rho^2 + \rho^2g^{(2)}_{\text{ideal}}(\tau) \label{Eqn:gMeasured} \end{equation} \noindent This adjusts the boundary conditions of $g^{(2)}_{\text{measured}}(\tau)$ to the following: \begin{equation} g^{(2)}_{\text{measured}} = \begin{dcases} 1 - \rho^2 & \tau = 0\\ 1 & \tau = \infty \end{dcases} \label{Eqn:corrMeasBound} \end{equation} \noindent where $\rho$ is defined as: \begin{equation} \rho = \dfrac{S}{S + B} = 1 - \dfrac{B}{S + B} \label{Eqn:rhoDef} \end{equation} \noindent where $S$ is the signal and $B$ is the background. Both Eqn.~\ref{Eqn:gMeasured} and Eqn.~\ref{Eqn:rhoDef} assume that the background in the measurement is Poissonian. To justify this assumption, we moved the FSM to a spot off the NV center that is still within the metalens's field of view and measured photons from both the metalens and objective paths for the same duration (5 minutes) as when the FSM is centered on the NV center. The off-NV (background) measurement was performed immediately following the on-NV (signal) measurement, and the pair of measurements was repeated 40 times. The recorded countrates are shown in Fig.~\ref{Fig:MLwithNVCorrBkgd}\textbf{a}, illustrating the consistency and stability of the countrates over more than six hours of measurements. We calculate $\rho$ for each pair of experiments by using Eqn.~\ref{Eqn:rhoDef}, where $B$ is measured as the countrate from the off-NV measurement and $S + B$ is measured as the countrate from the on-NV measurement. The distribution of these $\rho$ values is plotted in Fig.~\ref{Fig:MLwithNVCorrBkgd}\textbf{b}. Next, we calculate the cross-correlation of the recorded photons in the signal as well as the background measurements, shown in Fig.~\ref{Fig:MLwithNVCorrBkgd}\textbf{c}. We use a variant of the algorithm developed by Laurence \textit{et al.} \cite{Laurence2006} to calculate the cross-correlation function from the raw photon arrival times. These measurements clearly demonstrate that the background is Poissonian, whereas the background-incorporated signal measurements show a cross-correlation characteristic of a single- or few-photon emitter.
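For reference, a brute-force estimate of the cross-correlation from raw arrival times can be written in a few lines: histogram the pairwise delays between the two channels and normalize by the coincidence rate expected for uncorrelated Poissonian streams. The Python sketch below is only conceptual; the algorithm of ref.~\cite{Laurence2006} used in our analysis is far more efficient:
\begin{verbatim}
# Brute-force g2(tau) from two sorted photon arrival-time arrays (s).
import numpy as np

def g2_from_timestamps(t1, t2, bin_s, window_s, T):
    edges = np.arange(-window_s, window_s + bin_s, bin_s)
    hist = np.zeros(len(edges) - 1)
    for t in t1:
        j0 = np.searchsorted(t2, t - window_s)
        j1 = np.searchsorted(t2, t + window_s)
        hist += np.histogram(t2[j0:j1] - t, bins=edges)[0]
    norm = len(t1) * len(t2) * bin_s / T   # uncorrelated rate per bin
    return 0.5 * (edges[:-1] + edges[1:]), hist / norm

# Poissonian test streams give g2 ~ 1 at all delays:
rng = np.random.default_rng(1)
T = 10.0
t1 = np.sort(rng.uniform(0, T, 20000))
t2 = np.sort(rng.uniform(0, T, 20000))
tau, g2 = g2_from_timestamps(t1, t2, 1e-4, 2e-3, T)
print(g2.mean())                           # ~1.0
\end{verbatim}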
\MLwithNVCorrBkgd To perform the background correction for $g^{(2)}_{\text{measured}}(\tau)$, we rearrange Eqn.~(\ref{Eqn:gMeasured}) to: \begin{equation} g^{(2)}_{\text{background-corrected}}(\tau) = \dfrac{g^{(2)}_{\text{measured}}(\tau) - (1 - \rho^2)}{\rho^2} \label{Eqn:gCorrected} \end{equation} \noindent which yields the points plotted in Fig.~\ref{Fig:MLwithNVCorrBkgd}\textbf{d} and in the main text. We fit the background-corrected autocorrelation function using the well-known approximation of the NV center as a 3-level system\cite{Brouri_OL_00}: \begin{align} g^{(2)}_{\text{background-corrected}}(\tau) &= 1 - Ae^{-\tfrac{\abs{t-t_0}}{\tau_1}} + Ce^{-\tfrac{\abs{t-t_0}}{\tau_2}} \label{Eqn:corrFunc} \end{align} \noindent where ideally $A = C + 1$, but we allow for the possibility of $A < C + 1$ to account for imperfect background measurements and finite detector bandwidth. The results of this fit are plotted in Fig.~\ref{Fig:MLwithNVCorrBkgd}\textbf{d}, clearly showing the antibunching dip at $\tau = 0$ below $\dfrac{1 + C}{2}$, satisfying the condition of a single-photon emitter, as well as the characteristic short-delay bunching of an NV center due to shelving in the spin-singlet manifold. \section*{Acknowledgements} This work was supported by an NSF CAREER grant (ECCS-1553511), the University Research Foundation, and the Singh Center for Nanotechnology at the University of Pennsylvania, a member of the National Nanotechnology Coordinated Infrastructure (NNCI), which is supported by the National Science Foundation (Grant ECCS-1542153). S.A.M. and E.C.G. were supported by the Netherlands Organisation for Scientific Research (NWO) and the European Research Council under the European Union’s Seventh Framework Programme ((FP/2007-2013)/ERC grant agreement no. 337328, “Nano-EnabledPV”). \section*{Author contributions} R. R. G. and T.-Y. H. contributed equally to this work. R. R. G. and L. C. B. conceived of the project. R. R. G., S. A. M., and E. C. G. performed the design and simulations; R. R. G. and G. G. L. fabricated the metalens; R. R. G., T.-Y. H., D. A. H., A. L. E., and L. C. B. performed the measurements and analysis. All authors contributed to writing the manuscript. \section*{Methods} \noindent \textbf{Design.} The metalens was designed using the procedure devised by \citeauthor{Lalanne_JOSAA_99} for TiO$_2$ deposited on glass \cite{Lalanne_JOSAA_99}. The procedure was carried out as follows: First, the Bloch-mode effective index, $n_{\text{eff}}$, was calculated as a function of pillar diameter (Fig.~\ref{Fig:Concept}\textbf{b}) on a subwavelength grid. The grid pitch, $\Lambda$, was chosen to be just below the onset of first-order diffraction, $\Lambda \leq \frac{\lambda}{n_\text{D}} = \SI{291}{\nano\meter}$ at $\lambda = \SI{700}{\nano\meter}$, which was rounded up to $\Lambda = \SI{300}{\nano\meter}$. The pillar height was chosen to be $h = \SI{1.0}{\micro\meter}$ and the minimum pillar diameter was set to $d_{\text{min}}=\SI{100}{\nano\meter}$ to ensure compatibility with our fabrication process. The maximum pillar diameter, $d_{\text{max}}$, was then found by determining the $n_{\text{eff}}$ required to achieve an optical pathlength increase of $2\pi$ relative to the minimum pillar diameter: \begin{equation} n_{\text{eff}}\left(d_{\text{max}}\right) = \frac{\lambda}{h} + n_{\text{eff}}\left(d_{\text{min}}\right). \label{eqn:PhiMax} \end{equation} \noindent The corresponding $d_{\text{max}}$ is found from the dispersion curve in Fig.~\ref{Fig:Concept}\textbf{b}.
The minimum and maximum pillar diameters are indicated in Fig.~\ref{Fig:Concept}\textbf{b} (black dashed lines) along with their relative optical pathlengths (red dashed lines). The Fresnel phase profile in Fig.~\ref{Fig:ML_design}\textbf{a} was calculated by $\phi = n_{\mathrm{D}}k_0 \left(f-\sqrt{f^2 + x^2 + y^2}\right)$, with 93 grid points for a diameter of \SI{27.9}{\micro\meter} measured by the grid edges at the maximum widths along the Cartesian design dimensions. The symmetry of this structure ensures polarization-independent focusing, which has been shown for similar designs using TiO$_2$ deposited on glass \cite{Khorasaninejad_NL_16}. \vspace{.5cm} \noindent \textbf{Fabrication.} The metalens was fabricated on $\SI{3.0}{\milli\meter}\times\SI{3.0}{\milli\meter}\times\SI{0.15}{\milli\meter}$ double-side polished high-pressure/high-temperature (HPHT)-grown single-crystal diamond (Applied Diamond, Inc.). The diamond surface was cleaned in \SI{90}{\celsius} Nano-Strip (a stabilized mixture of sulfuric acid and hydrogen peroxide, Cynaktec KMB 210034) for \SI{30}{\minute}, followed by a \SI{10}{\minute} plasma clean in a barrel asher with 40~sccm O$_2$ and \SI{300}{\watt} RF power. The metalens pattern was proximity effect corrected (see supporting information) and written in hydrogen silsesquioxane (HSQ, Dow Corning, Fox-16) using a 50 keV electron beam lithography tool (Elionix, ELS-7500EX). Prior to spin-coating HSQ, a \SI{7}{\nano\meter} SiO$_2$ layer was deposited on the diamond surface by electron beam evaporation to promote adhesion. After exposure, the pattern was developed in a mixture of \SI{200}{\milli\liter} deionized water with \SI{8}{\gram} of sodium chloride and \SI{2}{\gram} of sodium hydroxide \cite{Yang_JVSTB_07}. Our e-beam lithography process for HSQ on diamond can be found in ref.~\onlinecite{Grote_SC_16}. A reactive ion etch (RIE, Oxford Instruments, Plasmalab 80) was used to remove the SiO$_2$ adhesion layer and to transfer the HSQ pattern into the diamond surface. The SiO$_2$ adhesion layer was removed by a \SI{1}{\minute} CF$_4$ reactive ion etch \cite{Metzler_SC_16}, followed by a \SI{23}{\minute} O$_2$ RIE etch with a flow rate of 40~sccm, a chamber pressure of \SI{75}{\milli\torr}, and an RF power of \SI{200}{\watt} to form the diamond pillars. Finally, the HSQ hardmask was removed using buffered oxide etch. \vspace{.5cm} \noindent \textbf{Simulations.} Calculations of $n_{\text{eff}}$, $\phi$ (Fig.~\ref{Fig:Concept}\textbf{b}, left and right axes, respectively), and pillar transmission efficiency (supporting information) were performed using 3D rigorous coupled-wave analysis (RCWA) based on the method developed by \citeauthor{Rumpf_PIERS_11}\cite{Rumpf_PIERS_11}. The effective index of the pillars was calculated by solving for the eigenvalues of Maxwell's equations with the $z$-invariant refractive index profile of the pillar cross-section in a $\SI{300}{\nano\meter}\times\SI{300}{\nano\meter}$ square unit cell at $\lambda = \SI{700}{\nano\meter}$. The eigenproblem was defined in a truncated planewave basis using $25\times25$ planewaves, with implicit periodic boundary conditions. Following these calculations, the pillar height was set to \SI{1.0}{\micro\meter} with air above and homogeneous diamond below, and the complex amplitude transmission coefficient, $t$, of a normal-incidence planewave from air was calculated as a function of pillar diameter. The right axis of Fig.~\ref{Fig:Concept}\textbf{b} was found by $\phi(d) = \angle t(d)$.
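The design procedure above amounts to sampling the Fresnel phase on the \SI{300}{\nano\meter} grid, wrapping it to $[0,2\pi)$, and inverting the $\phi(d)$ curve to select a pillar diameter at each grid point. The Python sketch below illustrates this mapping; the linear phase-versus-diameter lookup and the focal length are hypothetical placeholders standing in for the RCWA curve of Fig.~\ref{Fig:Concept}\textbf{b} and the design value:
\begin{verbatim}
# Sketch of the phase-to-diameter design step (placeholder phi(d) curve).
import numpy as np

LAM, N_D, PITCH, D_MIN = 0.700, 2.4, 0.300, 0.100   # um

def fresnel_phase(x, y, f_um):
    k0 = 2 * np.pi / LAM
    return N_D * k0 * (f_um - np.sqrt(f_um**2 + x**2 + y**2))

def pillar_diameters(f_um, n_grid=93, d_max=0.25):
    # placeholder lookup: phase rises linearly from 0 to 2*pi between
    # d_min and a hypothetical d_max (the real curve is from RCWA)
    d_lut = np.linspace(D_MIN, d_max, 256)
    phi_lut = np.linspace(0.0, 2 * np.pi, 256)
    x = (np.arange(n_grid) - (n_grid - 1) / 2) * PITCH
    X, Y = np.meshgrid(x, x)
    phi = np.mod(fresnel_phase(X, Y, f_um), 2 * np.pi)
    return np.interp(phi, phi_lut, d_lut)

d_map = pillar_diameters(f_um=15.0)   # hypothetical focal length
print(d_map.shape)                    # (93, 93) diameters in um
\end{verbatim}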
The focused spot in Fig.~\ref{Fig:ML_performance}\textbf{a} was calculated using 3D finite-difference time-domain simulations (FDTD, Lumerical Solutions, Inc.). The \SI{27.9}{\micro\meter}-diameter metalens is contained in a $\SI{28.1}{\micro\meter}\times\SI{28.1}{\micro\meter}\times\SI{22.25}{\micro\meter}$ total-field/scattered-field (TFSF) excitation source to reduce artifacts caused by launching a planewave into a finite structure. Perfectly matched layers (PMLs) were used as boundary conditions \SI{0.5}{\micro\meter} away from the TFSF source. The simulation mesh in the pillars was set to $\SI{10}{\nano\meter}\times\SI{10}{\nano\meter}\times \SI{10}{\nano\meter}$, increasing gradually to \SI{50}{\nano\meter} along the propagation ($\hat{z}$)-direction into the diamond. Diamond is modeled with a non-dispersive refractive index, $n_D=2.4$. An $x$-polarized planewave pulse ($\omega_0 \approx 2\pi\times\SI{440}{\tera\hertz},\Delta \omega \approx 2\pi\times\SI{125}{\tera\hertz}$) is launched from air toward the metalens surface. Steady-state spatial electric field distributions, $\vec{E}(\vec{r})$, at five wavelengths ranging from \SIrange{600}{800}{\nano\meter} were stored, and the spatial fields at $\lambda = \SI{700}{\nano\meter}$ are plotted as transverse ($|\vec{E}(z=f)|^2$) and axial ($|\vec{E}(y=0)|^2$) intensity distributions in Fig.~\ref{Fig:ML_performance}\textbf{a}. The focal length, $f_{\text{ML}}$, at each wavelength (Fig.~\ref{Fig:ML_performance}\textbf{e}) was determined by finding the grid point in the simulation cell where $|\vec{E}|^2$ is maximum. The spatial distribution of the steady-state field amplitude, $E_x(\vec{r})$, in Fig.~\ref{Fig:Concept}\textbf{a} was simulated by removing the TFSF source and placing an $\hat{x}$-oriented dipole current source at the metalens focus position with a wavelength of \SI{700}{\nano\meter}. The reflection spectrum (Fig.~\ref{Fig:ML_performance}\textbf{f}) was calculated by integrating the time-averaged Poynting vector, $S_z = -\frac{1}{2}\text{Re}\left\{\vec{E}\times\vec{H}^*\right\}\cdot \hat{z}$, over a $\SI{30}{\micro\meter}\times\SI{30}{\micro\meter}$ surface, \SI{0.1}{\micro\meter} above and \SI{0.4}{\micro\meter} below the metalens within the TFSF source volume. The simulation volume was reduced to $\SI{31}{\micro\meter}\times\SI{31}{\micro\meter}\times\SI{2}{\micro\meter}$ and the number of wavelength points was increased to 41 for these simulations. The images in Fig.~\ref{Fig:ML_performance}\textbf{b} represent the optical intensity, $I$, collected by a detector at a focus position in the sample, $\vec{r}_{\text{image}}$, defined by the FSM in the transverse directions and by the sample stage in the axial direction: $\vec{r}_{\text{image}} = x_{\text{FSM}}\cdot\hat{x} + y_{\text{FSM}}\cdot\hat{y} + z_{\text{piezo}}\cdot\hat{z}$. 
These images are produced by coherently convolving the FDTD-calculated steady-state fields, $\vec{E}(\vec{r})$, with the point-spread function (PSF) of the microscope, which is modeled by numerically evaluating the diffraction integrals, $I_0,I_1,I_2$, that define the dyadic Green's function of a high-NA optical system \cite{Novotny_12}: \begin{align} \mathbf{G}&(\vec{r}_{\text{image}},\vec{r},\lambda) = \left[\begin{array}{ccc} G_{xx} & G_{xy} & G_{xz} \\ G_{yx} & G_{yy} & G_{yz} \\ 0 & 0 & 0 \end{array}\right] \nonumber\\ =& \left[\begin{array}{ccc} I_0 + I_2\cos2\phi & I_2 \sin2\phi & -2jI_1\cos\phi \\ I_2\sin2\phi & I_0 - I_2\cos2\phi & -2jI_1\sin\phi \\ 0 & 0 & 0 \end{array}\right], \label{Eqn:GreensFunc} \end{align} \noindent with the inclusion of an aberration function that accounts for the optical pathlength difference introduced by imaging through media with mismatched refractive indices \cite{Sheppard_JM_97} ($n_{\text{oil}} = 1.518$ and $n_{\text{D}} = 2.4$ for our measurement setup). We assume an infinitesimal pinhole, which is consistent with our imaging system being below the confocal condition (see supporting information). Using Eqn.~(\ref{Eqn:GreensFunc}), the image formed by our microscope is modeled in the following manner (see supporting information): \begin{align} I(\vec{r}_{\text{image}}) &= |G_{xx}*E_x + G_{xy}*E_y + G_{xz}*E_z|^2 \nonumber \\ &+ |G_{yx}*E_x + G_{yy}*E_y + G_{yz}*E_z|^2 \label{Eqn:coherentConvlution} \end{align} \noindent where $*$ denotes a three-dimensional spatial convolution. The transverse, $I(z_{\text{piezo}}=f)$, and axial, $I(y_{\text{FSM}}=0)$, image intensity distributions at $\lambda = \SI{700}{\nano\meter}$ are shown in Fig.~3\textbf{b}, and cross-sections, $I(y_{\text{FSM}}=0,z_{\text{piezo}}=f)$, at $\lambda = \SI{600}{\nano\meter},\SI{700}{\nano\meter},\SI{800}{\nano\meter}$ are plotted in Fig.~\ref{Fig:ML_performance}\textbf{d} (red curves). Transverse profiles at five wavelengths ranging from $\lambda = \SIrange{600}{800}{\nano\meter}$ are plotted in the supporting information. \vspace{.5cm} \noindent \textbf{Experimental.} Measurements of the metalens were carried out with a custom-built confocal microscope, composed of an oil-immersion objective with an adjustable iris (Nikon Plan Fluor x100/0.5-1.30) and an inverted optical microscope (Nikon Eclipse TE200) with a $\hat{z}$-axis piezo stage (Thorlabs MZS500-E) as well as a scanning stage for the $\hat{x}$- and $\hat{y}$-axes (Thorlabs MLS203-1). The diamond host substrate was fixed to a microscope coverslip (Fisher Scientific 12-548-C) using immersion oil (Nikon type N) with the patterned surface facing upwards. A combination of $\SI{30}{\milli\meter}$ cage-system and SM1-thread components (Thorlabs) was used to create a fiber-coupled optical path above the stage of the inverted microscope. This configuration allowed for simultaneous excitation and measurement of the metalens from air (fiber-coupled path) or through diamond (objective path). The objective path was routed outside of the microscope body so that laser-scanning confocal excitation and collection optics could be added. A $4f$ relay-lens system consisting of two achromatic doublet lenses (Newport, $\SI{25.4}{\milli\meter}\times\SI{150}{\milli\meter}$ focal length, PAC058AR.14) is used to align the back aperture of the objective to a fast-steering mirror (FSM, Optics in Motion, OIM101), which is used to raster the diffraction-limited confocal volume in the transverse $x-y$ plane of the objective space.
A \SI{560}{\nano\meter} long-pass dichroic mirror (Semrock, BrightLine FF560-FDi01) placed after the FSM was used to couple a \SI{532}{\nano\meter} excitation laser (Coherent, Compass 315M-150) into the objective, while wavelengths above \SI{560}{\nano\meter} pass through the dichroic mirror and are focused into a \SI{25}{\micro\meter}-core, 0.1~NA, multimode fiber (Thorlabs M67L01) that can be connected to a single-photon counting module (Excelitas, SPCM-AQRH-14-FC) or a spectrometer (Princeton Instruments IsoPlane-160, \SI{750}{\nano\meter} blaze wavelength with 1200 G/mm) with a thermoelectrically-cooled CCD (Princeton Instruments PIXIS 100BX). Computer control of the FSM and counting of the electrical output of the SPCM are achieved using a data acquisition card (DAQ, National Instruments PCIe-6323). For the characterization measurements presented in Fig.~3, a broadband supercontinuum source (Fianium WhiteLase SC400) was coupled into a single-mode fiber (Thorlabs P1-630AR-2), which was used to illuminate the metalens from the fiber-coupled path of our microscope. A $f = \SI{2.0}{\milli\meter}$ collimating lens (Thorlabs CFC-2X-A) was used to create a \SI{380}{\micro\meter} diameter Gaussian beam that emulates the planewave source used in our FDTD simulations. The excitation wavelength is set by passing the supercontinuum beam through a set of linear variable short-pass (Delta Optical Thin Film, LF102474) and long-pass filters (Delta Optical Thin Film, LF102475) prior to fiber-coupling, which can be adjusted to select a single wavelength with $<\SI{8}{\nano\meter}$ bandwidth or removed completely for broadband excitation. The transverse profile and cross-sections in Fig.~\ref{Fig:ML_performance}\textbf{c,d} were measured by filtering the supercontinuum source to a single wavelength and rastering the FSM while collecting counts with the SPCM connected to the confocal path at each scan position. This process is repeated for a series of $z$-stage positions to measure the axial profile, which is shown in Fig.~\ref{Fig:ML_performance}\textbf{c} at $\lambda = \SI{700}{\nano\meter}$ and was used to find the metalens focal length as a function of wavelength in Fig.~\ref{Fig:ML_performance}\textbf{e}. For reflection measurements (Fig.~\ref{Fig:ML_performance}\textbf{f}), a $f = \SI{15}{\milli\meter}$ achromatic lens (Thorlabs AC064-015-B) is used to focus the collimated excitation beam to a $\sim\SI{30}{\micro\meter}$-diameter spot at the top surface of the diamond. A beamsplitter cube (Thorlabs BS014) was added between the collimating and focusing lenses so that reflected light could be focused into a \SI{200}{\micro\meter}-core MMF (Thorlabs, M25L01) that is coupled to a spectrometer (Thorlabs CCS100) using a $f = \SI{100}{\milli\meter}$ achromatic doublet lens (Newport, PAC052AR.14). In Fig.~\ref{Fig:ML_with_NV}, the fiber-coupled path was used to image a single NV center through the metalens, as shown in Fig.~\ref{Fig:ML_with_NV}\textbf{a}. This was achieved with two achromatic doublet lenses (L1 \& L2) with focal lengths of $f = \SI{13}{\milli\meter}$ and $f = \SI{15}{\milli\meter}$ (Thorlabs AC064-013/015-B), respectively, aligned to a \SI{25}{\micro\meter}-core, 0.1~NA, multimode fiber (Thorlabs M67L01). The multimode fiber was then connected to a second SPCM (Excelitas, SPCM-AQRH-14-FC), allowing for simultaneous PL collection from both the fiber-coupled and objective paths while scanning the excitation source.
The long-pass filters (LPF) in both collection lines consisted of \SI{532}{\nano\meter} and \SI{568}{\nano\meter} long-pass filters (Semrock, EdgeBasic BLP01-532R and EdgeBasic BLP01-568R) for spectral measurements, with an additional $\SI{650}{\nano\meter}$ long-pass filter (Thorlabs, FEL0650) in both paths to improve the signal-to-background ratio for PL, saturation, and cross-correlation measurements. The outputs of both SPCMs were connected to a time-correlated single-photon counting card (TCSPC, PicoQuant, PicoHarp 300) to collect photon arrival-time data that was used to calculate cross-correlation functions (Fig.~\ref{Fig:ML_with_NV}\textbf{f}). Background spectra and saturation curves were measured at a transverse scan position away from the NV, but still within the field-of-view of the metalens, and were subtracted from measurements taken on the NV. This process was also used to determine the background for correcting cross-correlation data, by interleaving 40 measurements off the NV with 40 measurements taken on the NV, each with a \SI{5}{\minute} acquisition time. Further details on background subtraction of the measurements in Fig.~\ref{Fig:ML_with_NV} are given in the supporting information. \vspace{.5cm} \noindent{\textbf{Analysis}.} The NA of the metalens, NA$_{\text{ML}}$, plotted in Fig.~\ref{Fig:ML_performance}\textbf{e} is calculated by fitting the simulated transverse focal spot at each wavelength to the paraxial point-spread function of an ideal lens, an Airy disk \cite{Novotny_12}, \begin{equation} I = \left|\frac{2J_1\left(\text{NA}_{\text{ML}}k_0r\right)}{\text{NA}_{\text{ML}}k_0r}\right|^2, \label{Eqn:Airy} \end{equation} \noindent where $k_0 = 2\pi/\lambda$ is the free-space wavenumber and $r = \sqrt{x^2 + y^2}$ is the radial coordinate in the focal plane. Fits are performed using non-linear least-squares curve fitting (MATLAB function \textbf{lsqcurvefit}). The entrance pupil diameter, $D$, of the metalens can be calculated geometrically using NA$_{\text{ML}}$ and $f_{\text{ML}}$: \begin{equation} D = 2f_{\text{ML}}(\lambda)\tan\left[\sin^{-1}\left(\frac{\text{NA}_{\text{ML}}}{n_{\text{D}}}\right)\right]. \label{Eqn:EntrancePupil} \end{equation} \noindent Using Fig.~\ref{Fig:ML_performance}\textbf{e} along with Eqn.~(\ref{Eqn:EntrancePupil}), we find that $D = \SI{19.3}{\micro\meter}$, which is smaller than the physical \SI{27.9}{\micro\meter} diameter of the metalens. This indicates a maximum collection angle inside the diamond of $\theta_{\text{max}} = \sin^{-1}\left(\frac{\text{NA}_{\text{ML}}}{n_{\text{D}}}\right) = \SI{27.8}{\degree}$. Despite this limited collection angle, Fig.~\ref{Fig:ML_performance}\textbf{e} clearly illustrates that NA$_{\text{ML}} > 1.0$, which could be increased further by using diffractive designs that support larger angles. The focal length of the metalens in Fig.~\ref{Fig:ML_performance}\textbf{e} was determined by measuring the distance between the metalens surface and the focused spot formed below the metalens using the piezo stage of the microscope. The distance traversed by the piezo stage is then scaled by a factor of $\approx \frac{n_{\text{D}}}{n_{\text{oil}}}$ to compensate for distortions caused by imaging through diamond \cite{Visser_Scanning_94}. Further details are given in the supporting information.
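A compact sketch of this NA extraction and pupil estimate is given below. Here \texttt{scipy.optimize.curve\_fit} is used as a Python stand-in for the MATLAB \textbf{lsqcurvefit} routine quoted above, and the values assigned to \texttt{NA\_fit} and \texttt{f\_ML} are illustrative placeholders, not measured quantities.
\begin{verbatim}
# Sketch: fit a transverse focal profile to the Airy-disk formula
# above and estimate the entrance pupil diameter geometrically.
# curve_fit is a Python stand-in for MATLAB's lsqcurvefit.
import numpy as np
from scipy.optimize import curve_fit
from scipy.special import j1

lam, n_D = 700e-9, 2.4

def airy(r, NA):
    x = NA * (2 * np.pi / lam) * r
    x = np.where(x == 0, 1e-12, x)     # regularize the r = 0 limit
    return (2 * j1(x) / x) ** 2

# With radial coordinate / intensity arrays r, I_profile one would call:
# (NA_fit,), _ = curve_fit(airy, r, I_profile, p0=[1.0])
NA_fit, f_ML = 1.12, 18e-6             # placeholder values
theta_max = np.arcsin(NA_fit / n_D)    # max collection angle in diamond
D = 2 * f_ML * np.tan(theta_max)       # entrance pupil diameter
\end{verbatim}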
The reflectance spectrum in Fig.~\ref{Fig:ML_performance}\textbf{f} was normalized using the reflected optical power measured with the fiber-coupled path aligned to the metalens, $P_{\text{ML}}(\lambda)$, and off the metalens on a planar region of the diamond surface, $P_{\text{surface}}(\lambda)$, using the following expression: \begin{equation} R_{\text{ML}}(\lambda) = \frac{P_{\text{ML}}(\lambda)}{P_{\text{surface}}(\lambda)}R_{\text{surface}}, \label{Eqn:RefNorm} \end{equation} \noindent where $R_{\text{surface}} = \frac{P_{\text{surface}}(\lambda)}{P_{\text{in}}(\lambda)}$ is the reflectance of an air/diamond interface, calculated using the Fresnel coefficients to be $17\%$ at normal incidence. The ripples in Fig.~\ref{Fig:ML_performance}\textbf{f} are due to ghosting from the beamsplitter cube used to collect the reflected signal (see supporting information). The measured reflectance spectrum is slightly lower than the simulated spectrum (both are plotted in Fig.~\ref{Fig:ML_performance}\textbf{f}). We believe the source of this discrepancy is the finite NA of our top collection optics: the simulations account for the reflected light over all angles (specular and scattered), while our collection optics only cover a limited range of angles. The saturation curves in Fig.~\ref{Fig:ML_with_NV}\textbf{e} were fit with the following equation: \begin{equation} C = \frac{C_{\text{sat}}}{1 + \frac{P_{\text{sat}}}{P_{\text{pump}}}}, \label{Eqn:Saturation} \end{equation} \noindent using non-linear least-squares curve fitting (MATLAB function \textbf{lsqcurvefit}), resulting in saturation count rates of $C^{\text{ML}}_{\text{sat}} = 121.7\pm2.2$~photons/ms and $C^{\text{obj}}_{\text{sat}} = 33.5\pm0.6$~photons/ms for the metalens signal, $S_{\text{ML}}$, and confocal signal, $S_{\text{obj}}$, respectively. The saturation power was $P_{\text{sat}} = 4.3 \pm 0.1~\si{\milli\watt}$ in both paths, since they are both pumped by the same excitation beam. The collection efficiency as a function of numerical aperture can be estimated as \cite{Castelletto_NJP_11}: \begin{align} \eta =& \frac{1}{32}\Bigg[ 15 \left(1-\sqrt{1-\left(\frac{\text{NA}}{n_{\text{D}}}\right)^2}\right) \nonumber \\ &+ \left(1 - \cos\left[3 \sin^{-1}\left(\frac{\text{NA}}{n_{\text{D}}}\right) \right]\right)\Bigg]. \label{Eqn:CollectionEfficiency} \end{align} Assuming that the excitation and collection paths have similar transmission efficiencies, the ratio of the collection efficiencies of the two paths is equal to the ratio of the saturation count rates, $\frac{\eta_{\text{ML}}}{\eta_{\text{obj}}} = \frac{C^{\text{ML}}_{\text{sat}}}{C^{\text{obj}}_{\text{sat}}}$. Using a numerical aperture of NA$_{\text{obj}} = 0.75$ for the confocal collection path, the metalens is estimated to have NA$_{\text{ML}} \approx 1.4$. If instead we assume that the ratio of the collection efficiencies is proportional to the ratio of the integrated spectra in Fig.~\ref{Fig:ML_with_NV}\textbf{d}, we find that NA$_{\text{ML}} = 1.16$. The discrepancy between these values arises from differences in the collection efficiency of the two paths caused by the confocal aperture and other optical components in the path. However, this rough calculation provides strong evidence that NA$_{\text{ML}}>1.0$.
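The NA$_{\text{ML}}$ estimate obtained from the ratio of saturation count rates amounts to a numerical inversion of Eqn.~(\ref{Eqn:CollectionEfficiency}). The following Python fragment is an illustrative sketch of that inversion (not the analysis code used); the count rates and NA$_{\text{obj}}$ are the values quoted above, while the function \texttt{eta} and the root-finding bracket are our own construction.
\begin{verbatim}
# Sketch: invert the collection-efficiency model to estimate NA_ML
# from the ratio of the fitted saturation count rates.
import numpy as np
from scipy.optimize import brentq

n_D = 2.4

def eta(NA):                      # collection-efficiency formula above
    s = NA / n_D
    return (15 * (1 - np.sqrt(1 - s**2))
            + (1 - np.cos(3 * np.arcsin(s)))) / 32.0

C_ml, C_obj, NA_obj = 121.7, 33.5, 0.75
target = (C_ml / C_obj) * eta(NA_obj)   # eta_ML implied by the ratio
NA_ml = brentq(lambda NA: eta(NA) - target, 0.1, 2.39)
print(NA_ml)                            # approximately 1.4
\end{verbatim}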
Background correction of the cross-correlation data in Fig.~\ref{Fig:ML_with_NV}\textbf{f} was performed using the following relationship \cite{Brouri_OL_00}: \begin{equation} g^{(2)}_{\text{bc}}(\tau) = \dfrac{g^{(2)}(\tau) - (1 - \rho^2)}{\rho^2} \label{Equ:gCorrected} \end{equation} \noindent where $g^{(2)}(\tau)$ is the measured second-order correlation function and $\rho = 0.26\pm0.01$ is the total signal-to-background ratio determined from the 40 repeated measurements. After background correction, $g^{(2)}_{\text{bc}}(\tau)$ is fit with the following expression: \begin{align} g^{(2)}_{\text{bc}}(\tau) &= 1 - Ae^{-\tfrac{\abs{\tau-\tau_0}}{\tau_1}} + Ce^{-\tfrac{\abs{\tau-\tau_0}}{\tau_2}}, \label{Equ:corrFunc} \end{align} \noindent which corresponds to the approximation of the NV center as a 3-level system \cite{Kitson_PRA_98}. The fit coefficients are as follows: $A = 1.31\pm0.05$, $C = 0.48\pm0.02$, $\tau_1 = 8.82\pm\SI{0.05}{\nano\second}$, $\tau_2 = 220.89\pm\SI{9.28}{\nano\second}$. Further details are given in the supporting information.
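For completeness, the background correction of Eqn.~(\ref{Equ:gCorrected}) and the 3-level fit of Eqn.~(\ref{Equ:corrFunc}) can be sketched as follows. This is an illustrative Python fragment: the arrays \texttt{tau} and \texttt{g2} stand for hypothetical measured correlation data, and \texttt{curve\_fit} again stands in for the fitting routine actually used.
\begin{verbatim}
# Sketch: background-correct measured g2 data and fit the 3-level
# model.  tau (ns) and g2 are assumed measured arrays (placeholders).
import numpy as np
from scipy.optimize import curve_fit

rho = 0.26                          # signal-to-background ratio

def three_level(t, A, C, t0, tau1, tau2):
    return (1 - A * np.exp(-np.abs(t - t0) / tau1)
              + C * np.exp(-np.abs(t - t0) / tau2))

# g2_bc = (g2 - (1 - rho**2)) / rho**2        # background correction
# popt, _ = curve_fit(three_level, tau, g2_bc,
#                     p0=[1.3, 0.5, 0.0, 9.0, 220.0])
\end{verbatim}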
\section{Introduction } \hskip .5cm Recently, in several papers (\cite{Lukier91} - \cite{Chaichian}), quantum deformations of the $D=4$ Poincar{\'e} algebra, which describes the relativistic symmetries, were considered. We would like to stress that during the last twenty years the supersymmetric extensions of the relativistic symmetries have been among the most studied ideas in the theory of fundamental interactions. It is therefore natural to ask what the quantum deformations of the superalgebras and supergroups describing the supersymmetric extensions of the four-dimensional space-time symmetries look like. The deformation of the $N=1$ superPoincar{\'e} algebra with fourteen generators $I_A = ( M_i, L_i, P_{\mu}, Q_i, \overline {Q}_i ), \quad (A=1, \ldots, 14)$ can be studied in at least two different ways: \begin{description} \item[a)] By considering the Hopf subalgebras of the quantum superconformal algebra\linebreak $U_q(SU(2, 2; 1))$. The complete description of this approach should take into account all possible quantum deformations of $SU(2, 2; 1)$\footnote{We would like to recall here that for the complexified conformal algebra one can introduce the $R$-matrix with 7 parameters \cite{Schir91}. The analogous general multiparameter deformations of quantum superalgebras were not studied in the literature (see however the partial results in \cite{Manin}).}. In the case studied so far (see \cite{Dobrev92}) the minimal Hopf subalgebra of $U_q (SU(2,2;1))$ containing the deformed $N=1$ superPoincar{\'e} generators has 16 generators: the 14 generators of the superPoincar{\'e} algebra ${\cal P}_{4;1}$ as well as the dilatation generator $D$ and the chiral generator $A$. We have therefore \begin{equation} U_q (SU(2, 2; 1)) \supset U_q ({\cal P}_{4;1} +\!\!\!\!\!\smash{\supset} (D \oplus A)) \stepcounter{rown} \end{equation} i.e.\ we obtain in this way the quantum deformation of the $D=4$ super-Weyl algebra. \item[b)] By considering the contraction of the quantum super-de Sitter algebra ${\cal U}_q (OSp (1;4))$. It appears that such a method provides a genuine 14-generator quantum deformation of the $N=1$ Poincar{\'e} superalgebra, the $\kappa$-deformed super-Poincar{\'e} algebra first given in \cite{LukSob93} and described briefly in Sect.~2. \end{description} In this paper we shall study further the quantum deformation of the $N=1$ super-Poincar{\'e} group given in \cite{LukSob93}. From the $\kappa$-deformed super-Poincar{\'e} algebra, which is a non-commutative Hopf algebra, a non-trivial classical $r$-matrix can be extracted. Indeed, in \cite{LukSob93} it has been shown that the graded-antisymmetric part of the coproducts, to first order in the deformation parameter $h \equiv {1\over {\kappa}}$, is given by \begin{equation} \delta (X) = {1\over {\kappa}} [X \otimes {\bf 1} + {\bf 1} \otimes X, r] \stepcounter{rown} \end{equation} \begin{equation} r = L_i \wedge P_i - {i\over 4}Q_{\alpha} \wedge \overline {Q}_{\dot {\alpha}} \equiv r^{AB} I_A \wedge I_B \stepcounter{rown} \end{equation} where $A \wedge B \equiv A \otimes B - (-1)^{\eta (A) \eta (B)}B \otimes A; \quad i=1,2,3; \quad \alpha = 1,2$.\\ The bitensor $r \in \hat g \otimes \hat g$ given by (1.3) describes the classical $r$-matrix for the $N=1$ Poincar{\'e} superalgebra, where $L_i$ denotes the boost generators, $P_i$ the three-momenta, and $Q_{\alpha}, \overline{Q}_{\dot {\alpha}}$ the supercharges written as Weyl two-spinors.
It appears that the classical $r$-matrix (1.3) satisfies the graded {\it modified} classical Yang-Baxter equation$^{\small 2}$, which permits one to introduce consistently, on the space $\hat g^*$ dual to $\hat g$, a non-trivial multiplication structure, determined by the cobracket (1.2). Introducing the generators $Z_A$ representing the supergroup parameters, one can define on the functions $f(Z_A)$ the graded Poisson $r$-bracket$^{\small 2}$ \begin{equation} \{f, g\} = \{f, g\}_R - \{f, g\}_L \stepcounter{rown} \end{equation} where $(a = R, L) ^{\small 3}$ \begin{equation} \{f, g\}_a = (-1)^{\eta (A)\eta (B)} (\stackrel{\leftarrow}{D}_A^{(a)}\!f)r^{AB}(\vec{D}_B^{(a)}\!g) \stepcounter{rown} \end{equation} and\\ \begin{description} \item[-] $\stackrel{\leftarrow}{D}_A^{(a)}$ denotes the left derivative, which for $a=R$ ($a=L$) is right-invariant (left-invariant) under supergroup transformations, \item[-] $\vec{D}_A^{(a)}$ denotes the right derivative, which is right-invariant (left-invariant) for $a=R$ ($a=L$). \end{description} In Sect.~3 we shall consider in more detail the Poisson-Lie supergroup structure on the $N=1$ Poincar{\'e} supergroup. It appears that for the choice of the $r$-matrix given by (1.3) the Poisson bracket (1.4) can be consistently quantized in a standard way, by replacing the (graded) Poisson brackets with (anti)commutators. In this way the supergroup parameters are promoted to the noncommuting generators of the quantum $N=1$ Poincar{\'e} supergroup, with the coproduct rules described by the composition law of two $N=1$ supersymmetry transformations. \vskip .5cm \footnoterule {\noindent\small $^2$ For the non-supersymmetric case see \cite{Drin83} - \cite{Drin86} \\ $^3$ In the supersymmetric case one can introduce the left- and right-side derivatives $$ \vec{d}f = \vec{d}Z_A {{\vec{\partial}\!f}\over {\partial\!Z_A}}\qquad \stackrel{\leftarrow}{d}\!f = {{\stackrel{\leftarrow}{\partial}\!f}\over {\partial\!Z_A}} \stackrel{\leftarrow}{d}\!Z_A \eqno (A.1) $$ where ${\vec{d}}^2 = {\stackrel{\leftarrow}{d}}^2 = 0$, satisfying different Leibniz rules $$ \vec{d}(f\!g) = \vec{d}\!fg +(-1)^{\eta(f)}f\vec{d}\!g\qquad \stackrel{\leftarrow}{d}(f\!g) = (-1)^{\eta(g)}\stackrel{\leftarrow}{d}\!fg + f\stackrel{\leftarrow}{d}\!g \eqno (A.2) $$ One gets that $$ {{\vec{\partial}\!f}\over {\partial\!Z_A}}= (-1)^{\eta(f)\eta(Z_A)} {{\stackrel{\leftarrow}{\partial}\!f}\over {\partial Z_A}} \eqno (A.3) $$ Using the relations (A.3) one can write the Poisson $r$-bracket on a supergroup in four different ways, which differ by suitable {\it sign} factors. The choice (1.5) is the standard one.} \pagebreak It appears that after this quantization procedure the Lorentz sector of the quantum $N=1$ Poincar{\'e} supergroup is classical, in analogy with the case of the quantum Poincar{\'e} group considered previously by Zakrzewski \cite{Zakrzinpress}. The deformation of the remaining generators of the quantum $N=1$ Poincar{\'e} supergroup, describing translations and supertranslations, provides the $\kappa$-deformed $N=1$ superspace, which is discussed in Sect.~4. Finally, in Sect.~5 we present an outlook and some unsolved problems. \section{$D=4$ Quantum superPoincar{\'e} Algebra} The $\kappa$-deformed $D=4$ Poincar{\'e} superalgebra given in \cite{LukSob93} has the structure of a noncommutative and noncocommutative Hopf superalgebra.
It is described by the following set of relations: \begin{description} \item[a)] Lorentz sector ($M_{\mu\nu} = (M_i, N_i)$, where $M_i = \frac{1}{2} \epsilon_{ijk}M_{jk}$ describe the non-relativistic $O(3)$ rotations, and $N_i$ describe the boosts). \begin{description} \item[$i)$] {\it algebra}\\ \stepcounter{rown} $$ [M_i, M_j] = i\epsilon_{ijk}M_k \qquad\quad [M_i, N_j] = i\epsilon_{ijk}N_k \eqno (\theequation a) $$ $$ [N_i, N_j] = -i\epsilon_{ijk}(M_k \cosh{{P_0}\over {\kappa}} - {1\over {8\kappa}}T_k \sinh{{P_0}\over {2\kappa}} + {1\over {16\kappa^2}}P_k (T_0 - 4M)) \eqno (\theequation b) $$ where $(\mu = 0, 1, 2, 3)$ \begin{equation} T_{\mu} = Q^A(\sigma_{\mu})_{A\dot B}Q^{\dot B} \stepcounter{rown} \end{equation} \item[$ii)$] {\it coalgebra}\\ $$ \Delta (M_i) = M_i \otimes \hbox{\bf 1} + \hbox{\bf 1} \otimes M_i \eqno (2.3 a) \stepcounter{rown} $$ $$ \begin{array}{ll} \Delta (N_i) = N_i \otimes e^{{P_0}\over {2\kappa}} + e^{-{{P_0}\over {2\kappa}}}\otimes N_i + {1\over {2\kappa}}\epsilon_{ijk} (P_j \otimes\null&\null\\ \\ \null\otimes M_k e^{{P_0}\over {2\kappa}} + M_j e^{-{{P_0}\over {2\kappa}}}\otimes P_k)+\null&\null\\ \\ \null+{i\over {8\kappa}} (\sigma_i)_{\dot\alpha\beta}(\overline{Q}_{\dot\alpha}e^{-{{P_0}\over {4\kappa}}}\otimes Q_{\beta}e^{{P_0}\over {4\kappa}} + Q_{\beta}e^{-{{P_0}\over {4\kappa}}}\otimes \overline{Q}_{\dot\alpha}e^{{P_0}\over {4\kappa}})&\null \end{array} \eqno (2.3 b) $$ \item[$iii)$] {\it antipodes}\\ $$ \begin{array}{ll} S(M_i)&= - M_i\\ S(N_i)&= - N_i + {{3i}\over {2\kappa}}P_i - {i\over {8\kappa}}(Q\sigma_i\overline{Q} + \overline{Q}\sigma_i Q) \end{array} \eqno (2.4) $$ \stepcounter{rown} \end{description} \item[b)] Fourmomenta sector $P_{\mu} = (P_i, P_0)$ \begin{description} \item[$i)$] {\it algebra}\\ $$ \lbrack M_i, P_j\rbrack = i\epsilon_{ijk}P_k \qquad \lbrack M_j, P_0\rbrack = 0 \eqno (2.5 a) $$ \stepcounter{rown}\vskip -0.3cm $$ \lbrack N_i, P_j\rbrack = i\kappa\delta_{ij}\sinh{{P_0}\over {\kappa}} \qquad \lbrack N_i, P_0\rbrack = iP_i \eqno (2.5 b) $$ $$ \lbrack P_{\mu}, P_{\nu}\rbrack = 0 \qquad (\mu , \nu = 0, 1, 2, 3) \eqno (2.5 c) $$ \item[$ii)$] {\it coalgebra}\\ $$ \Delta (P_i) = P_i \otimes e^{{P_0}\over {2\kappa}} + e^{-{{P_0}\over {2\kappa}}} \otimes P_i \eqno (2.6 a) $$ $$ \Delta (P_0) = P_0 \otimes \hbox{\bf 1} + \hbox{\bf 1} \otimes P_0 \eqno (2.6 b) $$ \end{description} The antipode is given by the relation \stepcounter{rown} $S(P_{\mu}) = -P_{\mu}$.
\item[c)] Supercharges sector \cite{LukSob93} \begin{description} \item[$i)$] {\it algebra} $$ \begin{array}{ll} \{Q_{\alpha}, Q_{\dot\beta}\} & = 4\kappa\delta_{\alpha\dot\beta}\sinh{{P_0}\over {2\kappa}} - 2P_i (\sigma_i)_{\alpha\dot\beta} \\ \\ \{Q_{\alpha}, Q_{\beta}\} & = \{Q_{\dot\alpha}, Q_{\dot\beta}\} = 0 \end{array} \eqno(2.7 a) $$\stepcounter{rown} $$ \lbrack M_i, Q_{\alpha}\rbrack = -{1\over 2} (\sigma _i)_{\alpha}^{\beta} Q_{\beta} \quad \lbrack M_i, Q_{\dot\alpha}\rbrack = -{1\over 2} (\sigma _i)_{\dot\alpha}^{\dot\beta} Q_{\dot\beta} \eqno (2.7 b) $$ $$ \lbrack N_i, Q_{\alpha}\rbrack = -{i\over 2}\cosh{{P_0}\over {2\kappa}} (\sigma _i)_{\alpha}^{\beta}Q_{\beta}\quad \lbrack N_i, Q_{\dot\alpha}\rbrack = {i\over 2}\cosh{{P_0}\over {2\kappa}} (\sigma _i)_{\dot\alpha}^{\dot\beta}Q_{\dot\beta} \eqno (2.7 c) $$ $$ \begin{array}{ll} \lbrack P_{\mu}, Q_{\alpha}\rbrack & = \lbrack P_{\mu}, Q_{\dot\beta}\rbrack = 0 \end{array} \eqno (2.7 d) $$ \item[$ii)$] {\it coalgebra} \begin{equation} \begin{array}{ll} \Delta (Q_{\alpha}) & = Q_{\alpha} \otimes e^{{P_0}\over {4\kappa}} + e^{-{{P_0}\over {4\kappa}}} \otimes Q_{\alpha}\\ \Delta (Q_{\dot\alpha}) & = Q_{\dot\alpha} \otimes e^{{P_0}\over {4\kappa}} + e^{-{{P_0}\over {4\kappa}}} \otimes Q_{\dot\alpha} \end{array} \stepcounter{rown} \end{equation} \item[$iii)$] {\it antipodes} \begin{equation} S(Q_{\alpha}) = - Q_{\alpha} \qquad S(Q_{\dot\alpha}) = - Q_{\dot\alpha} \stepcounter{rown} \end{equation} \end{description} \end{description} On the basis of the relations (2.1) - (2.9) one can single out the following features of the quantum superalgebra ${\cal U}_{\kappa} ({\cal P}_{4;1})$: \begin{description} \item[$i)$] The algebra, the coproducts and the antipodes of the Lorentz boosts $N_i$ depend on $Q_{\alpha}, Q_{\dot\alpha}$, i.e.\ neither the $\kappa$-deformed Poincar{\'e} sector nor the Lorentz sector forms a Hopf subalgebra. \item[$ii)$] Putting $Q_{\alpha} = Q_{\dot\alpha} = 0$ in the formulae (2.1) - (2.6) one obtains the $\kappa$-deformed Poincar{\'e} algebra considered in \cite{Lukier92}, i.e. $$ {\cal U}_{\kappa} ({\cal P}_{4;1}) \left |_{Q_{\alpha} = Q_{\dot\alpha} = 0}\right . = {\cal U}_{\kappa}({\cal P}_4) $$ \item[$iii)$] From (2.5 c) we see that the fourmomenta commute. This property implies, by duality, the standard addition formula for space-time fourvectors (see Sect.~4). \end{description} \section{Poisson $r$-brackets For $N=1$ Poincar{\'e} Supergroup And Their Quantization} The classical $N=1$ Poincar{\'e} Lie superalgebra with the cobracket (1.2) describes the $N=1$ Poincar{\'e} Lie super-bialgebra $(\hat g, \hat\delta)$, which is called {\it coboundary} \cite{Drin86} due to the relation (1.2) between the cobracket $\delta$ and the $r$-matrix. The coboundary super-bialgebras with the $r$-matrix satisfying the modified classical Yang-Baxter equation describe, infinitesimally, Poisson-Lie supergroups, with the supergroup action $(Z_A, Z_B) \longrightarrow Z_A \circ Z_B$ consistent with the Poisson structure given by the $r$-Poisson bracket (1.5).
These brackets satisfy the following properties: \begin{enumerate} \item Graded antisymmetry \begin{equation} \{f, g\} = - (-1)^{\eta\!(f)\eta\!(g)} \{g, f\} \stepcounter{rown} \end{equation} \item Graded Jacobi identity \begin{eqnarray} \lefteqn{(-1)^{\eta\!(f)\eta\!(h)} \{f, \{g, h\}\} + (-1)^{\eta\!(g)\eta\!(h)}\cdot\null}\nonumber\\ &&\null\cdot\{h, \{f, g\}\} + (-1)^{\eta\!(f)\eta\!(g)} \{g, \{h, f\}\} = 0 \stepcounter{rown} \end{eqnarray} \item Graded Leibniz rules \begin{equation} \begin{array}{ll} \{f, gh\} & = \{f, g\} h + (-1)^{\eta\!(f)\eta\!(g)}g\{f, h\} \\ \\ \{fg, h\} & = f\{ g, h\} + (-1)^{\eta\!(g)\eta\!(h)}\{ f, h\} g \end{array} \stepcounter{rown} \end{equation} \item Lie--Poisson property. Let us write the coproduct induced by the composition law of two supergroup transformations \begin{equation} \Delta (Z) = Z\stackrel{\circ}{\otimes} Z \stepcounter{rown} \end{equation} where ``$\stackrel{\circ}{\otimes}$'' denotes that we take the composition rule described by ``$\circ$'' and replace the product by the tensor product. The Lie--Poisson property takes the form \begin{equation} \Delta \{f, g\} = \{\Delta (f), \Delta (g)\} \stepcounter{rown} \end{equation} where the following rule for the multiplication of graded tensor products should be used: \begin{equation} (f_1 \otimes f_2) (g_1 \otimes g_2) = (-1)^{\eta\!(f_2)\eta\!(g_1)}f_1g_1 \otimes f_2g_2 \stepcounter{rown} \end{equation} \end{enumerate} In order to calculate explicitly the Poisson bracket (1.4) one can express the right- and left-invariant derivatives in terms of the ordinary ones, i.e.\ rewrite (1.4) as follows \begin{equation} \{f, g\} = f {{\stackrel{\leftarrow}{\partial}}\over {\partial\!Z_A}} \omega^{AB}(Z) {{\vec{\partial}}\over {\partial\!Z_B}}g \stepcounter{rown} \end{equation} If we observe that \begin{equation} \stackrel{\leftarrow}{D}_A^{(a)} = {{\stackrel{\leftarrow}{\partial}}\over {\partial\!Z^B}} \stackrel{\leftarrow}{\mu}^{(a)B}_A (Z) \qquad \vec{D}_A^{(a)} = {\vec{\mu}^{(a)B}_A}(Z){{\vec{\partial}}\over {\partial\!Z^B}} \stepcounter{rown} \end{equation} where $\stackrel{\leftarrow}{\mu}^{(a)}, {\vec{\mu}}^{(a)}$ can be calculated by differentiating the composition formulae of the supergroup parameters $Z_A$, one obtains that $(L=+,\quad R=-)$: \begin{equation} \omega^{AB}(Z) = \stackrel{\leftarrow}{\mu}^{(+)A}_C(Z) r^{CD}{\vec{\mu}^{(+)B}_D}(Z) - \stackrel{\leftarrow}{\mu}^{(-)A}_C (Z) r^{CD}\vec{\mu}_D^{(-)B}(Z) \stepcounter{rown} \end{equation} where the leading term at $Z=0$ is linear and describes the cobracket of the $N=1$ Poincar{\'e} bi-superalgebra $(\hat g, \hat\delta)$, in accordance with the relation (1.2). The quantization of the $N=1$ Poincar{\'e} supergroup consists of two steps: \hskip -0.3cm \begin{enumerate} \item Write (3.9) for the independent parameters $Z^A$ (the generators of the algebra of functions on the supergroup ${\cal P}_{4;1}$) \begin{equation} \{Z^A, Z^B\} = \omega^{AB}(Z) \stepcounter{rown} \end{equation} and calculate $\omega^{AB}$ by choosing the functions $\stackrel{\leftarrow}{\mu}^{(a)}, \vec{\mu}^{(a)}$ in (3.8), depending on the parametrization of the supergroup. \item Quantize the Poisson bracket by the substitution \begin{equation} \{Z^A, Z^B\} \longrightarrow \left\{ \begin{array}{ll} {1\over {i\hbar}}[{\hat Z}^A, {\hat Z}^B]_- & \hbox{if} \quad \eta\!(A)\cdot\eta\!(B) = 0\\ {1\over {i\hbar}}[{\hat Z}^A, {\hat Z}^B]_+ & \hbox{if} \quad \eta\!(A)\cdot\eta\!(B) = 1 \end{array}\right.
\stepcounter{rown} \end{equation} where $[{\hat A}, {\hat B}]_{\pm} = {\hat A}{\hat B} \pm {\hat B}{\hat A}$, and choose the ordering of the ${\hat Z}$-variables in $\omega^{AB}$ in such a way that the Jacobi identities are satisfied and the coproduct (3.4) is a homomorphism of the quantized superalgebra. \end{enumerate} Let us recall the supergroup composition law ($A$ is a $2\times 2$ $Sl (2;{\bf C})$ matrix): \begin{eqnarray} \lefteqn{(X_{\mu}, \theta_{\alpha}, A_{\alpha}^{\beta}) \circ (X_{\mu}', \theta_{\alpha}', A_{\alpha}'^{\beta})=\null}\nonumber\\ &&\null=(X_{\mu}+\Lambda_{\mu}^{\nu}(A)X_{\nu}'+{i\over 2} (\theta'^T A^{-1}\sigma^{\mu}\overline\theta - \theta^T\sigma^{\mu}(A^+)^{-1}\overline\theta'),\nonumber\\ &&\theta_{\alpha} +\theta_{\beta}'(A^{-1})^{\beta}_{\alpha}, \quad A_{\alpha}^{\gamma}{A'}_{\gamma}^{\beta}) \stepcounter{rown} \end{eqnarray} The formula (3.12) permits us to calculate the functions $\stackrel{\leftarrow}{\mu}^{(\pm)}, \vec{\mu}^{(\pm)}$ in the formula (3.9). We obtain, for example, the following formulae for the left-sided left-invariant super-derivatives: \begin{equation} \begin{array}{lll} \vec{D}_{\alpha}^{(+)}&=&(A^{-1})_{\alpha}^{\beta} {{\partial}\over {\partial\theta^{\beta}}}+{i\over 2}(A^{-1}\sigma^{\mu}\overline\theta)_{\alpha} {{\partial}\over {\partial\!X^{\mu}}}\\ {\vec{D}^{(+)\beta}_{\alpha}}&=&A_{\gamma}^{\beta}{{\partial}\over {\partial\!A_{\gamma}^{\alpha}}} \end{array} \stepcounter{rown} \end{equation} and by conjugation \begin{equation} \begin{array}{lll} \vec{D}_{\dot\alpha}^{(+)}&=&(A^{-1})_{\dot\alpha}^{\dot\beta}{{\partial}\over {\partial\overline\theta ^{\dot\beta}}} + {i\over 2}(\theta^T \sigma^{\mu} (A^+)^{-1})_{\dot\alpha} {{\partial}\over {\partial\!X^{\mu}}}\\ {\vec{D}^{(+)\dot\beta}_{\dot\alpha}}&=&(A)_{\dot\gamma}^{\dot\beta} {{\partial}\over {\partial\!A_{\dot\gamma}^{\dot\alpha}}} \end{array} \stepcounter{rown} \end{equation} Calculating the remaining invariant derivatives on the bosonic Poincar{\'e} subgroup and inserting in the formula (3.9) the $r$-matrix (1.3), we obtain the following fundamental $r$-Poisson brackets for the coordinates $(X_{\mu}, A_{\alpha}^{\beta}, A_{\dot\alpha}^{\dot\beta}, \theta_{\alpha}, \theta_{\dot\alpha})$ on the $N=1$ Poincar{\'e} supergroup\setcounter{footnote}{3} \footnote{We use the spinorial representation of the Lorentz generators, e.g. $ L_i = {1\over 4}(\sigma_i)_{\alpha}^{\beta}L^{\alpha}_{\beta}+(\overline\sigma_i)_{\dot\alpha} ^{\dot\beta} L^{\dot\alpha}_{\dot\beta} $}: \begin{description} \item[a)] Lorentz sector $(A_{\alpha}^{\beta}, A_{\dot\alpha}^{\dot\beta})$\\ The Lorentz subgroup parameters are classical, i.e.
\begin{equation} \{A_{\alpha}^{\beta}, A_{\gamma}^{\delta}\} = \{A_{\alpha}^{\beta}, A_{\dot\gamma}^{\dot\delta} \} = \{A_{\dot\alpha}^{\dot\beta}, A_{\dot\gamma}^{\dot\delta} \} = 0 \stepcounter{rown} \end{equation} \item[b)] Translations $(X_{\mu})$ (we denote $\theta = {{\theta_1}\choose{\theta_2}}, \overline\theta = {{\theta_{\dot 1}}\choose{\theta_{\dot 2}}}$) \begin{equation} \begin{array}{ll} \{X^i, X^j\} = {i\over {8\kappa}}\theta^T \sigma^i (\hbox{\bf 1}_2 - (AA^+)^{-1})\sigma^j \overline\theta-{i\over {8\kappa}}\theta^T \sigma^j (\hbox{\bf 1}-(AA^+)^{-1})\sigma^i \overline\theta&\null\\ \\ \{X^0, X^j\} = -{i\over {\kappa}}X^j + {i\over {8\kappa}}\theta^T \lbrack \sigma^j, (AA^+)^{-1}\rbrack \overline\theta&\null \end{array} \stepcounter{rown} \end{equation} \begin{equation} \begin{array}{lll} \{ A_{\alpha}^{\beta}, X^i\}&=&{1\over {2\kappa}}((A\sigma_{n})_{\alpha}^{\beta} \Lambda^i_n (A) - (\sigma^i\cdot\!A)_{\alpha}^{\beta})\\ \\ \{ A_{\alpha}^{\beta}, X^0\}&=&{1\over {2\kappa}}(A\sigma_i )_{\alpha}^{\beta} \Lambda^0_i (A) \end{array} \stepcounter{rown} \end{equation} \item[c)] Supertranslations \begin{equation} \{ \theta^{\alpha}, \theta^{\beta}\}=\{ \theta^{\dot\alpha}, \theta^{\dot\beta}\} = 0\quad \{ \theta^{\alpha}, \theta^{\dot\beta}\}={i\over {2\kappa}}(\hbox{\bf 1} - (AA^+)^{-1})^{\dot\beta \alpha} \stepcounter{rown} \end{equation} \begin{equation} \begin{array}{lll} \{ X^i, \theta_{\alpha} \} &=&{1\over {4\kappa}}(\theta^T\sigma^i)_{\gamma} (\hbox{\bf 1}_2 - (AA^+)^{-1})^{\gamma}_{\alpha}\\ \\ \{ X^0, \theta_{\alpha}\}&=&-{1\over {4\kappa}}\theta^T_{\gamma}(\hbox{\bf 1}_2 + (AA^+)^{-1})^{\gamma}_{\alpha} \end{array} \stepcounter{rown} \end{equation} \begin{equation} \begin{array}{lll} \{ A_{\alpha}^{\beta}, \theta^{\gamma}\}&=&\{ A_{\dot\alpha}^{\dot\beta}, \theta^{\gamma} \} = 0 \end{array} \stepcounter{rown} \end{equation} \end{description} In order to quantize the Poisson brackets (3.15) - (3.20) we perform the substitution (3.11). It appears that this substitution is consistent with the Jacobi identities if we keep the order of the coordinate generators on the {\it rhs} of (3.16) also in the quantized case\nolinebreak\footnote{For the other relations (3.17)-(3.20) the problem does not occur, due to the classical nature of the Lorentz sector (see (3.15)).}. Furthermore, we rewrite the composition law (3.12) as the coproduct rule for the coordinate generators, i.e. \begin{equation} \begin{array}{ll} \Delta (X_{\mu})&=X_{\mu}\otimes\hbox{\bf 1} + \Lambda_{\mu}^{\nu}(A)\otimes X_{\nu}- {i\over 2}({A^{-1}_{\alpha}}^{\beta} \sigma^{\mu}_{\beta\dot\gamma} \theta^{\dot\gamma} \otimes \theta^{\alpha} + \theta^{\alpha}\sigma^{\mu}_{\alpha\dot\beta}{\overline A}^{-1\dot\beta}_{\dot\gamma} \otimes \theta^{\dot\gamma})\\ \Delta (\theta_{\alpha})&=\theta_{\alpha}\otimes\hbox{\bf 1}+(A^{-1})^{\beta}_{\alpha}\otimes\theta_{\beta}\\ \Delta (A_{\alpha}^{\beta})&=A_{\alpha}^{\gamma}\otimes A_{\gamma}^{\beta} \end{array} \stepcounter{rown} \end{equation} One can show that the formulae (3.21) define a homomorphism of the quantized superalgebra given in Sect.~2. Adding the formulae for the antipodes \begin{equation} S(X^{\mu})=-\Lambda^{\mu}_{\nu}(A^{-1})X^{\nu}\quad S(A_{\alpha}^{\beta})=(A^{-1})_{\alpha}^{\beta} \stepcounter{rown} \end{equation} $$ S(\theta^{\alpha})=-A_{\beta}^{\alpha}\theta^{\beta} $$ we see that we have obtained the complete set of relations describing the $\kappa$-deformation of the $N=1$ Poincar{\'e} supergroup. Let us observe that \begin{description} \item[a)] If we put $A^+A=1$, i.e.
we consider the semidirect product $T_{4;4} +\!\!\!\!\!\smash{\supset} SU(2)$ of the quantum subgroup $T_{4;4}$ (quantum fourtranslations + quantum supertranslations) and the subgroup $SU(2)$ describing the space rotations, then the nontrivial $\kappa$-deformation occurs in only two relations (the first relation of (3.17) and the second relation of (3.19)). \item[b)] If we put $A=1$, i.e.\ we consider the quantum subgroup $T_{4;4}$, we obtain the $\kappa$-deformed $N=1$ superspace. It appears that only the commutators involving $\hat X^0$ are $\kappa$-deformed. \item[c)] Putting $\theta^{\alpha} = \theta^{\dot\alpha} = 0$ in (3.15) - (3.17), one recovers the $\kappa$-deformed inhomogeneous $ISl(2;\hbox{\bf C})$ group given in \cite{Masl93}. \end{description} \section{$\kappa$-deformed $N=1$ Superspace} Let us first recall that for the $\kappa$-deformed relativistic theory, with infinitesimal symmetries described by the $\kappa$-deformed Poincar{\'e} algebra \cite{Lukier91,Giller92,Lukier92,Lukier93,Bacry93}, there are two different ways of introducing the Poincar{\'e} group and the space-time coordinates: \begin{description} \item[a)] Using the formula (2.5 c) one can introduce the space-time coordinates by considering {\it ordinary} Fourier transforms of functions depending on the commuting fourmomenta \cite{Lukier92,Lukier93,Giller93}. In such an approach the space-time coordinate operators $\hat X_{\mu}$ commute and are introduced as the operators satisfying the relations \begin{equation} [\hat X_{\mu}, \hat P_{\nu}]=i\eta_{\mu\nu} \stepcounter{rown} \end{equation} \item[b)] Using the duality relation for Hopf algebras, described by the scalar product on the quantum double with the following properties \begin{equation} \begin{array}{lll} <\Delta (\hat z)|\hat g_1 \otimes \hat g_2>&=&<\hat z |\hat g_1 \hat g_2>\\ \\ <\hat z_1 \otimes \hat z_2|\Delta (\hat g)>&=&<\hat z_1 \hat z_2 |\hat g> \end{array} \stepcounter{rown} \end{equation} we easily see that for the standard duality relation between the $\hat X_{\mu}$ and $\hat P_{\mu}$ generators \begin{description} \item[-] the non-cocommutative fourmomenta (see (2.6)) imply the non-commutativity of the coordinates \cite{Zakrzinpress}: \begin{equation} \lbrack \hat X^i, \hat X^j\rbrack = 0\qquad \lbrack \hat X^0, \hat X^j\rbrack = {1\over {\kappa}}\hat X^j \stepcounter{rown} \end{equation} \item[-] the commutativity of the fourmomenta implies that \begin{equation} \Delta (\hat X^{\mu}) = \hat X^{\mu} \otimes \hbox{\bf 1} + \hbox{\bf 1} \otimes \hat X^{\mu} \stepcounter{rown} \end{equation} \end{description} One can rewrite the coproduct formulae (2.6) and (4.4) as the addition formulae for the fourmomenta $$ p_i^{(1+2)}=p_i^{(1)}e^{{p_0^{(2)}}\over {2\kappa}} + p_i^{(2)}e^{-{{p_0^{(1)}}\over {2\kappa}}} \qquad p_0^{(1+2)}=p_0^{(1)} + p_0^{(2)} \stepcounter{rown} \eqno (\theequation a) $$ and for the space-time coordinates $$ x^{\mu}_{(1+2)}=x^{\mu}_{(1)}+x^{\mu}_{(2)} \eqno (\theequation b) $$ If we introduce the following element of the quantum double describing the translation sector of $\kappa$-Poincar{\'e} $(\hat X^0 = -\hat X_0,\quad \hat X^i = -\hat X_i)$\footnote{For the concept of the exponentiation of the generators of the quantum double, consisting of a quantum Lie algebra and the dual quantum Lie group, see \cite{Fronsdal93} - \cite{Bonechi93}, where the exponentials (4.6) are called quantum $T$-matrices.
The notion of the quantum $T$-matrix is related to the notion of the universal bicharacter of Woronowicz (see e.g.\ \cite{Woron91}).} \begin{equation} G(\hat X^{\mu}; \hat P_{\mu})= e^{-\frac{i}{2}\hat X_0 \otimes \hat P_0} e^{i\hat X_i \otimes \hat P_i} e^{-\frac{i}{2}\hat X_0 \otimes \hat P_0} \stepcounter{rown} \end{equation} one can encode the addition formulae (4.5a-b) into the following multiplication rules $$ G(\hat X^{\mu}; p_{\mu}^{(1)})G(\hat X^{\mu}; p_{\mu}^{(2)})\quad =\quad G(\hat X^{\mu}; p_{\mu}^{(1+2)}) \stepcounter{rown} \eqno (\theequation a) $$ $$ G(x^{\mu}_{(1)}; \hat P_{\mu})G(x^{\mu}_{(2)}; \hat P_{\mu})\quad =\quad G (x^{\mu}_{(1+2)}; \hat P_{\mu}) \eqno (\theequation b) $$ We see therefore that the relations (4.6) describe the generalization of Fourier transform kernels to the case of the translation sector of the $\kappa$-Poincar{\'e} group, with the coproducts determining their multiplication rule. \end{description} Let us extend this scheme to the $N=1$ superPoincar{\'e} case. The non-commutative Hopf algebra describing the $\kappa$-deformed superspace is obtained by the quantization of the relations (3.16)-(3.19) with $A=1$. One obtains $$ \lbrack \hat X^i, \hat X^j\rbrack = 0\qquad \lbrack \hat X^0, \hat X^j\rbrack = {1\over {\kappa}}\hat X^j $$ \begin{equation} \begin{array}{lll} \{\hat\theta^{\alpha}, \hat\theta^{\beta}\}&=&\{\hat\theta^{\alpha}, \hat\theta^{\dot\beta}\}=\{\hat\theta^{\dot\alpha}, \hat\theta^{\dot\beta} \} = 0\\ \\ \lbrack \hat X^i, \hat\theta^{\alpha}\rbrack &=&\lbrack \hat X^i, \hat\theta^{\dot\alpha}\rbrack =0 \end{array} \stepcounter{rown} \end{equation} $$ \lbrack \hat X^0, \hat\theta^{\alpha}\rbrack = -{1\over {2\kappa}}\hat\theta^{\alpha}\qquad \lbrack \hat X^0, \hat\theta^{\dot\alpha}\rbrack = -{1\over {2\kappa}}\hat\theta^{\dot\alpha} $$ and the coproducts (3.21), implying the following composition law in superspace: \begin{equation} \begin{array}{ll} \hat\theta^{\alpha}_{(1+2)}&=\hat\theta^{\alpha}_{(1)}+\hat\theta^{\alpha}_{(2)} \qquad \hat\theta^{\dot\alpha}_{(1+2)}=\hat\theta^{\dot\alpha}_{(1)}+\hat\theta^{\dot\alpha}_{(2)}\\ \\ \hat X^{\mu}_{(1+2)}&=\hat X^{\mu}_{(1)}+\hat X^{\mu}_{(2)}+{i\over 2}(\sigma^{\mu})_{\alpha\dot\beta} (\hat\theta^{\dot\beta}_{(1)}\hat\theta^{\alpha}_{(2)}-\hat\theta^{\alpha}_{(1)}\hat\theta^{\dot\beta}_{(2)}) \end{array} \stepcounter{rown} \end{equation} We recall that the $\kappa$-deformed $N=1$ superalgebra is described by the relations (2.7) and the coproducts (2.8).
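Since the coproduct (2.6) is coassociative, the addition law (4.5a) is necessarily associative. As a simple illustration (not part of the original derivation), this can be checked symbolically; the symbol names below are our own choices:
\begin{verbatim}
# Illustrative check: the kappa-deformed fourmomentum addition law
# (4.5a) is associative, as required by coassociativity of (2.6).
import sympy as sp

kappa = sp.symbols('kappa', positive=True)
p1, p2, p3 = sp.symbols('p1:4')   # spatial momentum components
e1, e2, e3 = sp.symbols('e1:4')   # energies p0^(1), p0^(2), p0^(3)

def add(a, b):                    # (p_i, p_0) composition, law (4.5a)
    return (a[0] * sp.exp(b[1] / (2 * kappa))
            + b[0] * sp.exp(-a[1] / (2 * kappa)),
            a[1] + b[1])

lhs = add(add((p1, e1), (p2, e2)), (p3, e3))
rhs = add((p1, e1), add((p2, e2), (p3, e3)))
assert sp.simplify(lhs[0] - rhs[0]) == 0 and lhs[1] == rhs[1]
\end{verbatim}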
The addition formula for the Grassmann-algebra-valued eigenvalues $q_{\alpha}, q_{\dot\alpha}$ of the supercharges, induced by (2.8), is the following \begin{equation} \begin{array}{lll} q_{\alpha}^{(1+2)}&=&q_{\alpha}^{(1)}e^{{p_0^{(2)}}\over {4\kappa}}+q_{\alpha}^{(2)}e^{-{{p_0^{(1)}}\over {4\kappa}}}\\ q_{\dot\alpha}^{(1+2)}&=&q_{\dot\alpha}^{(1)}e^{{p_0^{(2)}}\over {4\kappa}}+q_{\dot\alpha}^{(2)}e^{-{{p_0^{(1)}}\over {4\kappa}}} \end{array} \stepcounter{rown} \end{equation} If we introduce the following quantum counterparts of the finite supertranslation group elements in momentum as well as in coordinate superspace $$ \stepcounter{rown} G(p_{\mu}, q_{\alpha}, q_{\dot\alpha})=e^{-\frac{i}{2}\hat X_0 p_0}e^{i(\hat X^{i}p_{i}+\hat\theta^{\alpha}q_{\alpha}+\hat\theta^{\dot\alpha}q_{\dot\alpha})} e^{-\frac{i}{2}\hat X_0 p_0} \eqno (\theequation a) $$ $$ \tilde G (x_{\mu}, \theta_{\alpha}, \theta_{\dot\alpha})=e^{i(x^{\mu}{\tilde P}_{\mu} + \theta^{\alpha}Q_{\alpha}+\theta^{\dot\alpha}Q_{\dot\alpha})} \eqno (\theequation b)\stepcounter{rown} $$ where $\tilde P_0 = 2\kappa \sinh{\frac{P_0}{2\kappa}}$ and $\tilde P_i = P_i$, we obtain the following multiplication laws: $$ G(p_{\mu}^{(1)}, q_{\alpha}^{(1)}, q_{\dot\alpha}^{(1)}) G(p_{\mu}^{(2)}, q_{\alpha}^{(2)}, q_{\dot\alpha}^{(2)}) = G(p_{\mu}^{(1+2)}, q_{\alpha}^{(1+2)}, q_{\dot\alpha}^{(1+2)}) \eqno (\theequation a) $$ $$ \tilde G(x^{\mu}_{(1)}, \theta^{\alpha}_{(1)}, \theta^{\dot\alpha}_{(1)}) \tilde G(x^{\mu}_{(2)}, \theta^{\alpha}_{(2)}, \theta^{\dot\alpha}_{(2)}) = \tilde G(x^{\mu}_{(1+2)}, \theta^{\alpha}_{(1+2)}, \theta^{\dot\alpha}_{(1+2)}) \eqno (\theequation b) $$ Following the discussion for ordinary supersymmetry (see e.g.\ \cite{Ferrara74}) one can consider the objects (4.10 a) and (4.10 b) as describing, respectively, the superfields in momentum superspace and in the usual (coordinate) superspace. It should be mentioned that the algebra (4.8) describes the superspace coordinates in the particular Lorentz frame $(A=1)$. If we allow nontrivial Lorentz transformations, the algebra of superspace coordinates is no longer closed, and one should consider the full algebra given by (3.15)-(3.20). \section{Outlook} In this paper we presented the quantum $\kappa$-deformation of the $N=1$ Poincar{\'e} supergroup, which is a non-commutative and non-cocommutative Hopf superalgebra. We would like to mention the following problems which should be studied further: \begin{description} \item[i)] It appears that for non-semisimple Lie (super)algebras the ``naive'' quantization (see (3.11)) of the $r$-Poisson bracket may be very useful as a consistent quantization scheme. In \cite{Zakrzinpress}, as well as in the case presented in this paper, the ambiguities related to the ordering of the {\it rhs} of the quantized $r$-Poisson brackets are resolved in a unique way. It would be interesting to classify the classical $r$-matrices for non-semisimple Lie (super)algebras and to find in which cases the ``naive'' quantization of the $r$-Poisson bracket leads to a consistent quantization\footnote{This programme is now under consideration; the classical $r$-matrices for simple Lie algebras and the ``naive'' quantization of the corresponding quadratic $r$-Poisson brackets are also being studied.}. \item[ii)] One can show that the $\kappa$-deformed $N=1$ supersymmetry algebra $(Q_{\dot\alpha}, Q_{\alpha}, P_{\mu})$ as a Hopf superalgebra (see Sect.~2) is dual to the Hopf superalgebra describing the $N=1$ $\kappa$-deformed superspace (see Sect.~4).
It would be important to show that the whole $N=1$ $\kappa$-deformed Poincar{\'e} supergroup is dual (possibly modulo some nonlinear transformations of the generators) to the $N=1$ $\kappa$-Poincar{\'e} superalgebra given in \cite{LukSob93}. We would like to stress that the analogous duality between the $D=4$ $\kappa$-deformed Poincar{\'e} group given in \cite{Zakrzinpress} and the $\kappa$-Poincar{\'e} algebra is not known. \item[iii)] It would be interesting to generalize the results of \cite{LukSob93} and of this paper to $N>1$. We would like to mention that the complete $N$-extended Poincar{\'e} superalgebra, with $N(N-1)$ central charges, can be obtained by the contraction of the superalgebra $OSp(2N;4)$ \cite{Lukier82}. Replacing the classical superalgebra $OSp(2N;4)$ by its $q$-analogue ${\cal U}_q(OSp(2N;4))$ and performing the quantum de Sitter contraction limit with the rescaling (2.3), one should obtain the quantum deformation of the $N$-extended superPoincar{\'e} algebra. To obtain the $N$-extended $\kappa$-deformed Poincar{\'e} supergroup it is sufficient to extend the classical $r$-matrix (1.3) to $N>1$ and follow the method presented in this paper. \end{description}
\section{Introduction} \label{sec:intro} Let $S$ or $S_g$ denote a compact, connected, orientable surface of genus $g$, where $g \geq 2$. A simple closed curve on $S$ is {\em essential} if it does not bound a disk in $S$. The complex of curves, introduced by Harvey \cite{[Ha]}, is the simplicial complex, $\mathcal{C}(S)$, whose vertices (the $0$-skeleton), $\mathcal{C}^0(S)$, are isotopy classes of essential simple closed curves, and whose edges (the $1$-skeleton, $\mathcal{C}^1(S)$) connect vertices that have disjoint representatives. For the remainder of this note, ``curve'' will mean ``simple closed curve''. By declaring that each edge of $\mathcal{C}^1(S)$ has length $1$, we endow $\mathcal{C}^0(S)$ with a metric. Specifically, an {\em edge path} is a sequence of vertices $\{v=v_0 , v_1 , \cdots , v_n=w\} $ such that $d(v_i , v_{i+1})=1$. A {\em geodesic path} joining $v$ and $w$ is a shortest edge path. The {\em distance}, $d(v,w)$, between arbitrary vertices is the length of a geodesic path. Since the complex of curves is known to be connected (this was stated by Harvey \cite{[Ha]} and follows from an earlier argument of Lickorish \cite{[Li]}), the value $d(v,w)$ is well-defined for all vertex pairs. We note that if $d(v,w)=2$, there is a vertex $\bar\gamma \in \mathcal{C}^0(S)$ and curve representatives in $S$, $\alpha \in v$, $\beta \in w$ and $\gamma \in \bar\gamma$, such that $\alpha \cap \beta \not= \emptyset$ and $\gamma \subset S \setminus (\alpha \cup \beta)$. In the generic situation (when some component of $S \setminus (\alpha \cup \beta)$ has Euler characteristic less than zero), there are infinitely many isotopically distinct choices for $\gamma \subset S \setminus (\alpha \cup \beta)$ and, thus, infinitely many possible geodesics realizing distance $2$. In this case, the existence of infinitely many geodesics at distance $2$ forces infinitely many geodesics at all greater distances. It is this infinite local pathology which makes finding an effective distance-computing algorithm challenging. The coarse geometric properties of the curve complex were first studied extensively by Masur--Minsky \cite{[MM1], [MM2]}. The complex of curves has proved a useful tool for the study of hyperbolic $3$-manifolds, mapping class groups and Teichm\"uller theory. In particular, in \cite{[MM1], [MM2]} it was established that the complex of curves is quasi-isometric to the electrified Teichm\"uller space and is $\delta$-hyperbolic. Here, $\delta$-hyperbolic means that geodesic triangles in $\mathcal{C}^1(S)$ are $\delta$-thin: any side is contained in the $\delta$-neighborhood of the union of the other two sides. In \cite{[A]}, Aougab established uniform hyperbolicity---$\delta$ can be chosen independent of genus (for $g \geq 2$). In spite of this considerable advancement in understanding the coarse geometry of the complex of curves, the development of tools intended to explicitly compute distance has been difficult. In 2002, Jason Leasure proved the existence of an algorithm to compute the distance between two vertices of $\mathcal{C}^0(S)$ (\cite{[L]}, Corollary 3.2.6). Later, other algorithms were discovered by Shackleton \cite{[Sh]} and Webb \cite{[W]}, but none of these algorithms were studied seriously from the viewpoint of doing explicit computations, and all seem unsuitable for that purpose.
Recently Birman, Margalit and the second author \cite{[BMM]} have given a new algorithm---{\em the efficient geodesic algorithm}---and we have developed an implementation of it called the {\em Metric in the Curve Complex} (MICC). Applications of MICC we will present in this note include: \begin{itemize} \item[(i)] establishing that the minimal geometric intersection number for vertices of $\mathcal{C} (S_2)$ at distance four is $12$, \item[(ii)] listing all vertex pairs (up to the action of an element of the mapping class group) of $\mathcal{C} (S_2)$ with distance four and having minimally positioned representatives with intersection number at most $25$, and \item[(iii)] producing an explicit example of two vertices of $\mathcal{C} (S_3)$ that have distance four and intersection number $29$. \end{itemize} The key idea in \cite{[BMM]} is the introduction of a new class of geodesics, {\em efficient geodesics}. They are not the same as the `tight geodesics' that have dominated almost all published work on the curve complex following their introduction in \cite{[MM1],[MM2]}; however, they share with tight geodesics the nice property that there are finitely many efficient geodesics between any two fixed vertices in $\mathcal{C}(S)$. For convenience, for a pair of curves, $(\alpha , \beta)$, we will refer to a component of $(\alpha \cup \beta) \setminus (\alpha \cap \beta)$ as a {\em segment}. We will use a slightly weaker definition of efficient geodesic than that given in \cite{[BMM]}. \begin{definition} \label{D: IE} Let $v, w \in \mathcal{C}^0(S)$ with $d(v,w) \geq 3$. An oriented path $v=v_0, \dots, v_n=w, \ n\geq 3$, in $\mathcal{C}^0(S)$ is {\em initially efficient} if there are representatives $\alpha_0 \in v_0$, $\alpha_1 \in v_1$ and $\alpha_n \in v_n$ such that $| \alpha_1 \cap b| \leq n-2 $ for any segment $b \subset \alpha_n \setminus \alpha_0$. We say $v = v_0 , \cdots , v_n = w$ is {\em efficient} if $v_k , \cdots , v_n$ is initially efficient for each $ 0 \leq k \leq n-3$ and the oriented path $v_{n} , v_{n-1} , v_{n-2} , v_{n -3}$ is also initially efficient. \end{definition} The efficient geodesic algorithm is a consequence of the following. \begin{theorem} {\rm (Theorem 1.1 of \cite{[BMM]})} \label{T:simplify} Let $ g \geq 2$, and let $v$ and $w$ be two vertices of $\mathcal{C} (S_g)$ with $d(v,w) \geq 3$. There exists an efficient geodesic from $v$ to $w$, and in fact there are finitely many. \end{theorem} When $n=3$, notice that an efficient geodesic $v=v_0 , v_1 , v_2 , v_3=w$ yields an oppositely oriented efficient geodesic, $w=v_3 , v_2 , v_1 , v_0=v$. That is, distance $3$ vertices have non-oriented efficient geodesics. Thus, for corresponding representatives $\alpha_0, \alpha_1, \alpha_2, \alpha_3$, we have that $\alpha_1$ (respectively $\alpha_2$) will intersect segments of $\alpha_3 \setminus \alpha_0$ (respectively of $\alpha_0 \setminus \alpha_3$) at most once. From this observation, we will establish the following test for distance $\geq 4$, which MICC implements. \begin{theorem} \label{theorem: BMM test}{\rm({\bf Distance $\geq 4$ Test})} Let $v, w$ be vertices with $d(v,w) \geq 3$.
Let $\Gamma\subset \mathcal{C}^0(S)$ be the collection of all vertices such that the following hold: \begin{enumerate} \item for $\bar\gamma \in \Gamma$, we have $d(v,\bar\gamma)=1$; and \item for $\bar\gamma \in \Gamma$, there exist representatives $\alpha, \beta,\gamma$ of $v,w,\bar\gamma$, respectively, such that for each segment $b\subset \beta\setminus \alpha$ we have $| \gamma\cap b | \leq 1$. \end{enumerate} Then $d(v,w) \geq 4$ if and only if $d(\bar\gamma,w) \geq 3$ for all $\bar\gamma \in \Gamma$. Moreover, the collection $\Gamma$ is finite. \end{theorem} \begin{remark} \label{remark on Z} In keeping with our previous observation regarding non-oriented efficient geodesics at distance $3$, we can flip the roles of $v$ and $w$. Thus, the test can also be stated in terms of $d(v,\bar\gamma^\prime) \geq 3$ for $\bar\gamma^\prime \in \Gamma^\prime$, where: \begin{itemize} \item[1.] If $\bar\gamma^\prime \in \Gamma^\prime$ then $d(\bar\gamma^\prime,w)=1$. \item[2.] If $\bar\gamma^\prime \in \Gamma^\prime$, there exist representatives $\alpha \in v$, $\beta \in w$ and $\gamma^\prime \in \bar\gamma^\prime$ such that for all segments $a \subset \alpha \setminus \beta$ we have $| a \cap \gamma^\prime | \leq 1$. \end{itemize} \end{remark} Before giving the proof of the Distance $\geq 4$ Test, we recall a useful concept and its implications. Let $\alpha ,\beta \subset S$ be a pair of curves such that $|\alpha \cap \beta|$ is minimal with respect to isotopies of $\beta$. That is, $\alpha$ and $\beta$ are {\em minimally positioned}. Determining when $\alpha$ and $\beta$ are minimally positioned is straightforward due to the bigon criterion (Proposition 1.7, \cite{[FM]}): no disc component of $S \setminus (\alpha \cup \beta)$ has exactly two segments of $(\alpha \cup \beta) \setminus (\alpha \cap \beta)$ in its boundary. We say that $\alpha$ and $\beta$ (or the pair $(\alpha,\beta)$) form a {\em filling pair} if $S \setminus (\alpha \cup \beta)$ is a collection of $2$-discs. It is readily seen that a pair is filling on $S$ if and only if the corresponding vertices in $\mathcal{C}^0(S)$ are at least distance $3$ apart. When a minimally positioned pair of curves is not filling but still intersects, some component of $S \setminus (\alpha \cup \beta)$ contains an essential curve. Thus, the corresponding vertices are distance $2$ apart. Algorithmically determining whether a minimally positioned pair is filling requires only simple tools from classical topology. For $\alpha \cap \beta \not= \emptyset$, let $N(\alpha \cup \beta) \subset S_g$ be a regular neighborhood. The genus of this neighborhood, $genus(N(\alpha \cup \beta))$, can be algorithmically computed as discussed in \S\ref{sec: genus}. (For an oriented surface $\Sigma$ with boundary, recall $genus(\Sigma) = 1 - \frac{ \chi(\Sigma) + |\partial \Sigma|} {2}$, where $\chi(\Sigma)$ is the Euler characteristic and $\chi (N(\alpha \cup \beta)) = -|\alpha \cap \beta|$.) If $genus(N(\alpha \cup \beta)) < g$, then a component of $S_g \setminus (\alpha \cup \beta)$ contains an essential curve of $S_g$ and the vertices that $\alpha$ and $\beta$ represent are distance $2$ apart. If $genus(N(\alpha \cup \beta))=g$, those vertices are at least distance $3$ apart. We will see in \S~\ref{sec: genus} that this {\em filling calculation} can be readily implemented; a sketch is given below. However, if one is handed a nice enough presentation of $\alpha$ and $\beta$ in $S$, determining whether they are a filling pair can be done by inspection.
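The following fragment is a minimal sketch of the filling calculation, assuming that the intersection number and the number of boundary curves of $N(\alpha \cup \beta)$ have already been extracted from a presentation of the pair; it is an illustration only, not MICC code.
\begin{verbatim}
# Sketch of the filling calculation: decide from the genus of a
# regular neighborhood N(alpha U beta) whether a minimally
# positioned pair fills S_g.  Inputs are assumed precomputed.
def fills(g, num_intersections, num_boundary_curves):
    chi = -num_intersections          # chi(N) = -|alpha ^ beta|
    genus_N = 1 - (chi + num_boundary_curves) // 2
    # genus_N < g: an essential curve survives in the complement,
    # so the vertices are distance 2 apart; genus_N == g: d >= 3.
    return genus_N == g
\end{verbatim}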
For example, we will make such filling determinations in Example~\ref{example:Hempel}. We now give the proof of the Distance $\geq 4$ Test. \noindent {\bf Proof:} From the above discussion we see that the assumption $d(v,w) \geq 3$ translates into considering only minimally positioned filling pairs in $S$. To determine whether the associated vertices in $\mathcal{C}^0(S)$ of a filling pair are at distance $\geq 4$, we need only determine that they are not at distance $3$. Thus, suppose $\alpha$ and $\beta$ represent classes $v$ and $w$ such that $d(v,w) \geq 3$. Assume there exists a length $3$ path $v =v_0, v_1, v_2, v_3=w$. From Theorem \ref{T:simplify}, we can further assume that this path is initially efficient. In particular, for representatives $\alpha = \alpha_0 , \alpha_1 , \alpha_2, \alpha_3=\beta$ of the vertices of this path, respectively, we can assume $\alpha_1$ intersects segments of $\alpha_3 \setminus \alpha_0$ at most once. Thus, $v_1$ is an element of the set $\Gamma$. But, since $d(v_1 , v_3(=w)) =2$, we need only establish that $d(\bar\gamma,w)\geq 3$ for all $\bar\gamma \in \Gamma$ to contradict the assumption that there was a distance $3$ path. The fact that the set $\Gamma$ is finite is due to $S \setminus (\alpha_0 \cup \alpha_3)$ being a collection of $2$-discs and representatives of any vertex of $\Gamma$ having bounded intersection with any such $2$-disc component. The stated test for distance $\geq 4$ follows. \qed \begin{example} \label{example:Hempel} We consider an example of a pair of curves, $\alpha$ and $\beta$, on a genus $2$ surface which represent classes that are distance $4$ apart. (See Figure~\ref{fig: Hempel example}.) This is an example of J. Hempel and appears in the lecture notes \cite{[Sc]}. These notes assert distance $4$ for the pair without proof. As an application of the Distance $\geq 4$ Test, we now give a proof establishing distance $4$ for Hempel's example. \begin{figure}[htbp!] \labellist \small\hair 2pt \pinlabel $\mathrm{arcs \ of}\ \alpha$ at 140 100 \pinlabel $\beta$ at 165 119 \pinlabel $\beta$ at 363 244 \pinlabel $\gamma$ at 97 280 \pinlabel $\gamma^\prime$ at 380 276 \endlabellist \centering{\includegraphics[width=1.1\textwidth]{Hemple-example-revision}} \caption{{\small An example due to J. Hempel} } \label{fig: Hempel example} \end{figure} In Figure \ref{fig: Hempel example}, the surface $S_2$ is represented as a rectangular region minus two discs. The gray sides of the rectangle are identified, left-to-right and top-to-bottom, to form a torus minus two discs. The genus $2$ surface is obtained by identifying the two oriented red boundary curves, and the resulting single curve is $\beta$. (The identification is initiated by lining up the six colored dots on the $\alpha, \gamma ,\gamma'$ curves.) These identifications induce identifications of the endpoints of the dark blue arcs, so as to form the curve $\alpha$. By inspection, one can see that $(\alpha, \beta)$ is a minimally positioned filling pair. We now apply the Distance $\geq 4$ Test. We wish to find curves, $\gamma$, that represent vertices $\bar\gamma \in \Gamma$. Such a $\gamma$ will be in the complement of $\alpha$, intersecting any segment of $\beta \setminus \alpha$ at most once. Three such $\gamma{\rm 's}$ can be immediately identified. This is because the complement of $\alpha \cup \beta$ in $S_2$ is a collection of some number of $4$-gon regions and a single $12$-gon region.
The boundary of any one of these regions is an alternating joining of segments in $\alpha$ and $\beta$. Thus, any $4$-gon boundary has two segments in $\beta$; and the boundary of the single $12$-gon has $6$ segments in $\beta$. Requiring that any $\gamma$ intersect segments of $\beta$ at most once forces it to either not intersect a $4$-gon, or intersect each of the two $\beta$ segments of a $4$-gon once. However, there are six different ways a $\gamma$ can exit/enter the $12$-gon, giving us three possible $\gamma{\rm 's}$ that intersect the $12$-gon region once. In Figure \ref{fig: Hempel example}, the dashed green and purple curves $\gamma$ and $\gamma^\prime$ illustrate two of the three curves generated by the exit/enter possibilities. It is readily apparent that both $S \setminus (\beta \cup \gamma)$ and $S \setminus (\beta \cup \gamma^\prime)$ are collections of $2$-discs, none of which are bigons. Thus, the corresponding vertex pairs are at least distance $3$ apart. The remaining possibilities for a $\gamma$ can be dealt with in a similar straightforward manner (theoretically, there are also $\gamma{\rm 's}$ that intersect the $12$-gon region $2$ and $3$ times). $\diamond$ \end{example} The $\Gamma$-calculation above illustrates the primary computing capabilities of the MICC software package \cite{[MICC]}. MICC is a computational tool that can determine whether the distance between two vertices in $\mathcal{C}(S_{g\geq2})$ is $2$, $3$, or $\geq 4$. Its input is readily produced from any representation of two curves on a closed surface. It has functionality that can be used to search for new curve pairs or manipulate existing examples. Its output can be used to construct geodesic paths between curves of short distances. As such, MICC is an additional tool scholars can utilize in answering a number of basic questions about the local pathology of the complex of curves. As an illustration, we consider the relationship between distance and minimal intersection number. It is known that the theoretical minimal intersection number for a filling pair on $S_g$ is $2g-1$, due to the Euler characteristic of the surface: a minimally positioned filling pair with $i$ intersections gives a cell decomposition of $S_g$ with $i$ vertices, $2i$ edges and $F$ disc faces, so $F = i + 2 - 2g \geq 1$, forcing $i \geq 2g-1$. For $g=2$, this theoretical minimum is not realizable and the realizable minimum is in fact $4$. Recent work of Aougab and Huang \cite{[AH]} has given a construction realizing the theoretical minimum for $g\geq3$. Additionally, they show that all such minimal filling pairs are distance $3$. For fixed $g \geq 2$, using his uniform hyperbolicity result, Aougab proved that the theoretical minimum intersection number grows exponentially as a function of distance (Theorem 1.2, \cite{[A]}). Also, Aougab and Taylor \cite{[AT]} give a recipe for producing filling pairs at a given distance whose intersection numbers are close to the minimum in an asymptotic sense; see their paper for the precise statement. Ido, Jang and Kobayashi \cite{[IJK]} also have a construction for producing filling pairs of a prescribed distance. The arguments in these last three citations employ the high-powered machinery of Masur and Minsky, including the {\em Bounded geodesic image theorem} \cite{[MM2]}. Thus, the growth bounds and constructed examples inherit a ``coarse geometry'' quality, which so far in the literature has not been used to produce the exact minimal intersection number with accompanying filling pairs for a specified distance and genus. In contrast, MICC can be used to explicitly find all minimally intersecting filling pairs of distance $4$.
Using the Distance $\geq 4$ Test, we give a ``proof of concept'' calculation that constructs all minimum intersecting distance $4$ filling pairs in $\mathcal{C}(S_2)$ up to homeomorphism. We can next use MICC to calculate distance for curve pairs of increasing intersection number starting at this minimum. The result of this calculation is the following theorem. \begin{theorem} \label{theorem: DWH-G2-D4-I12} The minimal intersection number for a filling pair, $\alpha , \beta \subset S_2$, representing vertices $v, w \in \mathcal{C}^0(S_2)$, respectively, with $d(v,w)=4$ is $12$. \end{theorem} Combining this theorem with the Distance $\geq 4$ Test we obtain a partial test for distance four. \begin{corollary} \label{corollary: DWH-G2-D4-I12} Let $d(v,w) \geq 4$ for two vertices in $\mathcal{C}^0(S_2)$. Let $\alpha, \beta \subset S_2$ be curves in minimal position representing $v$ and $w$, respectively. Let $\gamma \subset S_2 $ be a curve. If \begin{enumerate} \item $\gamma \cap \beta = \emptyset$ and $|\gamma \cap \alpha| < 12$, or \item $\gamma \cap \alpha = \emptyset$ and $|\gamma \cap\beta|<12$, \end{enumerate} then $d(v,w)=4$. \end{corollary} The proof of concept calculation for Theorem \ref{theorem: DWH-G2-D4-I12} involves finding all solutions to an integer linear programming problem so as to identify all potential candidates for minimally intersecting distance four curve pairs. However, such a comprehensive search is not necessarily needed to find examples of distance $\geq 4$ pairs. Utilizing all of the functionality of MICC, one can ``experiment'' with different curve pairs in a search for distance four pairs. Remark \ref{remark: experiment} discusses how such experimentation led to the discovery of the first known filling pair representing distance $4$ vertices in $\mathcal{C}^0(S_3)$. In particular, we have the following result: \begin{theorem} \label{theorem: DWH-G3-D4-I29} The minimal intersection number for a pair of filling curves in $S_3$ that represent distance $4$ vertices in $\mathcal{C}^0(S_3)$ is less than or equal to $29$. \end{theorem} The outline for our paper is as follows. In \S\ref{sec: DWH}, we discuss a method of representing a filling pair on $S_{g \geq 2}$. In \S\ref{sec: DWH-G2-D4-I12}, we give the proof of Theorem \ref{theorem: DWH-G2-D4-I12}. In particular, the proof can be viewed as giving a general strategy for calculating theoretical minimal intersections of distance $4$ filling pairs for any higher genus. This strategy, employed in a limited manner, allowed us to verify that the explicit example given in \cite{[BMM]} (cf. \S2) of a distance $4$ vertex pair in $\mathcal{C}^0(S_2)$ establishes Theorem \ref{theorem: DWH-G2-D4-I12}. We finish \S\ref{sec: DWH-G2-D4-I12} with an analysis of a genus $3$ distance $4$ pair establishing Theorem \ref{theorem: DWH-G3-D4-I29}. Finally, in \S\ref{sec: MICC commands}, we discuss the complete functionality of MICC. To make this discussion concrete, we illustrate the range of MICC commands with a running example. At the end of this manuscript, we attach the currently known {\em spectrum} of pairs of distance $4$ or greater in $\mathcal{C}^0(S_2)$ with up to $14$ intersections. The full distance $4$ or greater spectrum in $\mathcal{C}^0(S_2)$ with up to $25$ intersections is available at \cite{[MICC]}. \noindent {\em An expository remark}---Throughout we will continue to use $\alpha$ and $\beta$ as representatives of $v$ and $w$, respectively.
Similarly, indexed $\alpha_i$ curves will be used as representatives of indexed $v_i$ vertices. This is meant to be consistent with the notation used in \cite{[BMM]}. For all other curves in the surface, we will use various ``flavors'' of $\gamma$, and $\bar\gamma$ will denote the corresponding vertex. We will always assume that any pairing of curves is in minimal position. \section{Representations of pairs of curves.} \label{sec: DWH} \subsection{Disc with handles} \label{subsec: DWH} In deciding how to represent curves on surfaces, we must first choose how we will represent closed oriented surfaces. Representing surfaces with boundary as {\em discs with handles} (or $DWH$; see the example in Figure~\ref{fig: DWH-G2-2}) is a well known method among working geometers and topologists, and it can be readily adapted to closed surfaces in our situation. For an essential curve $\alpha \subset S_{g \geq 2}$, we can split $S$ along $\alpha$ to produce a surface $\hat{S}$ having two boundary curves, $\partial_+$ and $\partial_-$. If $\alpha$ is separating, $\hat{S}$ will have two connected components, i.e. $\hat{S} = \hat{S}^1 \cup \hat{S}^2$. The genus of each component will be less than $g$, and ${\rm genus}(\hat{S}^1) + {\rm genus}(\hat{S}^2) = g$. As such, a $DWH$ representation will have $\hat{S}^1$ (respectively, $\hat{S}^2$) being a single $2$-disc with $2\times{\rm genus}(\hat{S}^1)$ (respectively, $2\times{\rm genus}(\hat{S}^2)$) $1$-handles attached. We recover $S$ by gluing the boundary of these two components together. When $\alpha$ is non-separating, $\hat{S}$ will be a connected surface of genus $g-1$ with two boundary curves $\partial_+$ and $\partial_-$. A $DWH$ representation of $\hat{S}$ would be a single $2$-disc with $2 \times {\rm genus}(\hat{S}) + 1$ $1$-handles attached. As before, $S$ is recovered by giving a gluing of its two boundary curves. Now suppose $(\alpha,\beta)$ is a filling pair in minimal position on $S$. We obtain a $DWH$ representation of $S$ by splitting along $\alpha$. Since $\alpha$ and $\beta$ are in minimal position, we know that $\beta \cap \hat{S}$ will be a collection of properly embedded essential arcs, $\{\omega_1 , \cdots , \omega_k \}$. We require that each $1$-handle in our $DWH$ representation contain at least one $\omega$-arc. We refer to such an arc as a {\em co-core} of a $1$-handle. For example, skipping ahead to Figure \ref{fig: DWH-G2-2}, the arcs with labels $w_1, \ w_2, \ w_3$ are the needed arcs. Skipping ahead to Figure \ref{fig: DWH-G2-D4-I12}, the reader will find an example of a genus $1$ $DWH$ surface with two boundary components. The properly embedded essential black arcs are examples of $\omega$-arcs and each $1$-handle contains at least one such arc. Since we must be able to recover both $\beta$ and $S$ by a gluing of $\partial_+$ and $\partial_-$, we must have $|(\cup_{1\leq j \leq k } \ \omega_j ) \cap \partial_+ | = |(\cup_{1\leq j \leq k }\ \omega_j ) \cap \partial_- |$. More precisely, for $\partial \omega = p_1 \cup p_2$ there are three possible configurations: $\omega$ is a $++$ arc (respectively, $--$ arc) when $p_1 \cup p_2 \subset \partial_+$ (respectively, $p_1 \cup p_2 \subset \partial_-$); and, $\omega$ is a $+-$ arc when $p_1 \in \partial_+$ and $p_2 \in \partial_-$. Thus we can have any number of $+-$ $\omega$-arcs but the number of $++$ arcs must equal the number of $--$ arcs. Finally, observe that the $\omega$-arcs divide both $\partial_+$ and $\partial_-$ into $k$ intervals.
In order to specify a gluing of $\partial_+$ and $\partial_-$, we orient and cyclically label these $k$ intervals, by convention, $0$ through $k-1$. Again, referring to Figure \ref{fig: DWH-G2-D4-I12}, we illustrate a gluing of $\partial_+$ and $\partial_-$ by the $0$ through $11$ labels, shown in red and blue, respectively. \subsection{Strategy for constructing examples} \label{subset: constructing examples} We can reverse engineer this construction with an eye towards finding filling pairs of distance greater than $3$. Suppose we are interested in finding such a filling pair $(\alpha,\beta)$ with $\alpha$ non-separating. Any associated $\hat{S}$ will be a connected $DWH$ with two boundary curves. Initially, let us fill $\hat{S}$ with a maximal collection, $A$, of properly embedded essential arcs that are pairwise non-parallel. We specify $2g-1$ arcs to be the arcs associated to the $1$-handles. Thus, when we split the $DWH$ along these $2g-1$ arcs, we obtain the underlying $2$-disc. Figure \ref{fig: DWH-G2-2}-left illustrates such a configuration for $g=2$. \begin{figure}[htbp] \labellist \small\hair 2pt \pinlabel $w_6$ at 116 100 \pinlabel $w_2$ at 107 245 \pinlabel $w_4$ at 340 123 \pinlabel $w_1$ at 73 305 \pinlabel $w_3$ at 380 305 \pinlabel $w_5$ at 225 90 \pinlabel $w_6$ at 625 93 \pinlabel $w_2$ at 619 245 \pinlabel $w_4$ at 840 123 \pinlabel $w_1$ at 585 305 \pinlabel $w_3$ at 885 305 \pinlabel $w_5$ at 735 94 \endlabellist \centering \includegraphics[width=0.90\textwidth]{DWH-G2-2} \caption{{\small The left illustration is a genus one surface with two boundary curves---coded red and blue. $A$ is a maximal collection of $6$ weighted arcs. The weights, $w_1 , w_2 , w_3 , w_4 , w_5 , w_6$, are non-negative integers. The green graph in the right illustration is $G(A)$, the dual graph. Each edge of $G(A)$ intersects exactly one arc of $A$ once.}} \label{fig: DWH-G2-2} \end{figure} To obtain the collection of $\omega$-arcs that will be used to produce a curve $\beta$, we will assign weights to each arc of $A$ such that there is a reasonable expectation that the Distance $\geq 4$ Test is satisfied. Once we have determined the weight of an arc of $A$, we will place that number of parallel copies of the arc in the $DWH$. To this end, we consider the dual graph, $G(A)$, to $A$ in $\hat{S}$. (Figure \ref{fig: DWH-G2-2}-right illustrates such a dual graph for the left configuration.) In graph-theoretic terms, we consider \textit{elementary circuits}---edge paths that form simple loops---in $G(A)$. Each elementary circuit, $\gamma \subset G(A)$, represents a possible vertex in $\Gamma$ of the Distance $\geq 4$ Test. Let $\Sigma(\gamma)$ be the sum of the weights of all the arcs in $A$ that a circuit $\gamma$ intersects. Since our speculative $\beta$ curve will be the union of arcs parallel to those in $A$, and $(\gamma,\beta)$ should be a filling pair for any circuit $\gamma$, we require that $\Sigma(\gamma)$ be greater than or equal to the minimal intersection number for a filling pair, i.e. $4$ when $g=2$ and $2g -1$ when $g>2$ \cite{[AH]}. Thus, if $\{ \gamma_1 , \cdots , \gamma_m \} \subset G(A)$ is the complete set of elementary circuits, for each circuit we get an inequality of the form $\Sigma(\gamma_i) \geq 2g -1$ (when $g>2$) or $\Sigma(\gamma_i) \geq 4$ (when $g=2$). This gives us $m$ inequalities that make up an {\em integer linear program} (ILP). We add to this the equality that states the sum of the weights of $++$ arcs equals the sum of the weights of $--$ arcs.
These combined relations are the constraints for the objective function, $P$, the sum of all the weights, which we wish to minimize. Figure \ref{fig: DWH-G2-2}-right illustrates this correspondence between a $G(A)$ and an ILP when $g = 2$. In particular, for this dual graph, there are $6$ elementary circuits which yield $6$ weight inequalities. We get a seventh relation from requiring that the weights of the $++$ and $--$ arcs be equal. All this yields the following constraints for minimizing $P = w_1 + w_2 + w_3 + w_4 + w_5 + w_6 $: \begin{eqnarray} \label{LLP} \begin{tabular}{r c r c r c r c r c r} $w_1$ & $+$ & $w_4$ & $+$ & $w_5$ & $+$ & $w_6$ & $\geq$ & $4$ \\ $ w_2$ & $+$ & $w_4$ & $+$ & $w_5$ & $+$ &$w_6$ & $\geq$ & $4$ \\ $\ $ & $ \ $& $\ $ & $ \ $ & $w_3$ & $+$ & $w_5$ & $\geq$ & $4$ \\ $ \ $ & $\ $ & $ \ $ & $\ $ & $w_1$ &$+$ & $w_2$ & $\geq$ & $4$ \\ $ w_1$ & $+$ & $w_3$ & $+$ & $w_4$ & $+$ & $w_6$ & $\geq$ & $4$ \\ $ w_2$ & $+$ & $w_3$ & $+$ & $w_4$ &$+$ & $w_6$ & $\geq$ & $4$ \\ $\ $ & $ \ $& $\ $ & $ \ $ & $ \ $ & $ \ $ & $w_4$ & $=$ & $w_6$ \\ $\ $ & $w_1 ,$& $w_2 , $ & $w_3 , $ & $w_4 , $ & $w_5 ,$ & $w_6$ & $\geq$ & $0$ \end{tabular} \end{eqnarray} Using any popular computing software (e.g. Maple) one can readily check that the optimal value of this $P$ is $8$. Thus, we have a theoretical minimal intersection number for a filling pair of distance $4$ on $S_2$. This optimal value happens to be uniquely realized by the {\em weight solution} $[w_1 , w_2 , w_3 , w_4 , w_5 , w_6] = [2,2,2,0,2,0]$. Placing the corresponding set of $8$ $\omega$-arcs into the $DWH$ of Figure \ref{fig: DWH-G2-2}-left, we then have $8$ possible ways to identify $\partial_+$ to $\partial_-$. As discussed in \S\ref{subsec: perm}, MICC's permutation functionality can be used to check which of these boundary identifications will result in a single $\beta$ curve. For $\omega$-arcs corresponding to the weight solution $[2,2,2,0,2,0]$ there happen to be four identifications that yield an $(\alpha,\beta)$ filling pair. Finally, by employing MICC's distance functionality (described in \S\ref{subsec: distance}) to search for all elementary circuits, one can determine that all of these intersection $8$ filling pairs have corresponding vertices at distance $3$ in $\mathcal{C}^0(S_2)$. \section{Proofs of main results.} \label{sec: DWH-G2-D4-I12} Although the case counting is extensive, the discussion in \S\ref{sec: DWH} gives us a straightforward strategy for proving Theorem \ref{theorem: DWH-G2-D4-I12}. \subsection{Proof of Theorem \ref{theorem: DWH-G2-D4-I12}.} First, we need to generate all possible genus $2$ $DWH$ diagrams with weighted $\omega$-arcs so that we may then generate the corresponding ILP's. Initially we divide this generating process into two cases corresponding to whether $\alpha$ is a separating or non-separating curve. If $\alpha$ is separating, we have exactly one possible $DWH$ diagram with weighted $\omega$-arcs. $\alpha$ splits $S_2$ into two genus one surfaces with boundary, and each genus one surface has three weighted $\omega$-arcs. (See Figure \ref{fig: DWH-G2-4}.) \begin{figure}[htbp] \labellist \small\hair 2pt \pinlabel $w_3$ at 163 100 \pinlabel $w_2$ at 245 345 \pinlabel $w_1$ at 120 330 \pinlabel $w_3$ at 666 100 \pinlabel $w_2$ at 748 347 \pinlabel $w_1$ at 623 330 \endlabellist \centering \includegraphics[width=0.90\textwidth]{DWH-G2-4} \caption{{\small }} \label{fig: DWH-G2-4} \end{figure} We leave it to the reader to generate the unique ILP in this situation.
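As an aside, ILP's of this small size can be certified by exhaustive search. The following sketch (in Python; a brute-force check rather than a true ILP solver, and not part of MICC) verifies the optimal value $8$ and the unique weight solution $[2,2,2,0,2,0]$ of ILP (\ref{LLP}). Since the constraints $w_1 + w_2 \geq 4$ and $w_3 + w_5 \geq 4$ already force $P \geq 8$, restricting each weight to $\{0,\dots,8\}$ loses no generality.
\begin{verbatim}
# Brute-force verification of ILP (1); an illustrative sketch, not MICC code.
from itertools import product

def feasible(w):
    w1, w2, w3, w4, w5, w6 = w
    return (w1 + w4 + w5 + w6 >= 4 and
            w2 + w4 + w5 + w6 >= 4 and
            w3 + w5 >= 4 and
            w1 + w2 >= 4 and
            w1 + w3 + w4 + w6 >= 4 and
            w2 + w3 + w4 + w6 >= 4 and
            w4 == w6)

# Scan all integer weight vectors with entries in 0..8, keeping the feasible ones.
solutions = [w for w in product(range(9), repeat=6) if feasible(w)]
best = min(solutions, key=sum)
print(sum(best), best)                         # -> 8 (2, 2, 2, 0, 2, 0)
print([w for w in solutions if sum(w) == 8])   # -> [(2, 2, 2, 0, 2, 0)], unique
\end{verbatim}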
For the ILP corresponding to Figure \ref{fig: DWH-G2-4}, we determined that the theoretical minimal intersection number for a filling pair $(\alpha,\beta)$ of distance $4$ on $S_2$ is $12$; and, intersection $12$ is uniquely realized when all the weights equal $2$. IBM's software package {\em CPLEX Optimization Studio} \cite{[I]} was utilized in solving this and all other ILP's in this paper. Next, employing MICC's permutation functionality, we determined that there are six possible identifications of $\partial_+$ and $\partial_-$ which yield a single $\beta$ curve. However, MICC's distance functionality determined that all of these filling pairs of intersection $12$ were distance $3$. So we turn to the non-separating case for $\alpha$. \begin{figure}[htbp] \labellist \small\hair 2pt \pinlabel $w_6$ at 106 110 \pinlabel $w_2$ at 245 349 \pinlabel $w_4$ at 315 135 \pinlabel $w_1$ at 120 332 \pinlabel $w_3$ at 297 280 \pinlabel $w_5$ at 255 85 \pinlabel $w_6$ at 606 110 \pinlabel $w_2$ at 747 349 \pinlabel $w_4$ at 855 124 \pinlabel $w_1$ at 620 332 \pinlabel $w_3$ at 797 280 \pinlabel $w_5$ at 755 85 \endlabellist \centering \includegraphics[width=0.90\textwidth]{DWH-G2-3} \caption{{\small }} \label{fig: DWH-G2-3} \end{figure} When $\alpha$ splits $S_2$ into a connected genus one surface with two boundary components, we first need to generate all possible $DWH$ diagrams along with all possible complete collections of weighted $\omega$-arcs. Such a $DWH$ will have three handles, each of which intersects one or both boundary curves. Since we must have at least one handle intersecting both boundary curves, the possibilities are: all three handles intersect both boundaries (as in Figure \ref{fig: DWH-G2-2}); two handles intersect both boundaries (as in Figure \ref{fig: DWH-G2-3}); or, only one handle intersects both boundary curves. In the latter case, it is straightforward to see that, due to the requirement that the numbers of $++$ and $--$ arcs be equal, there will be boundary parallel $\omega$-arcs. Since this would mean that $|\alpha \cap \beta|$ is not minimal, we conclude that only the first two possibilities occur. Having settled on the $DWH$ diagram of either Figure \ref{fig: DWH-G2-2} or \ref{fig: DWH-G2-3}, we consider other choices for a maximal collection of weighted $\omega$-arcs. For example, Figure \ref{fig: DWH-G2-3}-left and \ref{fig: DWH-G2-3}-right illustrate two different choices (w.r.t. the $DWH$ structure) in the case where we have just two handles intersecting both boundaries. Note that the difference between the two collections is a different choice for the arcs associated with the weights $w_4, w_5, w_6$. However, by interchanging the roles of $w_3$ and $w_4$---in Figure \ref{fig: DWH-G2-3}-right, we view $w_4$ as the co-core of a handle instead of $w_3$---we obtain Figure \ref{fig: DWH-G2-3}-left (after a relabeling of weights). Moreover, there is a similar re-choosing of the co-cores of handles in Figure \ref{fig: DWH-G2-3}-left that will yield the weighted arc collection of Figure \ref{fig: DWH-G2-2}. Finally, for the alternate choice of the $w_4, w_5, w_6$ $\omega$-arcs in Figure \ref{fig: DWH-G2-2}, one can again re-choose the handle co-cores to produce the collection of Figure \ref{fig: DWH-G2-2}. (We leave the details to the reader.) Thus, we need only consider the collection of weighted arcs in Figure \ref{fig: DWH-G2-2} and its associated ILP (\ref{LLP}).
\begin{figure}[htbp] \labellist \small\hair 1pt \pinlabel $(9,10,2)$ at 226 140 \endlabellist \centering \includegraphics[width=0.75\textwidth]{DWH-G2-D4-I12} \caption{{\small }} \label{fig: DWH-G2-D4-I12} \end{figure} As mentioned at the end of \S\ref{sec: DWH}, the optimal value of ILP (\ref{LLP}) is $8$ and $[2,2,2,0,2,0]$ is the unique weight solution. Again, this was determined by utilizing the software package CPLEX \cite{[I]}. Next, we employed MICC to determine that any of the four associated filling pairs are only distance $3$. Continuing, we utilized CPLEX to find all weight solutions for values $P=9,10,11$. CPLEX found four weight solutions for value $9$: $[2, 3, 2, 0, 2, 0]$, $[3, 2, 2, 0, 2, 0]$, $[2, 2, 3, 0, 2, 0]$, and $[2, 2, 2, 0, 3, 0]$. Using MICC we determined all associated intersection $P=9$ filling pairs are distance $3$. Similarly, for values $P=10$ and $P=11$ there are $55$ and $79$ weight solutions, respectively. Again, MICC found only distance $3$ filling pairs. However, using CPLEX to find all weight solutions to ILP (\ref{LLP}) for value $P=12$ we found $150$ solutions, $9$ of which had associated filling pairs that are of distance $4$. To list these solutions: $[w_1 , w_2 , w_3 , w_4 , w_5 , w_6] \in \{ [2, 2, 2, 2, 2, 2]^4,$ $[2, 4, 2, 0, 4, 0]^2,$ $[4, 2, 4, 0, 2, 0]^2,$ $[4, 0, 4, 2, 0, 2]^2,$ $[4, 2, 2, 1, 2, 1],$ $[2, 2, 4, 1, 2, 1],$ $[4, 0, 0, 2, 4, 2]^2,$ $[0, 4, 0, 2, 4, 2]^2,$ $[0, 4, 4, 2, 0, 2]^2\}$. (The power notation is to indicate a multiplicity of distance $4$ filling pairs.) Figure \ref{fig: DWH-G2-D4-I12} is a realization of $[4, 2, 2, 1, 2, 1]$. We credit its original discovery to J. Birman, D. Margalit and the second author \cite{[BMM]}. Finally, we remark that there are undoubtedly duplications up to homeomorphism which we do not determine. To summarize, utilizing the Distance $\geq 4$ Test capability of MICC, we have shown that the filling pair of Figure \ref{fig: DWH-G2-D4-I12} is distance $\geq 4$. To establish distance $4$, we must produce a length $4$ path between $\alpha$ and $\beta$. We refer the reader to the green arc in Figure \ref{fig: DWH-G2-D4-I12} that crosses the $\omega$-arc labeled $2$ and has endpoints in the segments of $\partial_+$ and $\partial_-$ labeled $5$. Thus, this green arc can be understood as a closed curve that intersects $\alpha$ once and $\beta$ once. This closed curve, $\alpha_2$, will represent a vertex $v_2$ of a length $4$ path $\{ v=v_0 , v_1 , v_2 , v_3 , v_4 = w\}$. We can obtain a curve, $\alpha_1$, representing $v_1$ by taking a regular neighborhood of $\alpha_0 \cup \alpha_2$ and letting $\alpha_1$ correspond to its unique boundary curve. (Here, $\alpha = \alpha_0$ and $\alpha_4 = \beta$.) Similarly, $v_3$ is represented by a curve coming from the boundary curve of a regular neighborhood of $\alpha_4 \cup \alpha_2$. The fact that these neighborhoods are each topologically a torus-minus-disc makes all of these curves essential. Thus, we have a length $4$ path.
\begin{figure}[htbp] \labellist \small\hair 2pt \pinlabel $\gamma_1$ at 80 100 \pinlabel $\gamma_1$ at 470 90 \pinlabel $\gamma_1$ at 630 90 \pinlabel $\alpha_2$ at 265 90 \pinlabel $\gamma_2$ at 170 90 \pinlabel $\gamma_2$ at 522 90 \endlabellist \centering \includegraphics[width=0.80\textwidth]{DWH-G3-D4-I29} \caption{{\small }} \label{fig:genus 3} \end{figure} \subsection{Proof of Theorem \ref{theorem: DWH-G3-D4-I29}.} We will prove that the $DWH$ diagram of Figure \ref{fig:genus 3} is a genus $3$ distance $4$ example of a filling pair having intersection $29$. As described in \S\ref{subsec: input}, the input for MICC can be obtained by traversing $\partial_+$ \& $\partial_-$ and reading off the labels of the $\omega$-arcs as they are crossed. The labels of the segments of the boundary curves indicate the identification map that forms the curve $\alpha$. As before, the black $\omega$-arcs join together to form $\beta$. For convenience, we give the MICC input below: \begin{itemize} \item[] {\tt Input top identifications: 1,11,3,27,8,15,7,24,0,10,2,12,4,21,19,17,\\ 24,14,6,23,28,9,16,25,13,5,20,18,16} \item[] {\tt Input bottom identifications: 0,10,2,26,7,14,6,23,28,9,1,11,3,22,20,\\ 18,25,13,5,22,27,8,15,26,12,4,21,19,17} \end{itemize} In our genus $2$ example, we saw that there were $6$ curves representing vertices in the elementary circuit set $\Gamma$. In fact, MICC utilizes the set $\Gamma^\prime$ in its calculation. For the filling pair of Figure \ref{fig:genus 3}, the set $\Gamma^\prime$ has $28$ vertices. We can specify a curve representing a vertex of $\Gamma^\prime$ by a sequence of boundary segment labels. For example, the sequence {\tt [0, 22, 5, 17, 24, 3, 20, 8]} corresponds to a curve in this figure that intersects in cyclic order the boundary segments in this list. The set of green arcs in the figure corresponds to this label sequence and should be understood as a curve, $\gamma_1$, representing $\bar\gamma_1 \in \Gamma^\prime$. By inspection, observe that $(\alpha,\gamma_1)$ is a filling pair. Similarly, the brown arcs of the figure correspond to the label sequence {\tt [3, 24, 17, 7, 19, 26, 13]}. The reader can also check by inspection that this curve, $\gamma_2$, representing $\bar\gamma_2 \in \Gamma^\prime$, is also filling when paired with $\alpha$. Having had MICC determine that all of the curves representing vertices in $\Gamma^\prime$ fill when paired with $\alpha$, it remains for us to find a length $4$ path so as to establish $d(v,w)=4$. The magenta arc of Figure \ref{fig:genus 3} corresponds to a curve, $\alpha_2$, representing a $v_2$ vertex of such a path. Notice $\alpha_2$ intersects $\beta$ only twice. Thus, as a pair they cannot be filling. Taking an appropriate boundary curve of a regular neighborhood of $\alpha_2 \cup \beta$ will yield an $\alpha_3$. Similarly, since $|\alpha \cap \alpha_2|=1$ we know $d(v,v_2)=2$ and we can construct an $\alpha_1$ as we did for our genus $2$ example to give us a representative of $v_1$. \qed \begin{remark} \label{remark: experiment} As previously mentioned, the argument establishing (the genus $2$) Theorem \ref{theorem: DWH-G2-D4-I12} can be thought of as a proof of concept calculation. For higher genus the calculation is exactly the same, although more extensive due to the fact that there are more $DWH$ representations and, thus, a $\Gamma$-calculation with accompanying ILP equations for each $DWH$.
However, if we are willing to settle for an estimate, the calculation can be limited to just one $DWH$ representation and we can use MICC to ``discover'' some rough bounds. This is essentially how the example of Figure \ref{fig:genus 3} was found. We chose a somewhat symmetric genus $3$ $DWH$ and placed an initial set of disjoint proper essential arcs so that any simple closed curve drawn in the $DWH$ intersected these arcs at least $5 \, (= 2 \cdot 3 - 1 = 2g - 1)$ times. (For example, observe that a curve that goes over only the left-most $1$-handle of Figure \ref{fig:genus 3} can be made to intersect the black arcs exactly $5$ times.) Using the {\tt perm} command discussed in \S\ref{subsec: perm} we were able to find identifications of the two boundary curves that connected these arcs into a single curve. From there we could use MICC to compute the distance of the resulting filling pair. When such an attempt yields a distance $3$ pair, we can use the {\tt curves} command discussed in \S\ref{subsec: curves} to list all possible curves in $\Gamma$. By adding in more proper arcs and/or altering their placement and iterating the procedure above, we can reduce the size of $\Gamma$ until we find a distance $4$ filling pair. Such experimentation may be useful in understanding the relationship between the distribution of the proper arcs in the $DWH$ and the possible curves in $\Gamma$. \end{remark} \section{Information for the user of MICC.} \label{sec: MICC commands} In this section we discuss the input format for MICC and the commands for analyzing and manipulating that input. The input is two sequences of numbers which correspond to a ``ladder'' representation of curves in the surface. The commands are {\em genus}, {\em distance}, {\em curves}, {\em matrix}, {\em faces} and {\em perm} (for permutation). We will illustrate each of these features by further developing the example in Figure \ref{fig: DWH-G2-D4-I12}. \subsection{MICC input} \label{subsec: input} Given a $DWH$ presentation of a filling pair of curves $(\alpha,\beta)$ on a surface $S$, as previously discussed in \S\ref{sec: DWH}, we have specified the gluing of $\partial_+$ and $\partial_-$ by cyclically labeling their segments. Assuming we used $k$ labels for the segments of $\partial_\pm$, we now label the $\omega$-arcs of $\beta$, $0$ through $k-1$. Looking at Figure \ref{fig: DWH-G2-D4-I12}, we have a labeling using $0$ through $11$. For the purpose of extracting the MICC input from this example, the labeling assignment could have been done in an arbitrary fashion. However, for aesthetic reasons we have taken care to label the $\omega$-arcs in the cyclic order they occur in $\beta$. With the $\omega$-arcs of $\beta$ in our $DWH$ representation labeled as described above, we can now extract the MICC input for our filling pair. Starting at segment $0$ in $\partial_-$, we traverse this boundary component in the positive direction and record the labels of the $\omega$-arcs as we pass over their endpoints. This sequence of labels will be the input for MICC's ``{\tt Input top identifications}''. Repeating this process on $\partial_+$ we get the sequence of labels that will be the input for MICC's ``{\tt Input bottom identifications}''. Together these two sequences are the basis of a {\em ladder representation}, $L_v(w)$, of a filling pair $(\alpha,\beta)$. A ladder representation is readily understood by again considering our example in Figure \ref{fig: DWH-G2-D4-I12}.
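Before reading off the sequences for this example, we note that the counting constraints of \S\ref{subsec: DWH} give a quick sanity check on any candidate input: each $\omega$-arc has two endpoints, so each label must appear exactly twice across the two sequences, and the number of labels appearing twice in the top sequence ($++$ arcs) must equal the number appearing twice in the bottom sequence ($--$ arcs). A minimal sketch of this check (in Python; illustrative, not part of MICC), applied to the genus $3$ input of \S\ref{sec: DWH-G2-D4-I12}:
\begin{verbatim}
# Validity check for a ladder input; an illustrative sketch, not MICC code.
from collections import Counter

top = [1,11,3,27,8,15,7,24,0,10,2,12,4,21,19,17,24,14,
       6,23,28,9,16,25,13,5,20,18,16]
bottom = [0,10,2,26,7,14,6,23,28,9,1,11,3,22,20,18,25,
          13,5,22,27,8,15,26,12,4,21,19,17]

# Each omega-arc label must occur exactly twice in total.
counts = Counter(top) + Counter(bottom)
assert all(counts[label] == 2 for label in range(len(top)))

# The number of ++ arcs (both endpoints on one boundary curve) must
# equal the number of -- arcs (both endpoints on the other).
plus_plus = sum(1 for c in Counter(top).values() if c == 2)
minus_minus = sum(1 for c in Counter(bottom).values() if c == 2)
assert plus_plus == minus_minus
print(plus_plus, minus_minus)   # -> 2 2
\end{verbatim}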
Starting at the $0$ segments of $\partial_\pm$ and reading off the two label sequences, our input for MICC would be: \begin{itemize} \item[] {\tt Input top identifications: 1,6,11,4,3,2,7,0,5,9,8,7} \item[] {\tt Input bottom identifications: 0,5,10,3,2,1,6,11,4,10,9,8} \end{itemize} The reader should readily grasp the ladder metaphor by considering the representation in Figure \ref{fig: d4-l-m}-left of a regular neighborhood of $(\partial_+ \cup_\sim \partial_-) \cup \{{\rm all} \ \omega \ {\rm arcs} \}$ coming from Figure \ref{fig: DWH-G2-D4-I12}. In this illustration, our two boundary components, which have been glued together to form $\alpha$, are represented by the horizontal segment that has its ends identified. (Our convention forces this identification to always occur in the middle of the $0$-segment of $\partial_\pm$.) Each vertical segment above or below this horizontal $\alpha$ is half of an $\omega$-arc. From left-to-right, there are $12$ labels above these vertical $\omega$-halves corresponding to MICC's {\tt Input top identifications}. Similarly, the $12$ labels below these vertical $\omega$-halves correspond to MICC's {\tt Input bottom identifications}. \begin{figure}[htbp] \labellist \small\hair 2pt \pinlabel $\alpha$ at 1 115 \pinlabel $\beta$ at 25 155 \endlabellist \centering \includegraphics[width=0.80\textwidth]{Ladder-matrix} \caption{{\small }} \label{fig: d4-l-m} \end{figure} It will be useful to the reader to have an understanding of how MICC utilizes a ladder representation. Specifically, for a ladder $L_v(w)$ we have, from left-to-right, $0$ through $k-1$ intersection points of $\alpha$ with $\beta$. To the $i^{\rm th}$ intersection we associate a $1 \times 4$ row vector ${[L_v(w)]}_i = [v^-(i),w^+(i),v^+(i), w^-(i)]$. Figure \ref{fig: d4-l-to-m} illustrates the scheme for determining the values of the $v^\pm {\rm 's}$ and $w^\pm {\rm 's}$. In particular, $v^- (i)= -(i-1)$ and $v^+(i) = i+1$, with these indices taken mod $k$; and $w^+(i)$ and $w^-(i)$ correspond to the adjacent (ladder) vertices along $\beta$ with the parity determined by whether $\beta$ is pointing down ($w^+(i) <0$ and $w^-(i)>0$) or up ($w^+(i) >0$ and $w^-(i)<0$) at the $i^{\rm th}$ intersection. \begin{figure}[htbp] \labellist \small\hair 2pt \pinlabel $\alpha$ at 7 75 \pinlabel $\beta$ at 10 110 \pinlabel $v^-$ at 253 95 \pinlabel $w^+$ at 283 95 \pinlabel $v^+$ at 313 95 \pinlabel $w^-$ at 343 95 \endlabellist \centering \includegraphics[width=0.50\textwidth]{Ladder-scheme} \caption{{\small The illustration depicts how to generate ${[L_v(w)]}_0 = [v^-(0), w^+(0) , v^+(0) , w^-(0)] = [-11, -5, 1 , 7]$.}} \label{fig: d4-l-to-m} \end{figure} Stacking the row vectors ${[L_v(w)]}_i$ in order, we produce a $ k \times 4$ matrix. Again, $k$ is the intersection number of $\alpha$ and $\beta$. Figure \ref{fig: d4-l-m}-right is the associated {\em characteristic matrix}, $[L_v(w)]$, for the ladder in Figure \ref{fig: d4-l-m}-left. It should be readily apparent to the reader that the information in $[L_v(w)]$ is sufficient to reproduce the ladder $L_v(w)$, and $L_v(w)$ is sufficient to reproduce a $DWH$ representation. Thus, up to any permutation of labels and changes of orientation of our curves, $L_v(w)$ is dependent only on the classes $v$ and $w$. Given the prompt, {\tt What would you like to calculate?}, a reply of {\tt matrix} will produce $[L_v(w)]$. \subsection{Genus command} \label{sec: genus} We now make concrete the filling calculation.
We will use $L_v(w)$ and its characteristic matrix $[L_v(w)]$ to compute the minimal genus of the surface that $\alpha$ and $\beta$ fill. For a surface of genus $g$, with a minimally intersecting filling $4$-valent graph, $\alpha \cup \beta$, we have $g= -\frac{1}{2}(|V| - |E| + |F|)+1$, where $V = \alpha \cap \beta$ is the set of vertices and $E$ is the set of edges of the graph $\alpha \cup \beta$. Also, $F = S \setminus (\alpha \cup \beta)$ is a set of $2n$-gon disc regions. Since the graph is $4$-valent, $|E| = 2|V|$. Thus, determining $g$ requires a count of the number of $2n$-gon regions of $F$. Listing the components of $F$ using $[L_v(w)]$ is achieved by specifying their edge-path boundaries in $\alpha \cup \beta$. When traversed, such an edge-path will be {\em alternating}---an edge $e^{\alpha}_1 \in \alpha$ followed by an edge $e^{\beta}_1 \in \beta$ followed by $ e^{\alpha}_2 \in \alpha$ and so on, will be a cyclically ordered set $\{e^{\alpha}_1 , e^{\beta}_1 , \cdots , e^{\alpha}_n, e^{\beta}_n \}$. Thus, $e^{\alpha}_j$ (respectively, $e^{\beta}_j$) starts at the vertex having integer label $\partial^s e^{\alpha}_j$ (respectively, $\partial^s e^{\beta}_j$) and terminates at the vertex having integer label $\partial^t e^{\alpha}_j$ (respectively, $\partial^t e^{\beta}_j$); and, $\partial^t e^{\beta}_{j-1} = \partial^s e^{\alpha}_j$ and $\partial^t e^{\alpha}_j = \partial^s e^{\beta}_j$, all modulo $n$. The entries of the ${[L_v(w)]_i}{\rm 's}$ give us that $$\partial^t e^{\alpha}_j \in \{ v^+(\partial^s e^{\alpha}_j) , |v^- (\partial^s e^{\alpha}_j)| \} = \{ ( \partial^s e^{\alpha}_j -1 ) , (\partial^s e^{\alpha}_j +1 ) \}.$$ Additionally, we have $$ \partial^s e^{\beta}_j \in \{ |w^+ (\partial^t e^{\beta}_j)| , |w^- (\partial^t e^{\beta}_j)| \} \ {\rm and} \ \partial^t e^{\beta}_j \in \{ |w^+ (\partial^s e^{\beta}_j)| , |w^- (\partial^s e^{\beta}_j)| \}.$$ Now fixing a $2n$-gon region and, by convention, traversing its edge path boundary so as to always keep the region to our left, we have the following scheme for finding the terminus endpoint for the {\em next} $e^\alpha$ or $e^\beta$ edge. \begin{itemize} \item[T1--] If $\partial^t e^\alpha_j = v^+(\partial^s e^\alpha_j)$ then $\partial^t e^\beta_j = |w^+ (\partial^t e^\alpha_j)|$ (with $\partial^s e^\beta_j = \partial^t e^\alpha_j$). \item[T2--] If $\partial^t e^\alpha_j = |v^-(\partial^s e^\alpha_j)|$ then $\partial^t e^\beta_j = |w^- (\partial^t e^\alpha_j)|$ (with $\partial^s e^\beta_j = \partial^t e^\alpha_j$). \item[T3--] If $\partial^s e^\beta_j = |w^+(\partial^t e^\beta_j)|$ and $\partial^t e^\beta_j = |w^- (\partial^s e^\beta_j)|$ then $\partial^t e^\alpha_{j+1} = v^+(\partial^t e^\beta_j)$ (with $\partial^s e^\alpha_{j+1} = \partial^t e^\beta_j$). \item[T4--] If $\partial^s e^\beta_j = |w^-(\partial^t e^\beta_j)|$ and $\partial^t e^\beta_j = |w^+ (\partial^s e^\beta_j)|$ then $\partial^t e^\alpha_{j+1} = |v^- (\partial^t e^\beta_j)|$ (with $\partial^s e^\alpha_{j+1} = \partial^t e^\beta_j$). \item[T5--] If $\partial^s e^\beta_j = |w^+(\partial^t e^\beta_j)|$ and $\partial^t e^\beta_j = |w^+ (\partial^s e^\beta_j)|$ then $\partial^t e^\alpha_{j+1} = v^+ (\partial^t e^\beta_j)$ (with $\partial^s e^\alpha_{j+1} = \partial^t e^\beta_j$). \item[T6--] If $\partial^s e^\beta_j = |w^-(\partial^t e^\beta_j)|$ and $\partial^t e^\beta_j = |w^- (\partial^s e^\beta_j)|$ then $\partial^t e^\alpha_{j+1} = |v^- (\partial^t e^\beta_j)|$ (with $\partial^s e^\alpha_{j+1} = \partial^t e^\beta_j$).
\end{itemize} We illustrate this scheme using our $[L_v(w)]$ in Figure \ref{fig: d4-l-m}-right. Starting at vertex $2$ we can traverse the edge between $2(= \partial^s e^\alpha_1 = v^+ ( 1) )$ and $1 (= \partial^t e^\alpha_1 = |v^- (2)|)$. Thus, we have the assumption of T2 for $e^\alpha_1$. This gives us $\partial^t e^\beta_1 = 8 (= |w^- (1 )| = \partial^s e^\alpha_2)$ with $1=\partial^s e^\beta_1 = \partial^t e^\alpha_1$. Since $1 = |w^+ (8)|$, we have the T3 assumption and $9 = \partial^t e^\alpha_2( = v^+(8))$. But this gives us the T1 assumption for $e^\alpha_2$. Thus, we have that $\partial^t e^\beta_2 = |w^+ (9)|$ which is $10$. Since $|w^-(10)| = 9$ we have the T4 assumption, which implies $\partial^t e^\alpha_3 = |v^-(10)| = 9$. Now, we again have the T2 assumption for $e^\alpha_3$ which implies $\partial^t e^\beta_3 = | w^- (9)| = 2$, back where we started. (Refer to the region in Figure~\ref{fig: DWH-G2-D4-I12} containing $(9,10,2)$.) Working out all such boundary edge paths we get a count for $|F|$ and, thus, are able to compute the genus. \subsection{Faces command} \label{subsec: faces} Having traversed all the boundaries of the $2n$-gon regions of $S \setminus (\alpha \cup \beta)$, MICC records this calculation as a vector. Specifically, let $F_{2n}$ be the number of $2n$-gon regions in $F$ for $n \in \{2,3,4, \cdots \}$. Then associated with the graph $\alpha \cup \beta \subset S_g$ we have the vector $[F_4 , F_6 , F_8 , \cdots]$. The output of MICC is actually formatted as $\{4:\ F_4 , \ 6:\ F_6, 8: \ F_8, \cdots \}$. This vector is a solution to the Euler characteristic equation of Lemma 4.1 of \cite{[He1]}. The prompting inquiry and command appear as: {\tt What would you like to calculate? faces}. Additionally, it lists each boundary edge path in a truncated manner---it lists only the $e^\alpha_i {\rm 's}$. For our extended example of Figure \ref{fig: d4-l-to-m}, we would get: \begin{eqnarray} \begin{tabular}{l} ${\tt Vector solution:} \ \{4: 6, 6: 4\}$ \\ $(0, 11, 7)$ \\ $(0, 5, 6)$ \\ $(1, 6)$ \\ $(8, 1)$ \\ $(2, 7)$ \\ $(9, 10, 2)$ \\ $(8, 3)$ \\ $(9, 3, 4)$ \\ $(4, 5)$ \\ $(10, 11)$ \\ \end{tabular} \end{eqnarray} \subsection{Curves command} \label{subsec: curves} An alternate reply to the prompt, {\tt What would you like to calculate?}, is {\tt curves}. MICC applies the test of Theorem \ref{theorem: BMM test} by listing all of the curves representing vertices of $\Gamma^\prime$ and computing the genus of their graphs when paired with $\alpha$. MICC finds all curves representing elements of $\Gamma^\prime$ by applying a classical depth-first search \cite{[Si]} for elementary circuits of the graph $G(A^\prime)$, the dual graph to the proper arcs of $\alpha$ in $S \setminus \beta$. The MICC output is a cyclic sequence of $e^{\alpha}$ edges. That is, for a curve, $\gamma$, representing a vertex $\bar\gamma \in \Gamma^\prime$ and a region $f \in F$, $\gamma \cap f$ will be a collection of proper arcs having their endpoints on $e^{\alpha}$ edges of the boundary of $f$. As we traverse $\gamma$, we will travel between regions by passing through $e^{\alpha}{\rm's}$. Thus, $\bar\gamma$ can be characterized by giving a cyclic listing of these $e^{\alpha}{\rm's}$.
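(As a quick consistency check of the genus computation of \S\ref{sec: genus}, one can recover $g$ directly from the {\tt faces} output above. The following minimal sketch, in Python, is illustrative only and not MICC code.)
\begin{verbatim}
# Genus of the surface filled by the 4-valent graph formed by alpha and beta;
# an illustrative sketch, not MICC code.
def genus(num_vertices, face_vector):
    # For a 4-valent graph, |E| = 2|V|, so the Euler characteristic is
    # chi = |V| - |E| + |F| = |F| - |V|, and g = 1 - chi/2.
    num_faces = sum(face_vector.values())
    chi = num_faces - num_vertices
    return 1 - chi // 2

print(genus(12, {4: 6, 6: 4}))   # -> 2, as expected for our S_2 example
\end{verbatim}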
Continuing with our extended example of Figure \ref{fig: d4-l-to-m}, the output response to {\tt curves} would yield: \begin{eqnarray} \begin{tabular}{l} ${\tt Path \ [0, 7, 2, 9, 3, 8, 1, 6]}$ \\ ${\tt Curve genus:} \ 2$ \\ \\ ${\tt Path \ [2, 10, 11, 7]}$ \\ ${\tt Curve genus:} \ 2$ \\ \\ ${\tt Path \ [1, 6, 5, 4, 3, 8]}$ \\ ${\tt Curve genus:} \ 2$ \\ \\ ${\tt Path \ [0, 5, 4, 9, 2, 7]}$ \\ ${\tt Curve genus:} \ 2$ \\ \\ ${\tt Path \ [0, 5, 4, 9, 10, 11]}$ \\ ${\tt Curve genus:} \ 2$ \\ \\ ${\tt Path \ [0, 11, 10, 9, 3, 8, 1, 6]}$ \\ ${\tt Curve genus:} \ 2$ \\ \end{tabular} \end{eqnarray} \subsection{Distance command} \label{subsec: distance} To determine $d(v,w)$ one replies to the prompt, {\tt What would you like to calculate?}, by typing {\tt distance}. If the genus of any of the pairs $(\alpha,\gamma)$ is less than $g$, then MICC will respond with {\tt Distance: 3}. If the genus of all such pairs is $g$, then MICC will respond with {\tt Distance: 4+}. All possible $\gamma$ representing elements of $\Gamma^\prime$ are determined as described above. \subsection{Perm command} \label{subsec: perm} Finally, MICC has an experimental functionality. As most topologists who have attempted to construct filling pairs on surfaces know, it is difficult to do so while avoiding the production of multi-curves. For example, starting with a $DWH$ representation of the surface, after placing down some collection of $\omega$-arcs, finding an identification of $\partial_+$ and $\partial_-$ so that $\beta$ is a single curve is tedious at best. Fortunately, MICC automates this process. Given any ladder top/bottom identification, it will first determine whether $\beta$ is a single curve or a multi-curve. If it is a multi-curve, then it will produce the inquiring prompt {\tt Would you like to shear this multi-curve?}. With the reply {\tt yes}, it will search through all possible $\partial_+$/$\partial_-$ identifications for those that yield a single $\beta$ curve and print them out along with their distance. If MICC has been given a ladder identification that corresponds to a single $\beta$ curve, one can still find all other $\partial_+$/$\partial_-$ identifications that yield a single curve. When given the prompt {\tt What would you like to calculate?} just reply with {\tt perm} (for permutation). For our extended example, the output would be: \begin{eqnarray} \begin{tabular}{l} ${\tt Curve \ 1 \ Distance: \ 3}$ \\ $[2, 7, 12, 5, 9, 8, 7, 1, 6, 11, 4, 3]$ \\ $[12, 5, 10, 3, 2, 1, 6, 11, 4, 10, 9, 8]$ \\ \\ ${\tt Curve \ 2 \ Distance: \ 3}$ \\ $[5, 9, 8, 7, 1, 6, 11, 4, 3, 2, 7, 12]$ \\ $[12, 5, 10, 3, 2, 1, 6, 11, 4, 10, 9, 8]$ \\ \\ ${\tt Curve \ 3 \ Distance:\ 4+}$ \\ $[1, 6, 11, 4, 3, 2, 7, 12, 5, 9, 8, 7]$ \\ $[12, 5, 10, 3, 2, 1, 6, 11, 4, 10, 9, 8]$ \end{tabular} \end{eqnarray} \section{Concluding remarks.} \label{sec: conclusion} MICC was originally created as a tool that would help in the search for distance $4$ filling pairs on surfaces of genus greater than $2$. We have since realized that it can be set to other uses. For example, it was recently used to find geodesic triangles where any pair of vertices corresponds to a filling pair in minimal position. As mentioned previously, we hope researchers will find additional uses. Remarking on the complexity of the algorithms employed in MICC, the most computationally expensive task involves finding all cycles in the graph $G(A^\prime)$ of \S \ref{subsec: curves}.
We know that the runtime of the process of counting all cycles in a graph is at best exponential. This gives a lower bound on the complexity of finding all cycles in $G(A^\prime)$, and thus on the complexity of the program itself. There are many ways in which we would like to improve the current version of MICC. Currently, MICC is only a partial implementation of the algorithm presented in \cite{[BMM]}. We plan to extend the functionality of MICC to encompass the full scope of the Efficient Geodesic Algorithm of \cite{[BMM]}. Yet this partial implementation is also a manifestation of the complexity barrier involved with the current exponential running time of the graph search. An improved algorithm will allow for more intricate curves to be studied, and parallelization of MICC would help future users to fully utilize their multicore computers in their research. \noindent {\small{\bf Acknowledgements}} The first, third and fourth authors are grateful to Joan Birman, Dan Margalit and the second author for sharing results of their joint work as it developed. Our thanks go to John Ringland, Joaquin Carbonara and the URGE to Compute program at the University at Buffalo and Buffalo State College for supplying a nurturing environment for our work. This work was supported in part by NSF CSUMS grants 0802994 and 0802964 to the University at Buffalo and Buffalo State College. Finally, we thank the referees for alerting us to the recent results in the literature, for suggesting a strengthening of the statement of Corollary \ref{corollary: DWH-G2-D4-I12}, and for numerous other expository improvements. \section{Addendum: All weight solutions of distance $4$ filling pairs in $\mathcal{C}^0(S_2)$ for $12$, $13$ \& $14$ intersections when one curve is non-separating.} Below we list, with multiplicity, all weight solutions to ILP (\ref{LLP}) whose associated filling pairs have distance $4$, for $P$-values $12$, $13$ and $14$.
For $P=w_1 + w_2 + w_3 + w_4 + w_5 + w_6 = 12$: \begin{itemize} \item[] $[2, 2, 2, 2, 2, 2]^4$, $[2, 4, 2, 0, 4, 0]^2$, $[4, 2, 4, 0, 2, 0]^2$, $[4, 0, 4, 2, 0, 2]^2$, $[4, 2, 2, 1, 2, 1]$, $[2, 2, 4, 1, 2, 1]$, $[4, 0, 0, 2, 4, 2]^2$, $[0, 4, 0, 2, 4, 2]^2$, $[0, 4, 4, 2, 0, 2]^2$ \end{itemize} For $P=w_1 + w_2 + w_3 + w_4 + w_5 + w_6 = 13$: \begin{itemize} \item[] $[5, 4, 2, 0, 2, 0]$, $[2, 3, 2, 2, 2, 2]^2$, $[2, 2, 5, 1, 2, 1]$, $[4, 1, 2, 1, 4, 1]$, $[4, 5, 2, 0, 2, 0]$, $[1, 4, 4, 1, 2, 1]$, $[1, 4, 4, 2, 0, 2]$, $[2, 2, 3, 1, 4, 1]$, $[5, 2, 2, 1, 2, 1]$, $[5, 2, 2, 0, 4, 0]$, $[2, 4, 5, 0, 2, 0]$, $[0, 4, 1, 2, 4, 2]$, $[2, 4, 4, 1, 1, 1]$, $[3, 2, 2, 2, 2, 2]$, $[4, 2, 2, 0, 5, 0]$, $[2, 4, 3, 0, 4, 0]^2$, $[3, 4, 2, 0, 4, 0]^2$, $[4, 2, 4, 0, 3, 0]^2$, $[4, 0, 4, 2, 1, 2]$, $[4, 3, 2, 1, 2, 1]$, $[4, 1, 0, 2, 4, 2]$, $[2, 4, 3, 1, 2, 1]^2$, $[2, 2, 2, 1, 5, 1]$, $[2, 2, 4, 1, 3, 1]$, $[4, 1, 2, 2, 2, 2]$, $[2, 3, 4, 1, 2, 1]^2$, $[3, 4, 2, 1, 2, 1]$, $[2, 2, 5, 0, 4, 0]$, $[2, 5, 4, 0, 2, 0]$, $[2, 2, 2, 2, 3, 2]$ $[4, 0, 1, 2, 4, 2]$, $[3, 2, 2, 1, 4, 1]^2$, $[2, 2, 4, 0, 5, 0]$, $[1, 4, 0, 2, 4, 2]$, $[2, 5, 2, 1, 2, 1]$, $[1, 4, 2, 2, 2, 2]$, $[2, 2, 1, 2, 4, 2]$, $[2, 2, 3, 2, 2, 2]^2$, $[0, 4, 4, 2, 1, 2]$, $[4, 2, 1, 1, 4, 1]$, $[4, 1, 4, 2, 0, 2]$, $[2, 2, 4, 2, 1, 2]$, $[4, 3, 4, 0, 2, 0]^2$, $[4, 2, 2, 1, 3, 1]^2$ \end{itemize} For $P=w_1 + w_2 + w_3 + w_4 + w_5 + w_6 = 14$: \begin{itemize} \item[] $[4, 0, 6, 1, 2, 1]^2$, $[3, 2, 4, 2, 1, 2]$, $[4, 0, 2, 3, 2, 3]^2$, $[0, 4, 4, 2, 2, 2]$, $[0, 4, 2, 1, 6, 1]^2$, $[4, 3, 1, 1, 4, 1]$, $[4, 2, 2, 1, 4, 1]^4$, $[0, 4, 2, 2, 4, 2]$, $[2, 2, 6, 0, 4, 0]$, $[1, 4, 2, 2, 3, 2]$ $[4, 2, 4, 2, 0, 2]$, $[5, 3, 2, 0, 4, 0]$, $[2, 2, 4, 2, 2, 2]^3$, $[3, 3, 2, 1, 4, 1]$, $[6, 2, 0, 1, 4, 1]^2$, $[4, 3, 4, 0, 3, 0]^2$, $[2, 4, 2, 2, 2, 2]^3$, $[2, 6, 4, 0, 2, 0]$, $[4, 4, 4, 0, 2, 0]^2$, $[2, 3, 2, 2, 3, 2]$, $[2, 2, 5, 1, 3, 1]$, $[6, 2, 4, 1, 0, 1]^2$, $[2, 2, 6, 1, 2, 1]$, $[4, 4, 2, 1, 2, 1]^2$, $[2, 2, 2, 3, 2, 3]^2$, $[3, 2, 3, 2, 2, 2]$, $[3, 5, 2, 1, 2, 1]$, $[4, 1, 4, 2, 1, 2]$, $[3, 5, 4, 0, 2, 0]$, $[6, 2, 2, 0, 4, 0]$, $[4, 2, 3, 0, 5, 0]$, $[3, 3, 2, 2, 2, 2]^2$, $[4, 1, 3, 2, 2, 2]$, $[2, 2, 3, 2, 3, 2]^2$, $[1, 4, 4, 1, 3, 1]$, $[6, 2, 4, 0, 2, 0]^2$, $[3, 3, 4, 1, 2, 1]$, $[2, 3, 5, 0, 4, 0]$, $[4, 0, 2, 2, 4, 2]$, $[4, 2, 4, 1, 2, 1]$, $[3, 4, 4, 1, 1, 1]$, $[2, 4, 2, 0, 6, 0]^2$, $[0, 4, 6, 1, 2, 1]^2$, $[2, 2, 3, 1, 5, 1]$, $[1, 4, 1, 2, 4, 2]$, $[4, 6, 2, 0, 2, 0]$, $[2, 4, 6, 0, 2, 0]$, $[2, 4, 4, 0, 4, 0]^2$, $[2, 4, 4, 1, 2, 1]^4$, $[4, 2, 2, 0, 6, 0]$, $[2, 2, 0, 3, 4, 3]^2$, $[2, 2, 4, 3, 0, 3]^2$, $[4, 1, 3, 1, 4, 1]$, $[2, 4, 4, 2, 0, 2]$, $[3, 4, 3, 1, 2, 1]$, $[2, 6, 2, 1, 2, 1]$, $[3, 2, 3, 1, 4, 1]$, $[4, 2, 0, 2, 4, 2]$, $[4, 0, 4, 2, 2, 2]$, $[2, 2, 4, 1, 4, 1]^2$, $[4, 2, 2, 2, 2, 2]^3$, $[2, 3, 1, 2, 4, 2]$, $[0, 4, 2, 3, 2, 3]^2$, $[2, 4, 5, 0, 3, 0]$ $[5, 3, 2, 1, 2, 1]$, $[2, 4, 3, 1, 3, 1]$, $[4, 2, 4, 0, 4, 0]^2$, $[4, 3, 2, 1, 3, 1]$, $[4, 4, 2, 0, 4, 0]^2$, $[2, 2, 2, 1, 6, 1]$, $[2, 3, 4, 1, 3, 1]$, $[6, 4, 2, 0, 2, 0]$, $[5, 4, 2, 0, 3, 0]$, $[4, 0, 2, 1, 6, 1]^2$, $[4, 2, 6, 0, 2, 0]^2$, $[4, 5, 3, 0, 2, 0]$ $[2, 4, 2, 1, 4, 1]$, $[4, 2, 3, 1, 3, 1]$, $[6, 2, 2, 1, 2, 1]$, $[2, 2, 2, 2, 4, 2]^3$, $[2, 6, 2, 0, 4, 0]^2$, $[3, 2, 4, 0, 5, 0]$, $[3, 4, 3, 0, 4, 0]^2$, $[2, 6, 4, 1, 0, 1]^2$, $[2, 2, 4, 0, 6, 0]$, $[2, 6, 0, 1, 4, 1]^2$. 
\end{itemize} \begin{figure}[htbp] \centering \includegraphics[width=0.80\textwidth]{3d2} \caption{{\small }} \label{fig: 2,2,2,2,2,2} \end{figure} The weight solution $[2,2,2,2,2,2]$ for $P=12$ is particularly intriguing since it suggests there might be a high level of symmetry. Figure \ref{fig: 2,2,2,2,2,2} is a $3$-dimensional rendering of one of the four associated distance $4$ filling pairs. The aesthetic of this rendering is so appealing that it was placed at the beginning of \cite{[BMM]}. We thank those authors for its use here. The MICC software package, a software tutorial, and all known weight solutions yielding filling pairs having distance $\geq 4$ for $P \leq 25$ with $g=2$ (approximately 72,000 weight solutions) are posted for download at micc.github.io.
\section{\label{sec:intro}Introduction} Late--time cosmic acceleration is best described in general relativity (GR) by the inclusion of cold dark matter (CDM) and a cosmological constant ($\Lambda$) which are both embodied in the $\Lambda$CDM concordance model \cite{Peebles:2002gy,Copeland:2006wr}. In this setting, CDM acts as a stabilizer for galactic structures \cite{Baudis:2016qwx,Bertone:2004pz}, while on larger scales the cosmological constant takes on the role of dark energy and is responsible for the late--time accelerated expansion of the Universe \cite{Riess:1998cb,Perlmutter:1998np}. Putting aside internal consistency issues \cite{RevModPhys.61.1}, this description of the Universe is increasingly coming into tension with observations \cite{DiValentino:2020vhf,DiValentino:2020zio,DiValentino:2020vvd,Staicova:2021ajb}, while the prospect of detecting CDM seems to be ever more elusive \cite{Gaitskell:2004gd}. This has led to the consideration of a host of new physical models through which current and future observations can be more precisely described. These new proposals range from a reconsideration of GR as the fundamental theory of gravitation \cite{Clifton:2011jh,CANTATA:2021ktz,Bahamonde:2021gfp,AlvesBatista:2021gzc,Addazi:2021xuf} to new aspects of CDM \cite{Feng:2010gw,Dodelson:1993je} as well as dynamical dark energy models \cite{Copeland:2006wr,Benisty:2021gde,Benisty:2020otr}, among others. For this reason, it is crucial that observational data sets can be used to produce cosmological parameters without a dependence on a cosmological model, such as $\Lambda$CDM. This can become more intricate as more complex parameters are considered, such as the growth of large scale structure data. To this end, a whole new range of statistical approaches have been utilized to build cosmological parameter profiles without the need of an underlying physical model. These model-independent techniques are nonparametric in that they do not necessitate a physical model to interpret the data, but they do require assumptions in the statistical interpretation of each parameter data set. Reconstruction techniques that are model independent have since become more robust and better understood in terms of late--time cosmological data sets. The most popular of these methods is Gaussian processes (GP) \cite{RasmussenW06}, which have been successfully used to produce the Hubble diagram for various data sets \cite{Busti:2014aoa,Gomez-Valent:2018hwc,Briffa:2020qli} as well as large scale structure growth data \cite{Benisty:2020kdt,LeviSaid:2021yat}. GP assumes every element of a data set is normally distributed and part of a larger stochastic process; by optimizing a covariance function between these points, it can reconstruct the entire evolution of the data set over some range of the data. The immediate issue here is that not all cosmological data is normally distributed. More concretely, there are auxiliary issues with selecting the covariance function, but this can be circumvented with genetic algorithms \cite{Bernardo:2021mfs} to a certain extent. However, GP suffers from a bigger issue with over-fitting for low redshift data \cite{OColgain:2021pyh}, which impacts inferred values of $H_0$ in the Hubble diagram, and of $f\sigma_{8_0}$ for growth data.
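For concreteness, a minimal GP reconstruction can be sketched in a few lines (in Python, assuming the scikit-learn package; the data arrays below are illustrative placeholders rather than real measurements, and the squared-exponential kernel is one common choice, not the one adopted in any of the cited works):
\begin{verbatim}
# A minimal GP reconstruction sketch; illustrative, with placeholder data.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF

z = np.array([0.1, 0.4, 0.9, 1.3, 1.75])          # hypothetical redshifts
Hz = np.array([69.0, 83.0, 117.0, 168.0, 202.0])   # hypothetical H(z) values
err = np.array([12.0, 8.0, 23.0, 17.0, 40.0])      # hypothetical 1-sigma errors

# Squared-exponential covariance; hyperparameters are optimized in fit().
kernel = ConstantKernel(100.0) * RBF(length_scale=2.0)
gp = GaussianProcessRegressor(kernel=kernel, alpha=err**2,
                              n_restarts_optimizer=10)
gp.fit(z.reshape(-1, 1), Hz)

# Reconstruct H(z) on a fine grid, extrapolating to z = 0 to infer H0.
z_new = np.linspace(0.0, 2.0, 100).reshape(-1, 1)
H_rec, H_std = gp.predict(z_new, return_std=True)
print(H_rec[0], H_std[0])   # the reconstructed H0 and its uncertainty
\end{verbatim}
The over-fitting issue mentioned above enters precisely at this extrapolation step, since the inferred $H_0$ inherits the behavior of the optimized kernel at the low redshift end of the data.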
On the other hand, other approaches to reconstruction exist such as the Locally weighted Scatterplot Smoothing together with Simulation and extrapolation method (LOESS-Simex) \cite{Montiel:2014fpa,Escamilla-Rivera:2015odt}. LOESS-Simex provides an independent nonparametric approach that generalizes the technique of least-squares in reconstructing data sets. GP and LOESS have been compared against each other in Ref.~\cite{Escamilla-Rivera:2021rbe}, where their performance in reconstructing data sets was quantified using a number of statistical metrics. Another nonparametric approach that can be used to reconstruct late--time cosmology data is an artificial neural network (ANN). Inspired by biological neural networks, a network is composed of a collection of neurons which are organized into layers \cite{aggarwal2018neural,Wang:2020sxl,Gomez-Vargas:2021zyl}. Each neuron is built on a so-called activation function which governs the neuron output. Thus, every neuron contains a number of hyperparameters (statistical parameters) which must be assigned a value. To do this, the network is trained with observational data which optimize the neural responses. This then produces an ANN that can mimic the observational data at every redshift. The core difference between GP and ANNs is that GP entails a supervised form of learning, meaning that the data is used concurrently as the reconstruction takes place, while ANNs are unsupervised, which means that once the neural network is trained it no longer requires the data to reconstruct a parameter at new redshifts since it is imitating the natural process itself. There have been a number of works on using GP to reconstruct the Hubble diagram with a variety of data sets, as reported in Refs.~\cite{Qi:2016wwb,Lin:2019cuy,Singirikonda:2020ieg,Bengaly:2020neu,Velasquez-Toribio:2021ufm,Reyes:2021owe,vonMarttens:2018bvz,vonMarttens:2020apn,Andrade:2021njl}. In light of the current disagreement between the inferred values of the Hubble constant $(H_0)$, primarily between the local and early--time determinations of $H_0$, we shall be analysing the impacts of $H_0$ priors on our ANN reconstructions of the cosmic expansion in this work. Several local measurements of $H_0$ have been reported (see, for instance, Refs. \cite{Blakeslee:2021rqi,Kourkchi:2020iyz,Schombert:2020pxm,LIGOScientific:2017adf,LIGOScientific:2019zcs,Mukherjee:2019qmm,DES:2019fny,Birrer:2020tax,Pesce:2020xfe,Khetan:2020hmh,Wong:2019kwg,Huang:2019yhh,Riess:2019cxk,Riess:2020fzl,Freedman:2020dne,Freedman:2019jwv,Denzel:2020zuq}), where one could easily observe that the reported late--time measurements seem to agree on $H_0\gtrsim70\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$, at the very least. We will therefore be considering the most precise Cepheid calibration result of $H_0^\mathrm{R20}=73.2\pm1.3\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ \cite{Riess:2020fzl} (R20) along with $H_0^\mathrm{TRGB}=69.8\pm1.88\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ \cite{Freedman:2020dne,Freedman:2019jwv} which has been recently inferred via the Tip of the Red Giant Branch (TRGB) calibration technique. The discrepancy between early--time and late--time measurements of $H_0$ currently stands at the level of $\sim4-6\sigma$ tension \cite{Verde:2019ivm}, with R20 being the most discordant measurement.
It is well--known that the most precise early--time determination of $H_0=67.36\pm0.54\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ \cite{Aghanim:2018eyx} is dependent on the adopted cosmological model, since we are probing the physics of the cosmic microwave background (CMB) with which we can indirectly infer the current expansion rate of the Universe. Several studies have tried to address such a puzzling discrepancy through a number of avenues, such as the introduction of new physics or artifacts of systematic errors \cite{Efstathiou:2021ocp,Mortsell:2021nzg,Mortsell:2021tcx,Freedman:2021ahq}. For instance, Ref. \cite{Beenakker:2021vff} outlines seven key theoretical assumptions which might be broken to alleviate this tension, while modified cosmological frameworks and exotic physics proposals have been put forward to explain such a discrepancy (see, for instance, Refs. \cite{Nunes:2018xbm,Poulin:2018zxs,Zhou:2021xov,DeFelice:2020sdq,DeFelice:2020cpt,Thiele:2021okz,Dainotti:2021pqg,Banihashemi:2018oxo,Guo:2018ans,Poulin:2018cxd,Kreisch:2019yzn,Vattis:2019efj,Lin:2019qug,DiValentino:2019exe,Vagnozzi:2019ezj,Abadi:2020hbr,DiValentino:2019ffd,Vagnozzi:2021gjh,Ye:2020btb,Akarsu:2019hmw,Gonzalez:2020fdy,Vagnozzi:2021tjv,Blinov:2021mdk,vanPutten:2021hlu,DiValentino:2020vnx,Okamatsu:2021jil,Hill:2021yec,delaMacorra:2021hoh,Freese:2021rjq,Wang:2021kxc,Gurzadyan:2021jrw,Huang:2021dba,Braglia:2020auw,Bernardo:2021qhu}, and Refs. \cite{DiValentino:2021izs,Shah:2021onj} for recent reviews). A considerable number of models have tried to alleviate the $H_0$ tension from the point of view of the CMB calibration technique, which is derived directly from the measurement of the angular scale subtended by the sound horizon scale $r_s$. One should remark that all the available probes that adopt the value of $r_s$ within the concordance model of cosmology are characterised by relatively low values of $H_0$, and this was therefore the main driving force behind the proposal of theoretical frameworks which modify the physics prior to last scattering. However, Ref. \cite{Jedamzik:2020zmd} showed that the modification of $r_s$ alone might not be enough to address the $H_0$ tension completely, and we are therefore still lacking a compelling model addressing the Hubble tension while keeping a good fit to all available data. Moreover, Ref. \cite{Baxter:2020qlr} considered the possibility of measuring $H_0$ from the CMB lensing power spectrum in a way that is independent of $r_s$. Indeed, they reported a slightly higher value of $H_0=73.5\pm5.3\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ which is more consistent with late--time measurements of the Hubble constant, although the inferred errors with current data sets are relatively large and should improve with upcoming cosmological surveys. Another important cosmological parameter that is increasingly coming into tension is the growth of large scale structure, where early--time model--dependent measurements appear to be in tension with late--time observations \cite{DiValentino:2020vvd}. We consider this through the $f\sigma_8(z)$ parameter which quantifies the growth rate of cosmological perturbations and matter power spectrum overdensities normalized on scales of $8 h^{-1} {\rm Mpc}$ \cite{Kazantzidis:2018rnb}. This tension has been reported at the level of $\sim2 - 3.5\sigma$ with respect to Planck estimates, and even higher in some studies \cite{Heymans:2020gsg,Hildebrandt:2016iqg,Kuijken:2015vca}.
For this reason we also consider this cosmological parameter in our work, since it is timely to consider a data driven reconstruction of the currently available $f\sigma_8(z)$ data points, which would provide an alternative avenue to understand this reported tension instead of solely focusing on the proposal of alternative models which would normally introduce extra degrees of freedom (see, for instance, Refs. \cite{Barros:2020bgg,Zumalacarregui:2020cjh,Choi:2020pyy,Heimersheim:2020aoc,Chamings:2019kcl,DiValentino:2019jae,DiValentino:2019ffd,Buen-Abad:2017gxg,Abellan:2021bpx,DES:2020mpv,Abellan:2020pmw,Xiao:2019ccl,Pandey:2019plg,Chudaykin:2017ptd,DiValentino:2017oaw,Enqvist:2015ara,Feng:2017nss,Mccarthy:2017yqf,MacCrann:2014wfa} and references therein). The growth rate of matter density perturbations, $f(z)$, is inferred from the peculiar velocities arising in redshift space distortion (RSD) \cite{Kaiser:1987qv} measurements in galaxy redshift surveys; RSD are a velocity--induced mapping from real--space to redshift--space due to the line--of--sight peculiar motions of objects, which introduces anisotropies in their clustering patterns. Such an effect depends on the growth of cosmic structure, hence making RSD probes sensitive to the combination $f\sigma_8(z)$. From the ANN reconstruction results of the cosmic expansion as well as from the cosmic growth history, we will be quantifying the deviations of observational data from the concordance model of cosmology via two formulations of null tests. Rather than a parametric analysis which is carried out via a model--fitting technique of alternative cosmological models, the considered null tests will be based on general consistency relations that the concordance model of cosmology must obey. These consistency relations are also easy to interpret since they are constant if the Universe is described by the $\Lambda$CDM model, regardless of the parameters of the model. In this way, we can determine how well we will be able to rule out the concordance model without prior assumptions on the underlying cosmological parameters. Such a nonparametric analysis avoids biasing the derived results by fitting specific cosmological models, which makes our null test analysis as model--independent as possible. The motivation for this work is to further advance the prospect of model-independent reconstruction techniques for late--time cosmological data using ANNs. We do this by first providing more information about ANNs in our context in Sec.~\ref{sec:method}. We immediately apply this to expansion data in Sec.~\ref{sec:hubble} where we describe how we train our ANN. We perform a similar analysis for $f\sigma_8$ data in Sec.~\ref{sec:fs8} but with some caveats. Finally, we close with a summary and conclusion in Sec.~\ref{sec:conc}.

\section{\label{sec:method}Methodology}

\tikzset{%
  every neuron/.style={ circle, fill=green!70, minimum size=32pt, inner sep=0pt },
  mid neuron/.style={ circle, fill=blue!70, minimum size=32pt, inner sep=0pt },
  last neuron/.style={ circle, fill=red!70, minimum size=32pt, inner sep=0pt },
  neuron missing/.style={ draw=none, fill=none, scale=4, text height=0.333cm,
    execute at begin node=\color{black}$\vdots$ },
}

\begin{figure}[t!]
\centering
\begin{tikzpicture}[shorten >=1pt,->,draw=black!50, node distance=2.5cm]
\tikzstyle{annot} = [text width=5em, text centered]
\foreach \m/\l [count=\y] in {1}
  \node [every neuron/.try, neuron \m/.try] (input-\m) at (0,-1.5*\y) {};
\foreach \m [count=\y] in {1,2,3,missing,4}
  \node [mid neuron/.try, neuron \m/.try ] (hidden-\m) at (5,2-\y*1.5) {};
\foreach \m [count=\y] in {1,2}
  \node [last neuron/.try, neuron \m/.try ] (output-\m) at (10,1.25-2*\y) {};
\foreach \name / \y in {1}
  \path[yshift=-.1cm] node[above] (input+\name) at (0,-1.6\name) {\large$z$};
\foreach \l [count=\i] in {1,2,3,k}
  \node[below] at (hidden-\i) {\large$\mathfrak{n}_\l$};
\foreach \name / \y in {{\large $\Upsilon(z)$} / 1, {\large$\sigma_\Upsilon^{}(z)$} / 2}
  \path[yshift=-.1cm] node[above, right of=hidden-3] (output-\y) at (7.5,1.35-2*\y) {\name};
\foreach \i in {1}
  \foreach \j in {1,2,3,...,4}
    \draw [->] (input-\i) -- (hidden-\j);
\foreach \i in {1,2,3,...,4}
  \foreach \j in {1,2}
    \draw [->] (hidden-\i) -- (output-\j);
\foreach \l [count=\x from 0] in {\large Input, \large Hidden, \large Output}
  \node [align=center, above] at (\x*5,2) {\l \\ \large layer};
\end{tikzpicture}
\caption{The general structure of the adopted ANN, where the input is the redshift of a cosmological parameter $\Upsilon(z)$, and the outputs are the corresponding value and error of $\Upsilon(z)$.}
\label{fig:ANN_structure}
\end{figure}

We will now briefly outline the adopted ANN technique \cite{2015arXiv151107289C} which will be used in the reconstruction of the cosmic expansion data in Sec.~\ref{sec:hubble}, as well as the reconstruction of the cosmological growth rate in Sec.~\ref{sec:fs8}. The general structure of an ANN is composed of an input layer that is connected to a hidden layer, or a series of successive hidden layers, and an output layer, where the elements of each layer are known as neurons. The input layer consists of the features of the data set, which provide the values for the first layer of neurons. On the other hand, the output layer consists of neurons whose values must be evaluated by an error function which measures the difference between the value given by the ANN and the expected one. We should remark that these input, hidden and output layers give structure to the network and set the direction along which input signals propagate towards the network output. We illustrate such a structure for a one hidden layer ANN with $\mathfrak{n}_k$ neurons in Fig. \ref{fig:ANN_structure}, such that for each redshift point at the input layer, it outputs a generic cosmological parameter $\Upsilon(z)$ and its corresponding uncertainty $\sigma_\Upsilon^{}(z)$. The ANN process involves the application of a linear transformation (composed of linear weights and biases) and a nonlinear activation on the input layer, and then the inferred results are propagated to the succeeding layer, until a linear transformation is applied to the output layer. In this way, an input signal will traverse the entire network in a structured manner. With the use of activation functions, which modify the data they receive before transmitting it to the next layer, an ANN is able to model highly complex relationships between features encapsulated in the data.
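To make this structure concrete, the following is a minimal \texttt{PyTorch} sketch of the network in Fig.~\ref{fig:ANN_structure}. It is an illustrative stand-in rather than the \texttt{ReFANN} implementation adopted below, it anticipates the ELU activation introduced in the next paragraph, and the class name and neuron count are hypothetical choices.
\begin{verbatim}
import torch
import torch.nn as nn

class OneHiddenLayerANN(nn.Module):
    """Sketch of Fig. 1: redshift in, parameter value and error out."""
    def __init__(self, n_neurons=1024):
        super().__init__()
        self.hidden = nn.Linear(1, n_neurons)  # linear weights and biases
        self.act = nn.ELU()                    # nonlinear activation
        self.out = nn.Linear(n_neurons, 2)     # outputs: [value, error]

    def forward(self, z):
        # z has shape (batch, 1); the signal traverses the network from
        # the input layer through the hidden layer to the output layer
        return self.out(self.act(self.hidden(z)))

# Evaluate the (untrained) network at a few redshifts
ann = OneHiddenLayerANN()
print(ann(torch.tensor([[0.1], [0.5], [1.0]])))
\end{verbatim}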
We will be considering the Exponential Linear Unit (ELU) \cite{2015arXiv151107289C} as the activation function, specified by
\begin{equation}
f(x) = \begin{cases} {x} & \text{if } x>0 \\ {\alpha(e^x-1)} & \text{if } x \leq 0 \end{cases}\,,
\end{equation}
where $\alpha$ is a positive hyperparameter that controls the value to which an ELU saturates for negative net inputs, which we set to unity. There is a range of standard activation functions, with the ELU being a very popular choice. We should remark that an ANN model is characterised by several intrinsic parameters, better known as hyperparameters, such as the number of layers, number of neurons, and optimiser algorithm, among others. As already mentioned, in order to optimise the parameters of an ANN, the difference between the predicted result $\hat{\mathcal{Y}}$ and the ground truth $\mathcal{Y}$, quantified by a specific loss function, is minimised during the training process of the ANN. The loss function is minimised by an optimisation algorithm such as gradient descent, combined with the back--propagation algorithm to calculate gradients. We shall be considering the mean absolute error loss function in the following analyses, better known as the L1 loss function, which minimises the absolute differences between $\hat{\mathcal{Y}}$ and $\mathcal{Y}$. However, we also consider the ANN reconstructions with the mean squared error (MSE) loss function that minimises the squared differences between $\hat{\mathcal{Y}}$ and $\mathcal{Y}$, along with the smooth L1 (SL1) loss function which uses a squared term if the absolute error falls below unity and an absolute term otherwise. These loss functions quantify the degree to which the input data is modeled by the output reconstruction. The network parameters are updated by a gradient--based optimiser in each iteration. In our work, we adopt the Adam algorithm \cite{2014arXiv1412.6980K} as our optimiser, which also accelerates the convergence. It should be noted that an ANN with at least one hidden layer with a finite number of neurons can approximate any continuous function if the activation function is continuous and non--linear \cite{HORNIK1990551}; ANNs are therefore applicable to cosmological data sets, as these requirements are fulfilled here. We shall be using the \texttt{PyTorch}\footnote{\url{https://pytorch.org/docs/master/index.html}} based code for reconstructing functions from data called Reconstruct Functions with ANN (\texttt{ReFANN}\footnote{\url{https://github.com/Guo-Jian-Wang/refann}}) \cite{Wang:2019vxv}. Although this code can be run on CPUs, we ran it on GPUs due to the significant decrease in computational time. We further make use of batch normalisation \cite{2015arXiv150203167I}, which is implemented prior to every nonlinear layer, and which also accelerates the convergence by stabilising the distributions of the ANN variables.

\section{\label{sec:hubble}Hubble data}

We first consider the generation of the $H(z)$ mock data points and the ANN training process in Sec. \ref{sec:hubble_training}, which will be used for structuring the number of layers and neurons of the ANN. The ANN will be used to reconstruct the Hubble diagram and perform a null test of the concordance model of cosmology.
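Before turning to the data, the optimisation loop just described can be sketched as follows. This is illustrative only, with hypothetical stand-in tensors in place of the mock data and an arbitrary placeholder learning-rate schedule; it is not the \texttt{ReFANN} pipeline itself.
\begin{verbatim}
import torch
import torch.nn as nn

# One-hidden-layer network with batch normalisation placed before the
# nonlinearity, as described in the methodology above
model = nn.Sequential(
    nn.Linear(1, 1024),
    nn.BatchNorm1d(1024),
    nn.ELU(),
    nn.Linear(1024, 2),
)
loss_fn = nn.L1Loss()  # alternatives: nn.MSELoss(), nn.SmoothL1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200,
                                            gamma=0.5)  # decaying rate

z = torch.rand(47, 1) * 2.36  # stand-in redshifts
y = torch.rand(47, 2)         # stand-in [value, error] targets

for step in range(1000):      # the actual analysis uses 10^5 iterations
    optimizer.zero_grad()
    loss = loss_fn(model(z), y)  # difference between prediction and target
    loss.backward()              # back-propagation of the gradients
    optimizer.step()             # gradient-based parameter update
    scheduler.step()
\end{verbatim}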
\subsection{\label{sec:hubble_training}Simulation and training of Hubble data}

\begin{figure}
\centering
\includegraphics[width=0.485\linewidth]{Hz_mock_dist.pdf}
\includegraphics[width=0.485\linewidth]{Hz_error.pdf}
\caption{In the left panel we illustrate the distribution of the observational $H(z)$ data points and the assumed redshift distribution function, whereas in the right panel we depict the error of observational $H(z)$ along with their linear regression best-fit.}
\label{fig:Hz_data}
\end{figure}

The adopted network model was optimised by using forty--seven mock $H(z)$ data points (identical to the number of observational $H(z)$ data points introduced in Sec.~\ref{sec:ANN_Hz_Omz}) which have been simulated in the context of the spatially--flat $\Lambda$CDM model in which
\begin{equation}
H(z)=H_0^\mathrm{mock}\sqrt{\Omega_{m,0}^\mathrm{mock}(1+z)^3+1-\Omega_{m,0}^\mathrm{mock}}\,,
\end{equation}
where we assumed that $H_0^{\mathrm{mock}}=70\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ and $\Omega_{m,0}^\mathrm{mock}=0.3$. We should remark that our final results are independent of the choice of these fiducial cosmological parameter values since we are simply using this model to structure the network rather than actually train it. Furthermore, the redshift distribution of the observational $H(z)$ data points was assumed to follow a Gamma distribution, specified by
\begin{equation}\label{eq:gamma_dist}
p(x;\,\alpha,\,\lambda)=\frac{\lambda^\alpha}{\Gamma(\alpha)}x^{\alpha-1}e^{-\lambda x}\,,
\end{equation}
where $\alpha$ and $\lambda$ are free parameters that are fitted with the considered observational data, while the gamma function is given by
\begin{equation}
\Gamma(\alpha)=\int_0^{\infty} e^{-t}t^{\alpha-1}\,\mathrm{d}t\,.
\end{equation}
This distribution is known to provide a good description of the redshift distribution of such data sets. The distribution of the observational $H(z)$ data points and the fitted redshift distribution function are shown in the left panel of Fig. \ref{fig:Hz_data}. In order to generate a mock data set of $H(z)$, we also need to take into account the uncertainty of the observational $H(z)$ data points. In the right panel of Fig. \ref{fig:Hz_data}, we illustrate the errors of the considered $H(z)$ data set as a function of redshift. As expected, the uncertainties tend to increase with the redshift. Consequently, for the generation of the $H(z)$ mock data set, we will be assuming a linear model \cite{Ma:2010mr,Velasquez-Toribio:2021ufm} for the error of $H(z)$, fitting a first degree polynomial in redshift. The mean fitting function is found to be $\sigma_H^0(z)=14.25+3.42z$, while the symmetric upper and lower error bands are respectively specified by $\sigma_H^+(z)=21.37+10.79z$ and $\sigma_H^-(z)=7.14-3.95z$. These fitting functions are also depicted in the right panel of Fig. \ref{fig:Hz_data}, in which one could easily observe that the majority of the data points are included in the area enclosed by the $\sigma_H^+(z)$ and $\sigma_H^-(z)$ functions (dashed lines). We again emphasize that the purpose of this exercise is to produce a mock data set that somewhat mimics the observed points so that we can structure our ANN appropriately before proceeding with the training.
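As an illustration of this recipe, the following sketch generates such a mock set. The Gamma shape parameters are illustrative placeholders (the fitted values are not quoted here), and the final two lines anticipate the error-scattering rule spelled out in the next paragraph.
\begin{verbatim}
import numpy as np
from scipy.stats import gamma

rng = np.random.default_rng(1)
N = 47                           # as many mocks as observed points
H0_mock, Om0_mock = 70.0, 0.3    # fiducial flat LCDM parameters

# Redshift distribution p(x) ~ x^(alpha-1) e^(-lam x); the shape
# values below are placeholders for the fitted parameters
alpha_fit, lam_fit = 2.0, 1.5
z = gamma.rvs(alpha_fit, scale=1.0 / lam_fit, size=N, random_state=1)

H_fid = H0_mock * np.sqrt(Om0_mock * (1 + z)**3 + 1 - Om0_mock)

sig0 = 14.25 + 3.42 * z          # mean error fit
sigp = 21.37 + 10.79 * z         # upper error band
sigm = 7.14 - 3.95 * z           # lower error band

# Scatter the errors and central values (see the next paragraph)
sig_tilde = np.abs(rng.normal(sig0, (sigp - sigm) / 4.0))
H_sim = H_fid + rng.normal(0.0, sig_tilde)
\end{verbatim}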
We can now randomly generate the error for our $H(z)$ mock data points, in which we assume that the error $\tilde{\sigma}_H^{}(z)$ follows the Gaussian distribution $\mathcal{N}(\sigma_H^0(z),\,\varepsilon_H^{}(z))$, where $\varepsilon_H^{}(z)=(\sigma_H^+(z)-\sigma_H^-(z))/4$, such that $\tilde{\sigma}_H^{}(z)$ falls in the area with a probability of 95\%. Therefore, every simulated Hubble parameter data point $H_\mathrm{sim}^{}(z_i)$ at redshift $z_i$, is computed via $H_\mathrm{sim}^{}(z_i)=H_\mathrm{fid}^{}(z_i)+\Delta H_i$, with the associated uncertainty of $\tilde{\sigma}_H^{}(z_i)$, where $\Delta H_i$ is determined via $\mathcal{N}(0,\,\tilde{\sigma}_H^{}(z_i))$.

\begin{figure}
\centering
\includegraphics[width=0.485\linewidth]{Hz_CC_BAO_risk.pdf}
\includegraphics[width=0.485\linewidth]{Hz_CC_BAO_loss.pdf}
\caption{The normalised risk for $H(z)$ ANN models that have one hidden layer and a corresponding number of neurons in their hidden layer is illustrated in the left panel, while the evolution of the L1, SL1 and MSE loss functions is shown in the right panel.}
\label{fig:Hz_risk_loss}
\end{figure}

Consequently, the data that is adopted to train the network is simulated according to the redshift distribution of the $H(z)$ data points with an assumed spatially--flat $\Lambda$CDM model, and we further consider the same number of mock data points as the number of observational data points. As depicted in Fig. \ref{fig:ANN_structure}, the input of the neural network is the redshift, while the output is the corresponding Hubble parameter and its respective error at that redshift. In the training process, the parameters of the neural network are determined via a learning process on the adopted data set. In our case, the entire set of $H(z)$ mock data points was used to train the network, and we therefore do not split off validation and test sets as in standard supervised learning techniques. For the determination of the optimal network model, we search for the optimal number of hidden layers, the number of neurons per hidden layer, and the choice of loss function. The initial learning rate, which describes the degree to which successive iterations override the network parameter values, is set to 0.01 and decreases with the number of iterations, while the training batch size is set to half of the number of the $H(z)$ data points. The network is trained for $10^5$ iterations, after which the loss function no longer decreases, as clearly illustrated in the right panel of Fig. \ref{fig:Hz_risk_loss} where the loss function is close to its asymptote.

\begin{figure}
\centering
\includegraphics[width=0.485\linewidth]{Hz_ANN_layers.pdf}
\includegraphics[width=0.485\linewidth]{Hz_ANN_neurons.pdf}
\includegraphics[width=0.485\linewidth]{Hz_ANN_data_compare.pdf}
\includegraphics[width=0.485\linewidth]{Hz_ANN_H0_priors.pdf}
\caption{We depict the L1 $H(z)$ ANN reconstructions with different number of layers and neurons in the top-left and top-right panels, respectively. In the bottom-left panel we depict the $H(z)$ ANN reconstructions adopting the L1, SL1 and MSE loss functions, whereas in the bottom-right panel we illustrate the $H(z)$ ANN reconstructions when considering the L1 loss function without an $H_0$ prior (L1), with the R20 prior (R20) and the TRGB prior (TRGB).
In all panels, the corresponding $H(z)$ data points are also included.}
\label{fig:Hz_ANN}
\end{figure}

\begin{figure}
\centering
\includegraphics[width=0.785\linewidth]{H0_whisker_plot.pdf}
\caption{We illustrate the inferred $1\sigma$ constraint on $H_0$ from the $H(z)$ ANN reconstructions as indicated on the vertical axis. The green and red bands illustrate the $1\sigma$ local measurements of $H_0^\mathrm{TRGB}$ and $H_0^\mathrm{R20}$, respectively.}
\label{fig:H0_ANN}
\end{figure}

For the training procedure, we consider the mock $H(z)$ data set, with the number of hidden layers varying from one to three; for each depth, eight network models are trained with $2^n$ neurons, where $7\leq n\leq 14$. We therefore train a total of twenty-four network models, from which we will determine the optimal network configuration. This set of trained networks can then be used to select the optimal network structure on which to train the real data. Indeed, to select the optimal number of hidden layers of the network we adopt the risk statistic \cite{Wasserman:2001ng}
\begin{equation}
\mathrm{risk}=\sum_{i=1}^N\mathrm{bias}_i^2+\sum_{i=1}^N\mathrm{variance}_i^{}=\sum_{i=1}^N\left[H(z_i)-\bar{H}(z_i)\right]^2+\sum_{i=1}^N\sigma^2\left(H(z_i)\right)\,,
\end{equation}
where $N$ is the number of $H(z)$ data points, and $\bar{H}(z)$ denotes the fiducial value of $H(z)$. We then calculate the risk of the eight models for each network structure, from which we inferred that the network model with one hidden layer minimises the risk with respect to the two and three hidden layer networks. We should remark that this was the case for the L1, MSE and SL1 loss functions. The degree of complexity of the ANN should reflect the structure of the physical process which is producing the data. Given that we are using expansion data alone, the lack of complexity in the data seems to favour a simpler one-layer ANN structure. This is to be expected since we are only taking $H(z)$ data. The conclusion would naturally be altered if we had coupled this to other cosmological parameters. We now determine the optimal number of neurons with one hidden layer via the consideration of eight network models with a varying number of neurons. We illustrate the normalised risk values $\mathcal{R}$ for each network model in the left panel of Fig.~\ref{fig:Hz_risk_loss}, in which one could clearly observe that 2048 neurons minimise the risk function. Such a result was also found to be independent of the adopted loss function. Consequently, the network structure with one hidden layer and 2048 neurons was found to be the optimal network structure and will therefore be adopted in our $H(z)$ reconstructions. Furthermore, the L1 loss function shall be adopted in our ANN reconstruction of the observational $H(z)$ data set, since this was characterised by the lowest risk statistic with respect to the MSE and SL1 loss function networks.

\begin{figure}
\centering
\includegraphics[width=0.485\linewidth]{Om_z_L1.pdf}
\includegraphics[width=0.485\linewidth]{Om_z_R20.pdf}
\includegraphics[width=0.485\linewidth]{Om_z_TRGB.pdf}
\caption{Redshift evolution of the $\mathcal{O}m(z)$ null test without an $H_0$ prior (top--left), with the R20 prior (top--right) and with the TRGB prior (bottom).
The \textit{Planck} constraints in the $\Lambda$CDM and $w$CDM frameworks are also illustrated for comparative purposes.}
\label{fig:Om_z}
\end{figure}

\subsection{\label{sec:ANN_Hz_Omz}ANN \texorpdfstring{$H(z)$}{} reconstructions and \texorpdfstring{$\mathcal{O}m(z)$}{} null test}

Further to the discussion in Sec.~\ref{sec:hubble_training}, an ANN with the L1 loss function having one hidden layer and 2048 neurons will be used for the reconstructions of the $H(z)$ observational data. The considered forty--seven $H(z)$ data points in the range of $0.07< z < 2.36$ were adopted from Refs. \cite{Jimenez:2003iv,Simon:2004tf,Stern:2009ep,Moresco:2012jh,Zhang:2012mp,Moresco:2015cya,Moresco:2016mzx,Ratsimbazafy:2017vga} in the case of cosmic chronometers (CC) \cite{Jimenez:2001gg} data, while Refs. \cite{Zhao:2018gvb,Gaztanaga:2008xz,Blake:2012pj,Samushia:2012iq,Xu:2012fw,BOSS:2014hwf,BOSS:2013igd,BOSS:2016wmc} were consulted in the case of $H(z)$ measurements extracted from the detection of radial baryonic acoustic oscillation (BAO) features under a $\Lambda$CDM prior. Henceforth, we will be referring to this joint CC and BAO $H(z)$ data set by CC$+$BAO. Furthermore, we occasionally make use of two independent local measurements of the Hubble constant, specified by $H_0^\mathrm{R20}=73.2\pm1.3\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ \cite{Riess:2020fzl} (R20) in the case of the Cepheid distance scale and $H_0^\mathrm{TRGB}=69.8\pm1.88\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ \cite{Freedman:2020dne,Freedman:2019jwv} inferred via the Tip of the Red Giant Branch (TRGB) technique. We use these measurements of $H_0$ to put priors on the Hubble data in the training process. We illustrate the effect of the number of hidden layers on the ANN reconstruction of the observational $H(z)$ data in the top--left panel of Fig. \ref{fig:Hz_ANN}, in which an increase in the number of hidden layers tends to introduce oscillatory features that accommodate the nearest neighbouring points. As depicted in the top--right panel of Fig. \ref{fig:Hz_ANN}, different numbers of neurons lead to only minute differences in the reconstructions, while different loss functions are characterised by different reconstructions of $H(z)$, as shown in the bottom--left panel of Fig. \ref{fig:Hz_ANN}. The inferred Hubble constant constraints with the MSE, L1, and SL1 loss functions were found to be $H_0=69.76\pm14.82\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$, $H_0=68.93\pm11.90\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$, and $H_0=69.18\pm13.92\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$, respectively. Indeed, the ANN determination of $H_0$ was found to be nearly independent of the adopted loss function, as illustrated in Fig. \ref{fig:H0_ANN}. These characteristics differ from the GP technique, in which the extrapolated Hubble parameter at redshift zero is known to be dependent on the chosen kernel function \cite{Briffa:2020qli} and also on the range of hyperparameter values \cite{Sun:2021pbu}. We also analyse the effect of two Hubble constant prior values on our ANN determination of $H_0$ and the redshift evolution of the ANN reconstructed $H(z)$ function. The ANN reconstructions with the inclusion of the R20 and the TRGB priors are illustrated in the bottom--right panel of Fig. \ref{fig:Hz_ANN}, in which we also compare with the $H(z)$ ANN reconstruction without an $H_0$ prior.
The derived Hubble constant constraints were found to be $H_0=70.24\pm10.08\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ with the R20 prior, and $H_0=69.47\pm12.37\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$ with the TRGB prior. When compared with the L1 reconstructed value of $H_0=68.93\pm11.90\,\mathrm{km}\,\mathrm{s}^{-1}\mathrm{Mpc}^{-1}$, we could observe that the mean value of the Hubble constant was affected by each prior, although the constraints are in excellent agreement with one another, as illustrated in Fig. \ref{fig:H0_ANN}. Moreover, the reconstructed $H(z)$ functions are very similar to each other as well. Hence, the ANN reconstructions of $H(z)$ are nearly independent of an $H_0$ prior. This outcome has also been reported in Ref. \cite{Wang:2019vxv}, which distinguishes the ANN method of reconstruction from other machine learning techniques, such as GP, in which an $H_0$ prior affects the reconstructed functions \cite{Briffa:2020qli}. We now consider a cosmic expansion diagnostic test of the $\Lambda$CDM model which is formulated in terms of the Hubble parameter and simplifies to a constant if the Universe is exactly described by the $\Lambda$CDM model. Deviations from the concordance model of cosmology via the reconstruction of cosmographic functions \cite{Velasquez-Toribio:2021ufm,Reyes:2021owe} and null tests \cite{Zunckel:2008ti,Sahni:2008xx,Shafieloo:2009hi,Clarkson:2007pz,Qi:2016wwb,Qi:2018pej,Bengaly:2020neu} have been exhaustively explored in the literature. The considered cosmic expansion consistency relation is given by \cite{Sahni:2008xx,Shafieloo:2009hi}
\begin{equation}\label{eq:Om_z}
\mathcal{O}m(z)=\frac{E^2(z)-1}{(1+z)^3-1}\,,
\end{equation}
where $E^2(z)=H^2(z)/H_0^2$. In the specific case of the $\Lambda$CDM model, this diagnostic function reduces to the matter density parameter $\Omega_{m,0}$, independent of the redshift. Hence, any non--constant evolution of $\mathcal{O}m(z)$ could be an indication of modified gravity or any other dark energy modification of the concordance model of cosmology. For instance, for the most elementary phenomenology of dark energy parametrised via a constant dark energy equation of state parameter $w$ within the framework of the $w$CDM model, a positive slope of the $\mathcal{O}m(z)$ diagnostic function is a characteristic of a phantom equation of state $(w<-1)$, while a negative slope is related to the quintessence dark energy model specified by $w>-1$.

\begin{figure}
\centering
\includegraphics[height=0.329\linewidth]{fs8z_mock_dist.pdf}
\includegraphics[height=0.329\linewidth]{fs8z_error.pdf}
\caption{In the left panel we illustrate the distribution of the observational $f\sigma_8(z)$ data points and the assumed redshift distribution function, whereas in the right panel we depict the error of observational $f\sigma_8(z)$ along with their linear regression best-fit.}
\label{fig:fs8z_data}
\end{figure}

We have used the inferred ANN $H(z)$ reconstructions to determine nonparametric model--independent reconstructions of the $\mathcal{O}m(z)$ function by using Eq. (\ref{eq:Om_z}). In the panels of Fig. \ref{fig:Om_z} we show the results of the ANN reconstructions of the considered cosmic expansion diagnostic function. In the top--left panel we have not assumed an $H_0$ prior, while in the other panels of Fig. \ref{fig:Om_z} we adopted the R20 and TRGB priors.
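For concreteness, propagating a reconstructed expansion history through Eq.~(\ref{eq:Om_z}) amounts to the following short sketch, where the input arrays are hypothetical stand-ins for the ANN outputs.
\begin{verbatim}
import numpy as np

def om_diagnostic(z, H, H0):
    """Om(z) = (E^2 - 1) / ((1+z)^3 - 1); constant (= Om0) in LCDM."""
    E2 = (H / H0) ** 2
    return (E2 - 1.0) / ((1.0 + z) ** 3 - 1.0)

# Sanity check with a fiducial flat LCDM input (avoiding z = 0, where
# the diagnostic is indeterminate): the output is ~0.3 at all redshifts
z_rec = np.linspace(0.05, 2.4, 100)
H_rec = 70.0 * np.sqrt(0.3 * (1.0 + z_rec) ** 3 + 0.7)
print(om_diagnostic(z_rec, H_rec, H0=70.0))
\end{verbatim}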
One could easily observe that when we incorporate an $H_0$ prior, we get an improvement in the derived evolution of the ANN reconstruction, particularly at higher redshifts. This occurs due to the extra low-redshift point with small uncertainties, the effect of which propagates to the high redshift region. At low redshifts $(z\lesssim0.3)$, the ANN reconstruction is not well constrained, while at higher redshifts the profile of the ANN $\mathcal{O}m(z)$ reconstruction tends to be characterised by a negative slope; however, an improvement in the data is required in order to distinguish between a preference for either phantom or non--phantom dark energy models (see, for instance, Refs. \cite{Qi:2016wwb,Qi:2018pej,Bengaly:2020neu} for similar conclusions). In Fig. \ref{fig:Om_z} we also illustrate the redshift evolution of the cosmic expansion null test within the $\Lambda$CDM and $w$CDM models when adopting the current \textit{Planck} constraints \cite{Aghanim:2018eyx}. All ANN reconstructions are in good agreement with the $\Lambda$CDM and $w$CDM models, although at $z\gtrsim2$ the ANN reconstructions deviate slightly from these models when adopting the TRGB or R20 $H_0$ priors.

\section{\label{sec:fs8}Cosmic growth data}

We now focus on the cosmological growth evolution via observational $f\sigma_8^{}(z)$ data. We follow the ANN training procedure outlined in Sec.~\ref{sec:hubble_training}, generating $f\sigma_8^{}(z)$ mock data points in Sec.~\ref{sec:fs8_training}, which we then use for the ANN reconstruction of observational $f\sigma_8^{}(z)$ data. We will further consider a diagnostic test of the concordance model of cosmology at the end of Sec.~\ref{sec:ANN_fs8z_Omz}.

\subsection{\label{sec:fs8_training}Simulation and training of \texorpdfstring{$f\sigma_8^{}(z)$}{} data}

\begin{figure}
\centering
\includegraphics[width=0.485\linewidth]{fs8_risk.pdf}
\includegraphics[width=0.485\linewidth]{fs8_loss.pdf}
\caption{The normalised risk for $f\sigma_8(z)$ ANN models that have one hidden layer and a corresponding number of neurons in their hidden layer is illustrated in the left panel, while the evolution of the L1, SL1 and MSE loss functions is shown in the right panel.}
\label{fig:fs8_risk_loss}
\end{figure}

The adopted network model was optimised by using sixty--three mock $f\sigma_8(z)$ data points, which is identical to the number of observational data points as summarised in Ref. \cite{Kazantzidis:2018rnb} that were originally reported in Refs. \cite{Blake:2012pj,Beutler:2012px,delaTorre:2013rpa,Chuang:2012qt,WMAP:2010qai,Blake:2013nif,Sanchez:2013tga,BOSS:2013rlg,Howlett:2014opa,Feix:2015dla,Okumura:2015lvp,WMAP:2012nax,BOSS:2016psr,Gil-Marin:2016wya,Huterer:2016uyq,Pezzotta:2016gbo,Howlett:2017asq,Mohammad:2017lzz,Shi:2017qpr,Zhao:2018gvb,Gil-Marin:2018cgo,BOSS:2016wmc,Wang:2017wia,Hou:2018yny,Feix:2016qhh}. For the generated $f\sigma_8(z)$ data points, the spatially--flat $\Lambda$CDM model will be adopted, in which the evolution of the linear growth rate $f(a)$, with respect to the scale factor $a=(1+z)^{-1}$, is governed by the equation
\begin{equation}
\frac{\mathrm{d}f(a)}{\mathrm{d}\ln a}+f^2(a)+\left(2+\frac{1}{2}\frac{\mathrm{d}\ln H^2(a)}{\mathrm{d}\ln a}\right)f(a)-\frac{3}{2}\Omega_m(a)=0\,,
\end{equation}
where $\Omega_m(a)=\Omega_{m,0}a^{-3}H_0^2/H^2(a)$, and $f(a)\equiv\mathrm{d}\ln\delta_m(a)/\mathrm{d}\ln a$ is the logarithmic derivative of the growth of matter perturbations $\delta_m\equiv\delta\rho_m/\rho_m$.
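As a cross--check of this fiducial setup, the growth--rate equation above can also be integrated numerically; the following sketch assumes the fiducial values $\Omega_{m,0}=0.3$ and $\sigma_{8,0}=0.8$ adopted for the mocks below, and uses $\mathrm{d}\ln\delta_m/\mathrm{d}\ln a=f$ to track the perturbation amplitude.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

Om0, sig80 = 0.3, 0.8           # fiducial flat LCDM values

def E2(a):                      # H^2(a) / H0^2
    return Om0 * a**-3 + 1.0 - Om0

def rhs(lna, y):
    a = np.exp(lna)
    f, lndelta = y
    Om_a = Om0 * a**-3 / E2(a)
    dlnH2 = -3.0 * Om0 * a**-3 / E2(a)   # d ln H^2 / d ln a
    df = -f**2 - (2.0 + 0.5 * dlnH2) * f + 1.5 * Om_a
    return [df, f]              # d(ln delta_m)/d(ln a) = f

# Start deep in matter domination, where f -> 1 and delta_m ~ a
lna_i = np.log(1e-3)
sol = solve_ivp(rhs, [lna_i, 0.0], [1.0, lna_i], dense_output=True)
lndelta_0 = sol.y[1, -1]        # ln delta_m today (a = 1)

def fsigma8(a):
    f, lndelta = sol.sol(np.log(a))
    return f * sig80 * np.exp(lndelta - lndelta_0)

print(fsigma8(1.0))             # ~0.4 for these fiducial values
\end{verbatim}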
We should remark that the above growth equation is applicable on sub--horizon scales and in the linear regime. We will now consider the evolution of the matter density contrast $\delta_m(a)$, which is governed by
\begin{equation}
\delta_m^{\prime\prime}(a)+\left(\frac{3}{a}+\frac{H^\prime(a)}{H(a)}\right)\delta_m^\prime(a)-\frac{3}{2}\frac{\Omega_m(a)}{a^2}\delta_m(a)=0\,,
\end{equation}
where the prime denotes a derivative with respect to the scale factor. Consequently, the evolution of $\delta_m(a)$ can be expressed in terms of the Gaussian hypergeometric function ${}_{2}F_{1}(a,\,b;\,c;\,d)$ \cite{abramowitz1988handbook}, given by
\begin{equation}
\delta_m(a)=a\,{}_{2}F_{1}\left[\frac{1}{3},1;\frac{11}{6};a^3\left(1-\frac{1}{\Omega_{m,0}}\right)\right]\,.
\end{equation}
Since the RSD data is expressed in terms of $f\sigma_8(z)=f(z)\sigma_8(z)$, we shall now consider this quantity as follows
\begin{equation}
f\sigma_8^{}(a)=a\frac{\delta_m^\prime(a)}{\delta_m(a_0)}\sigma_{8,0}^{}\,,
\end{equation}
such that the present day linear theory amplitude of matter fluctuations averaged in spheres of radius $8\,h^{-1}\mathrm{Mpc}$ is specified by \cite{Arjona:2020kco}
\begin{equation}
\sigma_{8,0}^{}=\int_0^1\frac{f\sigma_8(x)}{x}\mathrm{d}x\,.
\end{equation}
For our mock data set, we considered $\sigma_{8,0}^{\mathrm{mock}}=0.8$ and $\Omega_{m,0}^\mathrm{mock}=0.3$, which are in agreement with the current observational constraints \cite{Aghanim:2018eyx}. It should be emphasised that our final results were found to be independent of the choice of these fiducial cosmological parameter values. Similar to the case of cosmic expansion data, we shall be assuming a Gamma distribution, as specified in Eq. (\ref{eq:gamma_dist}), for the redshift distribution of the observational $f\sigma_8(z)$ data points, which we also illustrate in the left panel of Fig. \ref{fig:fs8z_data}.

\begin{figure}
\centering
\includegraphics[width=0.485\linewidth]{fs8_z_ANN_layers.pdf}
\includegraphics[width=0.485\linewidth]{fs8_z_ANN_neurons.pdf}
\includegraphics[width=0.4865\linewidth]{fs8_z_ANN_data.pdf}
\includegraphics[height=0.3555\linewidth]{fs8_whisker_plot.pdf}
\caption{We depict the L1 $f\sigma_8(z)$ ANN reconstructions with different number of layers and neurons in the top-left and top-right panels, respectively. In the bottom-left panel we depict the $f\sigma_8(z)$ ANN reconstructions adopting the L1, SL1 and MSE loss functions. In the latter panels, the corresponding $f\sigma_8(z)$ data points are also included. In the bottom-right panel we illustrate the inferred $1\sigma$ constraint on $f\sigma_{8,0}^{}$ from the $f\sigma_8(z)$ ANN reconstructions as indicated on the vertical axis.}
\label{fig:fs8_ANN}
\end{figure}

We now generate a mock data set of $f\sigma_8(z)$, for which we also need to take into account the uncertainty of the observational $f\sigma_8(z)$ data points. In the right panel of Fig. \ref{fig:fs8z_data}, we illustrate the errors of the considered $f\sigma_8(z)$ data set as a function of redshift. Similar to the $H(z)$ data, the uncertainties of the $f\sigma_8(z)$ data set tend to increase with the redshift. We will therefore be assuming a first degree polynomial function of redshift \cite{Ma:2010mr,Velasquez-Toribio:2021ufm} for the error of $f\sigma_8(z)$. The mean fitting function is found to be $\sigma_{f\sigma_8}^0(z)=0.07+0.02z$, while the symmetric upper and lower error bands are respectively specified by $\sigma_{f\sigma_8}^+(z)=0.09+0.05z$ and $\sigma_{f\sigma_8}^-(z)=0.05-0.01z$.
We further depict these fitting functions in the right panel of Fig. \ref{fig:fs8z_data}, in which one could easily observe that the majority of the data points are included in the area enclosed by the $\sigma_{f\sigma_8}^+(z)$ and $\sigma_{f\sigma_8}^-(z)$ functions (dashed lines). We shall now proceed with the random generation of the errors for our $f\sigma_8(z)$ mock data points, in which we assume that the error $\tilde{\sigma}_{f\sigma_8}^{}(z)$ follows the Gaussian distribution $\mathcal{N}(\sigma_{f\sigma_8}^0(z),\,\varepsilon_{f\sigma_8}^{}(z))$, where $\varepsilon_{f\sigma_8}^{}(z)=(\sigma_{f\sigma_8}^+(z)-\sigma_{f\sigma_8}^-(z))/4$, such that $\tilde{\sigma}_{f\sigma_8}^{}(z)$ falls in the area with a probability of 95\%. Therefore, every simulated growth rate data point ${f\sigma_8}_\mathrm{sim}^{}(z_i)$ at redshift $z_i$, is computed via ${f\sigma_8}_\mathrm{sim}^{}(z_i)={f\sigma_8}_\mathrm{fid}^{}(z_i)+\Delta {f\sigma_8}_i$, with the associated uncertainty of $\tilde{\sigma}_{f\sigma_8}^{}(z_i)$, where $\Delta {f\sigma_8}_i$ is determined via $\mathcal{N}(0,\,\tilde{\sigma}_{f\sigma_8}^{}(z_i))$. Similar to the cosmic expansion ANN analysis, the entire set of $f\sigma_8(z)$ mock data points was used to train the network. The set--up for the determination of the optimal ANN structure follows the methodology adopted for the cosmic expansion data. Indeed, the initial learning rate is also set to 0.01 and decreases with the number of iterations, while the training batch size is set to half of the number of the observational data points. Also, the network is trained for $10^5$ iterations, after which the loss function no longer decreases, as clearly illustrated in the right panel of Fig. \ref{fig:fs8_risk_loss}. From the risk parameter, we determined that the network model with one hidden layer is the best performing neural structure. Furthermore, from the normalised risk values $\mathcal{R}$ as depicted in the left panel of Fig.~\ref{fig:fs8_risk_loss}, one could clearly notice that 4096 neurons minimise the risk function, and therefore the latter number of neurons with one hidden layer shall be adopted in our analyses. Furthermore, we shall be using the L1 loss function, since this was characterised by the lowest risk statistic with respect to the MSE and SL1 loss function networks.

\subsection{\label{sec:ANN_fs8z_Omz}ANN \texorpdfstring{$f\sigma_8^{}(z)$}{} reconstructions and \texorpdfstring{$\mathcal{O}m_{f\sigma_8^{}}^{}(z)$}{} null test}

We now shift to the ANN reconstruction of observational $f\sigma_8(z)$ data with the optimal ANN structure of one hidden layer and 4096 neurons. We illustrate the ANN $f\sigma_8(z)$ reconstructions in the top and bottom-left panels of Fig. \ref{fig:fs8_ANN} along with the observational data points. Similar to the ANN reconstruction of the cosmic expansion data set, we notice that when one considers more layers, this leads to more complex features in the reconstruction which adversely impact the performance of the reconstruction, as shown in the top--left panel of Fig. \ref{fig:fs8_ANN}. In the top--right panel of the latter figure, one could notice that the number of neurons affects the ANN reconstruction, an effect which was not observed for the $H(z)$ data set. Furthermore, the loss functions also have a mild impact on the ANN reconstruction of the cosmic growth data set, as depicted in the bottom-left panel.
We further compare the inferred $1\sigma$ constraint on the value of $f\sigma_{8,0}\equiv f\sigma_8(z=0)$ in the bottom-right panel of Fig. \ref{fig:fs8_ANN}, in which one could observe that the SL1 ANN reconstruction tends to be characterised by a larger uncertainty. One could also notice that the reported GP reconstructions \cite{Pinho:2018unz,Li:2019nux,LeviSaid:2021yat} are in agreement with the derived ANN reconstructions. For comparison purposes, we also reconstruct the $\delta^\prime(z)/\delta_0$ function as illustrated in the top--left panel of Fig. \ref{fig:delta_fs8}, which is also in agreement with the reported GP function \cite{LeviSaid:2021yat}, although slightly more conservative, with larger error bars.

\begin{figure}
\centering
\includegraphics[height=0.32\linewidth]{delta_prime_on_delta0.pdf}
\includegraphics[height=0.32\linewidth]{Delta_m_final.pdf}
\includegraphics[width=0.585\linewidth]{Om_fs8_null_test_final_smoothed.pdf}
\caption{ANN reconstructions of $\delta^\prime(z)/\delta_0$ (top--left) and $\Delta_m(z)$ (top--right) with the L1 loss function. In the bottom panel we illustrate the L1 ANN reconstruction of the $\mathcal{O}m_{f\sigma_8}(z)$ null test, along with the $\Lambda$CDM and $w$CDM predictions when adopting the \textit{Planck} parameter constraints.}
\label{fig:delta_fs8}
\end{figure}

For the growth rate null test, we shall first consider the quantity $\Delta_m(a)$, specified by \cite{Arjona:2021mzf}
\begin{equation}\label{eq:Delta_fs8}
\Delta_m(a)=\frac{\delta_m(a)}{\delta_m(a_0)}=\frac{a\,{}_{2}F_{1}\left[\frac{1}{3},1;\frac{11}{6};a^3\left(1-\frac{1}{\Omega_{m,0}}\right)\right]}{{}_{2}F_{1}\left[\frac{1}{3},1;\frac{11}{6};\left(1-\frac{1}{\Omega_{m,0}}\right)\right]}\,,
\end{equation}
where we adopted the conventional normalisation of $a_0=1$. The ANN reconstruction of this function is illustrated in the top--right panel of Fig. \ref{fig:delta_fs8}. From this equation we need to express the matter density parameter $\Omega_{m}(z)$ as a function of $f\sigma_8(z)$ and the redshift, i.e. $\mathcal{O}m_{f\sigma_8^{}}^{}(z)$. We therefore consider a series expansion of Eq. (\ref{eq:Delta_fs8}) around $\Omega_{m,0}^{-1}=1$, keeping the first fifteen terms, for which the accuracy of the series expansion with respect to the analytical expression was found to be at the sub--percent level. We then apply the Lagrange inversion theorem to invert the series expansion and write the inverse matter density $\Omega_m^{-1}(z)$ as a function of $\Delta_m(a)$, i.e. $\Omega_m^{-1}(z)\equiv\mathcal{O}m_{f\sigma_8^{}}^{-1}(z,\,\Delta_m)$, leading to the $\mathcal{O}m_{f\sigma_8^{}}^{}(z)$ null test which is specified by $\mathcal{O}m_{f\sigma_8^{}}^{}(z)=\frac{1}{\mathcal{O}m_{f\sigma_8^{}}^{-1}(z,\,\Delta_m)}$. With this theoretical framework in place, we use the ANN reconstruction of $f\sigma_8(z)$ to reconstruct $\Delta_m(z)$ via Eq. (\ref{eq:Delta_fs8}), which is then used to reconstruct $\mathcal{O}m_{f\sigma_8^{}}^{}(z)$. This constitutes a null test of the concordance model of cosmology since, in $\Lambda$CDM, $\mathcal{O}m_{f\sigma_8^{}}^{}(z)=\Omega_{m,0}$, so any deviation from this equality would indicate a data driven departure from the $\Lambda$CDM model. The ANN reconstructed cosmic growth null test is illustrated in Fig.
\ref{fig:delta_fs8}, along with the current limits on $\mathcal{O}m_{f\sigma_8^{}}^{}(z)$ within the $\Lambda$CDM and $w$CDM models when adopting the \textit{Planck} model parameter constraints. In the case of $w$CDM, the following analytical evolution of the matter density contrast was adopted for the illustration in Fig. \ref{fig:delta_fs8} \cite{Buenobelloso:2011sja}
\begin{equation}
\delta_m^{w\mathrm{CDM}}(a)=a\,{}_{2}F_{1}\left[-\frac{1}{3w},\frac{1}{2}-\frac{1}{2w};1-\frac{5}{6w};a^{-3w}\left(1-\frac{1}{\Omega_{m,0}}\right)\right]\,,
\end{equation}
with $w=-1.028\pm0.032$ \cite{Aghanim:2018eyx}. From the ANN reconstruction of $\mathcal{O}m_{f\sigma_8^{}}^{}(z)$, it is clear that the \textit{Planck} constraints on the $\Lambda$CDM and $w$CDM models are much tighter over the whole considered redshift range. Moreover, one could also observe that the ANN reconstructed cosmic growth null test is not found to be in good agreement with the $\Lambda$CDM and $w$CDM predictions, particularly in the range $0.25\lesssim z\lesssim 1.9$, in which the discrepancy lies between $2\sigma$ and $\sim3.5\sigma$. Such a discrepancy supports the already reported tensions \cite{Joudaki:2019pmv,Asgari:2019fkq,Benisty:2020kdt,DiValentino:2020vvd} between the early--time and late--time cosmological data sets probing the large scale structure, although in our case the reported departure from the concordance model of cosmology is data driven and model--independent.

\section{\label{sec:conc}Conclusions}

The arena of nonparametric reconstruction techniques has grown drastically in recent years, driven by the emergence of increasingly polarizing cosmological tensions in observational data sets. In tandem, this has also been coupled to the introduction of a number of important null tests on concordance cosmology. By and large, these reconstruction efforts have relied on GP to various extents, which suffers from several critical deficiencies that can be principally summarized as the kernel selection problem, in which different kernels must be surveyed for consistency with each other, and the overfitting of Gaussian uncertainties, particularly at low redshifts, which are a focal point of interest in this context. In this work, we have explored the possibility of using trained ANNs to reconstruct late--time cosmological data, where we similarly assume Gaussian uncertainties for comparison's sake. We did this both for cosmic expansion data and for the growth of large scale structure data. ANNs utilize an unsupervised learning process, meaning that the structure of the ANN is of crucial importance in accurately mimicking the input data. For this reason, in both cases, we first worked on designing the ANN structure in Secs.~\ref{sec:hubble_training} and \ref{sec:fs8_training}, where the lack of complexity in the data led to a preference for a one layer system with similar numbers of neurons in the network. As discussed in these sections, the mock data was produced in a very simplified manner, a choice which is consistent with the eventual reconstructions that follow. Other approaches led to near identical results in this regard. The reconstructed $H(z)$ and $f\sigma_8(z)$ profiles are respectively shown in Figs.~\ref{fig:Hz_ANN} and \ref{fig:fs8_ANN}, where the one layer systems clearly approximate the data much better than the other options, which exhibit more oscillatory behavior. The reconstructions are also largely stable for increasing numbers of neurons in this respect.
On this point, the reconstructions both appear consistent across the various loss functions, which again points to the resilience of these reconstructed profiles. $H(z)$ differs slightly from $f\sigma_8(z)$ in that drastically different priors have appeared in the literature in recent years, while $f\sigma_8(z)$ continues to exhibit a slightly lower tension between literature values. For this reason, we also show how priors on $H_0$ impact the reconstruction of this expansion parameter in Fig.~\ref{fig:Hz_ANN}. Surprisingly, it turns out that the $H(z)$ reconstructions are largely independent of any choice of prior. This means that the ANN gives a more equal weighting to the distributed observation points rather than the initial or low redshift elements. Beyond the reconstructions of the expansion and growth profiles, we also perform null tests to examine the consistency of the reconstructions against an $\mathcal{O}m(z)$ diagnostic. This diagnostic can help reveal any preference for deviations away from the $\Lambda$CDM concordance model. In Fig.~\ref{fig:Om_z} the diagnostic for the reconstructed $H(z)$ data gives large uncertainties for low values of the redshift, which then diminish for larger redshifts. This occurs due to a divergence at $z=0$ and is independent of the choice of prior, as shown in the separate plots. In Fig.~\ref{fig:delta_fs8}, we show this diagnostic for the $f\sigma_8(z)$ reconstructions. We similarly show the region plots for both $\Lambda$CDM and $w$CDM using Planck parameter values. Here, a sizable difference arises between the reconstructed data and its model analogues, which may point to the need for the consideration of other null tests. The ANN approach is drastically different from the GP method, in that the over-fitting and kernel selection issues are largely suppressed or eliminated. The source of these properties lies in the fact that ANNs have a much larger number of hyperparameters which are optimized. This means that the resulting trained ANN is a much better imitation of the natural process producing the observations as compared with GP. Another important observation of the ANN technique is that its $H(z)$ reconstruction is largely independent of literature priors, unlike its GP counterpart, which is significantly altered by different priors. One expects these foundational differences to emerge due to the fact that the ANN structure assumes far less than GP in this context, thus giving a more authentic profile for these cosmological parameters. It would be interesting to probe other data sets and forecast observations for surveys expected to report results over the next few years. It may also be intriguing to explore other null tests of the concordance model which may further expose the impact of reported cosmological tensions. Ultimately, however, one would want to couple these different cosmological parameters in some model--independent way through a process involving an ANN structure. Alternatively, one may also want to use these ANN reconstructions to constrain possible cosmological models such as modified gravity and dark matter models.

\begin{acknowledgments}
The authors would like to acknowledge networking support by the COST Action CA18108 and funding support from Cosmology@MALTA which is supported by the University of Malta. This research has been carried out using computational facilities procured through the European Regional Development Fund, Project No.
ERDF-080 ``A supercomputing laboratory for the University of Malta''. The authors would also like to acknowledge funding from ``The Malta Council for Science and Technology'' in project IPAS-2020-007. KFD acknowledges support by the Hellenic Foundation for Research and Innovation (H.F.R.I.) under the “First Call for H.F.R.I. Research Projects to support Faculty members and Researchers and the procurement of high-cost research equipment grant” (Project Number: 2251). \end{acknowledgments} \bibliographystyle{JHEP}
\section{Introduction} The formula for the absolute entropy of a monoatomic ideal gas is named after Otto Sackur and Hugo Tetrode who independently derived it in~1912~\cite{sackur2,tetrode,sackur}. In classical thermodynamics the entropy of a monoatomic ideal gas is
\begin{equation}\label{Skl}
S(E,V,N) = kN \left( \frac{3}{2} \ln \frac{E}{N} + \ln \frac{V}{N} + s_0 \right),
\end{equation}
where $E$, $V$ and $N$ are the kinetic energy, the volume and the number of atoms, respectively. In classical physics the constant $s_0$ is undetermined. The achievement of Sackur and Tetrode was to compute $s_0$. At first sight this does not look very exciting, however, in order to compute $s_0$ they had to work out the size of ``elementary cells or domains'' in phase space. Only with this knowledge is it possible to count the number of states in classical phase space, which is a prerequisite for the computation of Boltzmann's absolute entropy given by~\cite{boltzmann,planck3}
\begin{equation}\label{SW}
S = k \ln W.
\end{equation}
In this formula, $W$ is the number of possibilities to realize a system compatible with some given boundary conditions. Sackur and Tetrode determined the volume of phase space cells as $h^n$ where $h$ is Planck's constant and $n$ is the number of degrees of freedom. Until then, $h$ was primarily associated with harmonic oscillators and photons. With the work of Sackur and Tetrode it became clear that Planck's constant was not only relevant for counting the number of states in the case of photons but also in the case of massive particles. In this way, $h$ became ubiquitous in statistical physics, more than ten years before the advent of quantum mechanics. This was an amazing result because a priori Planck's constant in the expression $h \nu$ for the energy of a photon has nothing to do with the phase-space volume associated with massive particles. This connection was clarified only later by quantum mechanics. We want to stress that the elegance of the work of Sackur and Tetrode derives from the combination of theoretical considerations and usage of experimental data with which they were able to lend credibility to their result. They did so by successfully applying their equation to the then available data on mercury, whose vapor is monoatomic and behaves in good approximation as an ideal gas. Below we list the articles of Sackur and Tetrode and the achievements therein, written in the course of the development of their equation. The titles are literal translations of the German originals.
\begin{enumerate}
\renewcommand{\theenumi}{\roman{enumi}}
\item O.~Sackur, \textit{The application of the kinetic theory of gases to chemical problems}~\cite{sackur1} (received October 6, 1911): In this paper Sackur develops the formula for the entropy~$S$ of a monoatomic ideal gas as a function of the size of the elementary cell.
\item O.~Sackur, \textit{The meaning of the elementary quantum of action for gas theory and the computation of the chemical constant}~\cite{sackur2} (no ``received date'', must have been written in spring 1912): Here Sackur postulates that the size of the elementary cell is $h^n$ and obtains the absolute entropy $S$ of a monoatomic ideal gas. Using $S$, he computes the vapor pressure over a solid and makes a comparison with data on neon and argon. The numerical results are, however, not completely satisfying.
\item H.~Tetrode, \textit{The chemical constant and the elementary quantum of action}~\cite{tetrode} (received March 18, 1912): Tetrode gives an illuminating derivation of $S$, assuming that the size of the elementary cell is $(zh)^n$. He fits the parameter $z$ by using data on the vapor pressure of liquid mercury. Due to some numerical mistakes he obtains $z \approx 0.07$.\footnote{Actually, from Tetrode's equations~(12) and~(13) we would rather deduce $z \approx 0.02$.}
\item H.~Tetrode, erratum to \textit{The chemical constant and the elementary quantum of action}~\cite{tetrode} (received July 17, 1912): Tetrode corrects the numerics and now obtains $z \sim 1$. He acknowledges the papers~\cite{sackur2,sackur1} of Sackur by noting that the formula for $S$ has been developed by both of them at the same time. More precisely, he refers to a formula for the so-called ``chemical constant'' pioneered by Nernst~\cite{nernst}, which we will define later.
\item O.~Sackur, \textit{The universal meaning of the so-called elementary quantum of action}~\cite{sackur} (received October 19, 1912): He obtains good agreement ($\pm 30\%$) with the data on the vapor pressure of mercury and comments on the paper by Tetrode.
\end{enumerate}
The paper is organized as follows. In section~\ref{derivation} we describe the different approaches of Sackur and Tetrode to derive their equation and add some comments. Since historically the corroboration of the Sackur--Tetrode equation by using data on the vapor pressure of (liquid) mercury was crucial, we give a detailed account of it in section~\ref{vapor pressure}. Moreover, we redo the numerics by using modern mercury data in section~\ref{fit} and obtain a reasonably good value of Planck's constant. In section~\ref{conclusions} our conclusions are presented. A derivation of Kirchhoff's equation, which is used in the numerical computation, is found in the appendix.

\section{The Sackur--Tetrode equation}
\label{derivation}

\subsection{Tetrode's derivation}
The starting point of Tetrode's reasoning is the entropy formula~(\ref{SW}) which should, according to Nernst's heat theorem~\cite{nernst}, give the correct value of the entropy without any additive constant. Then he considers a system with $n$ degrees of freedom and phase space coordinates $q_1, \ldots, p_n$, for which he connects $W$ with the number of configurations of phase space points. In order to have a finite entropy, it is necessary to discretize phase space, which Tetrode does by introducing ``elementary domains'' of volume
\begin{equation}
\delta q_1\, \delta p_1 \cdots \delta q_n\, \delta p_n = \sigma = (zh)^n,
\end{equation}
where $h$ is Planck's constant and $z$ is a dimensionless number. Then he argues that, in a system of $\nu$ identical particles, configurations which are related only by exchange of particles should not be counted as different. Therefore, denoting by $W'$ the number of configurations in phase space, the entropy for such a system is
\begin{equation}
S = k \ln \frac{W'}{\nu !}.
\end{equation}
This is to avoid the Gibbs paradox and to obtain $S$ as an extensive quantity, though Tetrode does not mention Gibbs in this context.
Moving on to the monoatomic gas consisting of $\nu \equiv N$ atoms with mass $m$ and spatial volume $V$, the number of degrees of freedom is $n = 3N$ and, for a given maximal energy $E$ of the gas, the volume occupied in phase space is computed by
\begin{equation}
\mathcal{V}(E,V,N) = \int \mathrm{d}^3 x_1 \int \mathrm{d}^3 p_1 \cdots \int \mathrm{d}^3 x_N \int \mathrm{d}^3 p_N \quad \mbox{with} \quad \frac{1}{2m} \left( {\vec p_1}^{\,2} + \cdots + {\vec p_N}^{\,2} \right) \leq E.
\end{equation}
Utilizing the gamma function, this phase space volume is expressed as
\begin{equation}
\mathcal{V}(E,V,N) = \frac{(2\pi mE)^{\frac{3N}{2}}\, V^N}{\Gamma\left(\frac{3N}{2} + 1 \right)}.
\end{equation}
According to the arguments above, the entropy is then given by
\begin{equation}\label{S1}
S = k \ln \frac{\mathcal{V}(E,V,N)}{(zh)^{3N} N!}.
\end{equation}
In the last step Stirling's formula is used, to wit the approximations
\begin{equation}
\ln N! \simeq N ( \ln N - 1 ) \quad \mbox{and} \quad \ln \Gamma\left( \frac{3N}{2} + 1 \right) \simeq \frac{3N}{2} \left( \ln \frac{3N}{2} - 1 \right)
\end{equation}
for large $N$. This leads to Tetrode's final result
\begin{equation}\label{s-tetrode}
S(E,V,N) = kN \left( \frac{3}{2} \ln \frac{E}{N} + \ln \frac{V}{N} + \frac{3}{2} \ln \frac{4\pi m}{3 (zh)^2} + \frac{5}{2} \right)
\end{equation}
for the entropy of a monoatomic ideal gas. This derivation is of an amazing lucidity. No wonder that 100 years later it is one of the standard methods in modern textbooks. The only amendment to Tetrode's derivation comes from quantum mechanics which fixes the size of the elementary domain to $h^n$, i.e. requires $z=1$; the latter result was obtained by Tetrode through a fit to the data of the vapor pressure of mercury. From equation~(\ref{s-tetrode}) with $z=1$ we infer that the constant $s_0$ of equation~(\ref{Skl}) is given by
\begin{equation}
s_0 = \frac{3}{2}\, \ln \frac{4\pi m}{3h^2} + \frac{5}{2}.
\end{equation}

\subsection{Sackur's derivation}
It is much harder to follow Sackur's line of thought. Here we sketch the derivation of the entropy formula in~\cite{sackur}, because there he gives the most detailed account of his derivation. In this paper he first derives Planck's law of radiation by considering a system of radiators, before he moves on to the ideal monoatomic gas. In both cases Sackur defines a time interval $\tau$ in which the system is monitored and an energy interval $\Delta \varepsilon$ for the discretization of energy. For the gas the time $\tau$ is assumed to be so small that during this time collisions between atoms can be neglected. Therefore, during the time interval of length $\tau$, each of the kinetic energies associated with the three directions in space, $\varepsilon_x$, $\varepsilon_y$, $\varepsilon_z$, of every atom can be assumed to lie in a well-defined energy interval of length $\Delta \varepsilon$. In other words, Sackur imagines a three-dimensional energy space with the $x$, $y$ and $z$-axes referring to the kinetic energies $\varepsilon_x$, $\varepsilon_y$ and $\varepsilon_z$, respectively, and with energy unit $\Delta \varepsilon$ on every axis. In this way, the energy space is divided into cubes of volume $(\Delta \varepsilon)^3$ and the kinetic energy of every particle lies, during the time interval $\tau$, in a well-defined cube.
If the $i$-th energy cube is given by $n_k \Delta\varepsilon \leq \varepsilon_k < (n_k + 1) \Delta\varepsilon$ ($k=x,y,z$) with integers $n_k$, the energy $\varepsilon_i$ associated with this cube can, for instance, be defined as \begin{equation} \varepsilon_i = (n_x + n_y + n_z) \Delta\varepsilon. \end{equation} Sackur further considers the probability $w$ of observing, during the time interval $\tau$, atoms with kinetic energy $\varepsilon_k$ ($k=x,y,z$) lying in a specific energy interval associated with the $k$-axis; he argues that $w$ will be proportional to the product $\tau \Delta \varepsilon$, because the smaller $\tau$ and $\Delta \varepsilon$ are, the smaller $w$ will be. Hence, since there are three directions in space, the number of atoms in the $i$-th energy cube, $N_i$, will be proportional to $(\tau \Delta \varepsilon)^3$. In this way, Sackur justifies the Ansatz \begin{equation}\label{ansatz} N_i = N f(\varepsilon_i) \left( \tau \Delta \varepsilon \right)^3, \end{equation} where $N$ is the total number of atoms in the volume $V$. He goes on to distribute the $N$ atoms into $r$ energy cubes, in exactly the same way as in the case of harmonic oscillators and photons. The number of possibilities for putting $N_1$ atoms into cube~1, $N_2$ atoms into cube~2, etc.\ is given by \begin{equation}\label{WS} W = \frac{N!}{N_1! N_2! \cdots N_r!} \quad \mbox{with} \quad N = N_1 + N_2 + \cdots + N_r. \end{equation} Note that Sackur computes the number of possibilities $W$ for a given decomposition of $N$ into the numbers $N_1, \ldots, N_r$, which clearly implies that he assumes \emph{distinguishable} atoms; for indistinguishable atoms, a fixed decomposition would simply correspond to a \emph{single} state and thus $W=1$. According to Boltzmann and Planck, the entropy is obtained by \begin{equation}\label{S} S = k \ln W = k N \ln N - k \sum_i N_i \ln N_i = -kN \sum_i \frac{N_i}{N} \ln \frac{N_i}{N} \end{equation} for large numbers $N_i$ and the most probable distribution is given by the maximum of $S$ under the conditions \begin{equation}\label{NE} \sum_i N_i = \sum_i N f(\varepsilon_i) \left( \tau \Delta \varepsilon \right)^3 = N, \quad \sum_i N_i \,\varepsilon_i = \sum_i N f(\varepsilon_i) \left( \tau \Delta \varepsilon \right)^3 \varepsilon_i = E. \end{equation} This procedure superficially resembles the derivation of the canonical ensemble; however, its spirit is completely different. We know that the ST equation is only valid for a dilute gas, and Tetrode's derivation implicitly assumes that the occupation numbers, i.e.\ the numbers of particles occupying the energy levels of single-particle states, are very small; otherwise the expression for the number of distinguishable configurations in phase space would be much more complicated than $W'/N!$ and effects of spin and statistics would have to be taken into account. However, Sackur in his derivation assumes the opposite, namely occupation numbers $N_i \gg 1$. Finding the maximum of $S$ of equation~(\ref{S}) amounts to computing the stationary point of the functional $-\int \mathrm{d} \varepsilon f \ln f$, under the conditions of a fixed total number of atoms and a fixed energy, where the function $f$ is defined in the Ansatz~(\ref{ansatz}).
The sought-for stationary point is obtained from the maximum of \begin{equation} \Phi(f, \varepsilon) = -f \ln f + \left( \alpha' + 1 \right) f - \beta \varepsilon f, \end{equation} where the parameters $\alpha'$ and $\beta$ are Lagrange multipliers: \begin{equation} \frac{\partial \Phi}{\partial f} = -\ln f + \alpha' - \beta \varepsilon = 0 \quad \Rightarrow \quad f(\varepsilon) = e^{\alpha' - \beta \varepsilon} = \alpha e^{-\beta \varepsilon} \quad \mbox{with} \quad \alpha = e^{\alpha'}. \end{equation} Eventually, Sackur arrives at the Boltzmann distribution \begin{equation} f(\varepsilon) = \alpha e^{-\beta \varepsilon}. \end{equation} Plugging $N_i$ with this $f$ into formula~(\ref{S}) and using equation~(\ref{NE}), the simple expression \begin{equation}\label{S2} S = -3kN \ln (\tau \Delta\varepsilon) - kN \ln \alpha + k \beta E \end{equation} for the entropy ensues. In equation~(\ref{S2}) there are three unknowns: $\tau \Delta\varepsilon$, $\alpha$ and $\beta$. At this point, referring to Sommerfeld~\cite{sommerfeld}, Sackur states that the smallest action that can take place in nature is given by Planck's constant $h$. Therefore, he makes the bold assumption that \begin{equation} \tau \Delta \varepsilon = h, \end{equation} which he had already made successfully for the derivation of Planck's law of radiation in the same paper. The other two parameters are in principle determined by equation~(\ref{NE}). Sackur then argues that, for simplicity, the two sums in equation~(\ref{NE}) can be replaced by integrals. For this purpose he makes the following step: \begin{equation}\label{pk} \varepsilon_k = \frac{p_k^2}{2m} \;\; (k = x,y,z) \quad \Rightarrow \quad \mathrm{d} \varepsilon_k = \frac{p_k}{m}\, \mathrm{d} p_k = \frac{\bar x_k}{\tau}\, \mathrm{d} p_k, \end{equation} where the $\bar x_k$ are the average Cartesian components of the distance covered by the atoms during the time $\tau$. Then Sackur connects the product of the three average distances with the volume $V$ of the gas by equating it with the volume per atom: \begin{equation}\label{v/n} \bar x \bar y \bar z = \frac{V}{N}. \end{equation} It is hard to understand why this equation should hold, but with equations~(\ref{pk}) and~(\ref{v/n}) he effectively introduces an integration $\mathrm{d}^3 x\, \mathrm{d}^3 p$ in phase space.\footnote{These manipulations introduce an ambiguity in the integration boundaries: in $\mathrm{d}\varepsilon_k$ the integration is from zero to infinity, while in $\mathrm{d} p_k$ Sackur integrates from minus infinity to plus infinity.} Moreover, since Sackur nowhere introduces the concept of indistinguishable atoms, he needs the factor $1/N$ in equation~(\ref{v/n}) to avoid the Gibbs paradox, as we will see shortly. So he ends up with \begin{equation} \tau^3 \mathrm{d} \varepsilon_x \mathrm{d} \varepsilon_y \mathrm{d} \varepsilon_z = \frac{V}{N}\, \mathrm{d} p_x \mathrm{d} p_y \mathrm{d} p_z \end{equation} for the integration in equation~(\ref{NE}) and obtains \begin{equation} 1 = \frac{\alpha V m^3}{N} \left( \frac{2\pi}{m \beta} \right)^{3/2} \quad \mbox{and} \quad E = \frac{3\alpha V m^3}{2\beta} \left( \frac{2\pi}{m \beta} \right)^{3/2}. \end{equation} These two equations are easily solved for $\alpha$ and $\beta$.
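As a sanity check of the algebra, the solution quoted next can be verified numerically; here is a minimal Python sketch with arbitrary, hypothetical values of $N$, $V$, $E$ and $m$:
\begin{verbatim}
import math

# Verify that beta = 3N/(2E) and alpha = (N/V)(3N/(4 pi m E))^(3/2)
# solve the two constraint equations (arbitrary hypothetical units).
N, V, E, m = 2.0, 3.0, 5.0, 7.0

beta = 3.0 * N / (2.0 * E)
alpha = (N / V) * (3.0 * N / (4.0 * math.pi * m * E)) ** 1.5

eq1 = alpha * V * m**3 / N * (2.0 * math.pi / (m * beta)) ** 1.5  # should be 1
eq2 = 3.0 * alpha * V * m**3 / (2.0 * beta) \
      * (2.0 * math.pi / (m * beta)) ** 1.5                       # should be E
print(eq1, eq2)   # -> 1.0, 5.0 (up to rounding)
\end{verbatim}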
Plugging the solution \begin{equation} \beta = \frac{3N}{2E} \quad \mbox{and} \quad \alpha = \frac{N}{V} \left( \frac{3N}{4\pi mE} \right)^{3/2} \end{equation} into equation~(\ref{S2}), Sackur arrives at his final result \begin{equation}\label{s-sackur} S(E,V,N) = kN \left( \frac{3}{2} \ln \frac{E}{N} + \ln \frac{V}{N} + \frac{3}{2} \ln \frac{4\pi m}{3 h^2} + \frac{3}{2} \right). \end{equation} Comparing this expression with Tetrode's result~(\ref{s-tetrode}), we see that there is a difference in the last term in parentheses; Sackur has $3/2$ while Tetrode has the correct number $5/2$. Thus \begin{equation} \left. S(z=1) \right|_\mathrm{Tetrode} - \left. S \right|_\mathrm{Sackur} = kN, \end{equation} which Sackur observed and commented upon in~\cite{sackur}. It is interesting to note that in his previous paper~\cite{sackur2} Sackur actually had the correct number. It is quite remarkable that Sackur, with his line of reasoning, arrives at nearly the correct result, being off only by $kN$. This difference is indeed important for the comparison of the entropy formula with the data from vapor pressure of mercury~\cite{tetrode,sackur}; anticipating equation~(\ref{vp}), we see that a determination of Planck's constant with Sackur's formula would result in a value which is too low by a factor of $e^{-1/3} \approx 0.72$, where $e$ is Euler's number. We conclude this section with a comment on equation~(\ref{v/n}). We know that $S$ is an extensive quantity, i.e. $S(\zeta E, \zeta V, \zeta N) = \zeta S(E,V,N)$ holds for all $\zeta > 0$. If the factor $1/N$ had been absent in equation~(\ref{v/n}), we would have to replace $V$ by $NV$ in equation~(\ref{s-sackur}); but then $S$ would not be an extensive quantity, as one can easily check. \subsection{Discussion} For comparison with Sackur's treatment in particular, let us present here the derivation of the entropy of a monoatomic ideal gas by using the canonical partition function $Z$. Since we are dealing with non-interacting particles, $Z$ is given by \begin{equation} Z = \frac{Z_1^N}{N!}, \end{equation} where $Z_1$ is the partition function of a single particle. The factor $1/N!$ is present to take into account that the particles are indistinguishable. Then the entropy is given by \begin{equation}\label{SZ1} S = k \left( \ln Z + \beta E \right) = kN \left( \ln \frac{Z_1}{N} + 1 + \frac{\beta E}{N} \right), \end{equation} where $E$ is the total energy of the $N$ particles and $\beta = 1/(kT)$. Furthermore, Stirling's formula has been used to replace $\ln N!$ by $N(\ln N - 1)$. If $E/N$ does not depend on $N$, which is the case for the ideal gas, this equation displays the full dependence on $N$. For the monoatomic ideal gas, in the classical approximation, the single-particle partition function is given by the integral \begin{equation} Z_1 = \frac{1}{h^3} \int_{\mathcal{V}} \mathrm{d}^3x \int \mathrm{d}^3p\, \exp \left( -\beta \frac{{\vec p}^{\,2}}{2m} \right) = \frac{V}{\lambda^3} \quad \mbox{with} \quad \lambda = \frac{h}{\sqrt{2\pi m kT}} \end{equation} being the thermal de Broglie wavelength. The integration domain $\mathcal{V}$ is the space taken by the gas, i.e.\ the container with volume $V$. Plugging $Z_1$ into equation~(\ref{SZ1}) yields the desired entropy \begin{equation}\label{Scan} S(T,V,N) = kN \left( \ln \frac{V}{\lambda^3N} + \frac{5}{2} \right) \end{equation} as a function of temperature, volume and particle number.
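Equation~(\ref{Scan}) is easily evaluated numerically. The following minimal Python sketch (SI units, constants hard-coded) computes the molar entropy of argon at standard conditions; the result agrees with the tabulated standard molar entropy of about $154.8\,\mbox{J}\,\mbox{K}^{-1}\,\mbox{mol}^{-1}$ and may serve as a modern cross-check of the formula:
\begin{verbatim}
import math

kB  = 1.380649e-23     # Boltzmann constant [J/K]
h   = 6.62607015e-34   # Planck constant [J s]
amu = 1.66053907e-27   # atomic mass unit [kg]
R   = 8.314462618      # molar gas constant [J/(K mol)]

def molar_entropy(A, T, p):
    """Molar entropy of a monoatomic ideal gas, equation (Scan)."""
    m = A * amu                                    # atomic mass [kg]
    lam = h / math.sqrt(2 * math.pi * m * kB * T)  # de Broglie wavelength
    return R * (math.log(kB * T / (p * lam**3)) + 2.5)

# Argon at 298.15 K and 1 bar -> about 154.8 J/(K mol)
print(f"S(Ar) = {molar_entropy(39.948, 298.15, 1.0e5):.1f} J/(K mol)")
\end{verbatim}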
We compare Tetrode's and Sackur's results with the entropy formula~(\ref{Scan}) by substituting \begin{equation} E = \frac{3}{2}\,NkT \end{equation} in equations~(\ref{s-tetrode}) and~(\ref{s-sackur}).\footnote{In Tetrode's formula we set $z=1$.} We find what we have announced earlier: Tetrode's result exactly agrees with equation~(\ref{Scan}), while Sackur's result differs by $kN$. We can easily locate the origin of the difference. Considering the definitions of $\alpha$ and $Z_1$ and taking into account equation~(\ref{NE}), we find that \begin{equation} \alpha = \frac{N}{h^3 Z_1}. \end{equation} Insertion of this expression into equation~(\ref{S2}) leads to the entropy~(\ref{SZ1}), with the ``1'' within the parentheses being absent. Effectively, Sackur replaces $\ln N! \simeq N(\ln N - 1)$ by $N\ln N$ in his derivation and therefore does not fully take into account the indistinguishability of the atoms. The entropy of the monoatomic ideal gas as a function of the pressure $p$ instead of the volume $V$ is obtained with the ideal-gas equation by the substitution $V = NkT/p$. As mentioned in the introduction, Sackur and Tetrode tested their equation on mercury vapor. This element has seven stable isotopes with various nuclear spins $s_k$~\cite{aw}. Therefore, in principle for mercury one has to add the corresponding residual entropy \begin{equation}\label{Sres} S_\mathrm{res}(\mbox{Hg}) = Nk\,\sum_{k=1}^7 P_k \left( -\ln P_k + \ln(2s_k + 1) \right), \end{equation} where the $P_k$ are the isotopic abundances ($\sum_k P_k = 1$), to the Sackur--Tetrode formula. Of course, in~1912 the mercury isotopes were not known. However, as we will see in the next section, in the mercury test only the entropy difference between gaseous and liquid phases is relevant. For both phases the same residual entropy is expected, and thus $S_\mathrm{res}(\mbox{Hg})$ of equation~(\ref{Sres}) drops out. \section{The vapor pressure of mercury and Planck's constant} \label{vapor pressure} How to subject the \emph{absolute} entropy of a monoatomic ideal gas to experimental scrutiny? Sackur and Tetrode applied the following procedure. Consider the latent heat $L(T)$ of a monoatomic substance for the phase transition from the liquid to the gaseous phase. In terms of the absolute molar entropies, the latent heat is given by \begin{equation}\label{L} L(T) = T \left( s_\mathrm{vapor}(T, \bar p(T)) - s_\mathrm{liquid}(T, \bar p(T)) \right), \end{equation} where $\bar p(T)$ denotes the pressure along the coexistence curve, i.e.\ the vapor pressure. If the vapor behaves in good approximation like a monoatomic ideal gas, then the Sackur--Tetrode equation in the form \begin{equation}\label{st-molar} s_\mathrm{vapor} = R \left( \ln \frac{kT}{\bar p \lambda^3} + \frac{5}{2} \right) \end{equation} with the molar gas constant $R$ can be substituted for $s_\mathrm{vapor}(T, \bar p(T))$. For the liquid phase, neglecting the $p$-dependence, the absolute entropy can be expressed as an integral over the heat capacity: \begin{equation}\label{s-liquid} s_\mathrm{liquid} = \int_0^T \mathrm{d} T'\, \frac{c_p(T')}{T'}. \end{equation} Note that here the integration includes the solid and liquid phases, and the latent heat of melting. After insertion of $s_\mathrm{vapor}$ and $s_\mathrm{liquid}$ into equation~(\ref{L}), one obtains an expression for the vapor pressure: \begin{equation}\label{vp} \ln \bar p(T) = -\frac{L(T)}{RT} + \ln \frac{(2\pi m)^{3/2} (kT)^{5/2}}{h^3} + \frac{5}{2} - \int_0^T \mathrm{d} T'\, \frac{c_p(T')}{RT'}.
\end{equation} Similar derivations can be found in~\cite{zemanski,reif}. Since equation~(\ref{vp}) is a direct consequence of equation~(\ref{st-molar}), it serves as a testing ground for the Sackur--Tetrode equation. For this test not only data on the vapor pressure $\bar p(T)$ are needed, but also data on the latent heat $L(T)$ and the heat capacity $c_p(T)$ in the condensed phase must be available. While for $\bar p(T)$ and $L(T)$ it is sufficient to have data in a certain temperature interval, one needs to know $c_p(T)$ as a function of $T$ down to absolute zero. In 1912 the most comprehensive set of data was available on mercury. This was utilized by Sackur and Tetrode to test their equation. In this test they followed slightly different approaches. Both employed the value of Planck's constant $h$ as determined from black-body radiation and inserted it into equation~(\ref{vp}). Then Sackur directly computed the vapor pressure of mercury from equation~(\ref{vp}) and compared his results with the experimental data, whereas Tetrode replaced $h$ in equation~(\ref{vp}) by $zh$ and carried out a fit of $z$ to the data. Now we want to delineate how Sackur and Tetrode actually performed the numerical evaluation of equation~(\ref{vp}). We follow the exposition of Sackur in~\cite{sackur} because his account is sufficiently detailed and easy to follow. On the right-hand side of equation~(\ref{vp}) we have to discuss the term with $L(T)$ and the integral. In treating the latent heat as a function of $T$, Sackur uses Kirchhoff's equation---see equation~(\ref{kirchhoff}) in the appendix. Furthermore, he assumes that in the temperature interval he considers, which is from $0^\circ\,\mbox{C}$ to $360^\circ\,\mbox{C}$, the heat capacity in the liquid phase can be regarded as having the constant value $c_p^\mathrm{liquid}$. If at a reference temperature $T_1$ the latent heat is $L_1$, then due to Kirchhoff's equation \begin{equation}\label{L1} L(T) = L_1 + \left(\frac{5}{2}\,R - c_p^\mathrm{liquid} \right) (T-T_1). \end{equation} The integral on the right-hand side of equation~(\ref{vp}) is treated by splitting it into the part in the solid phase, the contribution of the phase transition, and the part in the liquid phase. Denoting the latent heat of melting by $L_m$ and the melting point by $T_m$, this integral reads \begin{equation} \int_0^T \mathrm{d} T'\, \frac{c_p(T')}{T'} = \int_0^{T_m} \mathrm{d} T'\, \frac{c_p^\mathrm{solid}(T')}{T'} + \frac{L_m(T_m)}{T_m} + c_p^\mathrm{liquid} \ln \frac{T}{T_m}. \end{equation} Again the approximation that the heat capacity of the liquid is temperature-independent has been used. Implicitly the additional approximation that the melting temperature $T_m$ is independent of the pressure has been made. The final form of the vapor pressure, prepared for the numerical evaluation, is thus \begin{eqnarray} \ln \bar p(T) &=& - \frac{L_1 + \left( c_p^\mathrm{liquid} - \frac{5}{2}R \right)T_1}{RT} + \frac{5}{2} \ln T - \int_0^{T_m} \mathrm{d} T'\, \frac{c_p^\mathrm{solid}(T')}{RT'} \nonumber \\[2mm] && \label{vp1} -\frac{L_m(T_m)}{RT_m} - \frac{c_p^\mathrm{liquid}}{R} \ln \frac{T}{T_m} + \ln \frac{(2\pi m)^{3/2} k^{5/2}}{h^3} + \frac{c_p^\mathrm{liquid}}{R}. \end{eqnarray} This equation corresponds to Sackur's equation on top of p.~82 of~\cite{sackur} and we have written the terms in the same order as there. We have refrained, however, from converting the natural logarithm to the logarithm to the base of ten, which was used by Sackur.
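For orientation, the universal term $\ln\left[(2\pi m)^{3/2} k^{5/2}/h^3\right]$ appearing in equation~(\ref{vp1}) can be evaluated directly; the minimal Python sketch below does this for mercury with modern constants. Note that the numerical value depends on the system of units (here SI, so the vapor pressure comes out in pascal); the historical papers used different units, which shifts this number by an additive constant.
\begin{verbatim}
import math

kB = 1.380649e-23               # Boltzmann constant [J/K]
h  = 6.62607015e-34             # Planck constant [J s]
m  = 200.59 * 1.66053907e-27    # atomic mass of mercury [kg]

term = math.log((2 * math.pi * m)**1.5 * kB**2.5 / h**3)
print(f"ln[(2 pi m)^(3/2) k^(5/2) / h^3] = {term:.2f}")  # ~15.8 (SI units)
print(f"divided by ln(10): {term / math.log(10):.2f}")   # ~6.87
\end{verbatim}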
As mentioned earlier, Sackur and Tetrode actually determine the \emph{chemical constant}, defined as \begin{equation}\label{chem} \mathcal{C} = \frac{1}{\ln 10} \times \ln \frac{(2\pi m)^{3/2} k^{5/2}}{h^3} = \log \frac{(2\pi m)^{3/2} k^{5/2}}{h^3}, \end{equation} from the data and compare this value of $\mathcal{C}$ with the value computed with Planck's constant obtained from black-body radiation. At that time, the chemical constant was a commonly used quantity. It appears not only in the vapor pressure but also in the law of mass action of chemical reactions in the gas phase~\cite{nernst}. Note that the conversion of the logarithm mentioned above brings about a division by $\ln 10 \approx 2.3026$ in many places in the equations in~\cite{tetrode,sackur}. In equation~(\ref{vp1}), in the integral over $c_p^\mathrm{solid}(T)/T$, both Sackur and Tetrode use a model by Nernst~\cite{nernst1} for the specific heat of solid mercury. This model is a kind of Einstein model~\cite{einstein}, but it sums two frequencies, $\omega$ and $2\omega$. It is interesting to note that the paper of Debye concerning the Debye model~\cite{debye} has a ``received date'' July 24, 1912, and is thus prior to Sackur's paper~\cite{sackur}. Actually, Sackur refers to it in~\cite{sackur}, but only in the part concerning Planck's law of radiation; in the integration over the solid phase of mercury he nevertheless uses Nernst's model. We conclude this section by summarizing and commenting on the approximations which lead to equation~(\ref{vp1}). In essence the following approximations have been made: \begin{enumerate} \renewcommand{\theenumi}{\roman{enumi}} \item The vapor is treated as a classical ideal gas. \item The molar volume $v_l$ of the liquid is neglected compared to the molar volume $v_g$ of the vapor. \item In the liquid phase the dependence on $p$ of the isobaric heat capacity is negligible in the considered temperature interval. \item\label{4} There are two technical assumptions which facilitate the numerics: The temperature dependence of the heat capacity in the liquid phase is neglected and the melting temperature $T_m$ is pressure independent. \end{enumerate} From the first assumption it follows that the heat capacity of a monoatomic vapor is constant with the value \begin{equation}\label{cp-v} c^\mathrm{vapor}_p = \frac{5}{2}\,R, \end{equation} which is an important ingredient in equation~(\ref{L1}). The thermal equation of state, \begin{equation} p V = n_m R T, \end{equation} where $n_m$ is the number of moles of the gas, has been used in equation~(\ref{st-molar}) and in the derivation of Kirchhoff's equation---see appendix. The second assumption, which occurs only in the derivation of Kirchhoff's equation, is well justified because the order of magnitude of the ratio of the molar volumes is $v_g/v_l \sim 10^3$. To discuss the third assumption we note that via the Gibbs potential we obtain the relation \begin{equation} \left. \frac{\partial c^\mathrm{liquid}_p}{\partial p} \right|_T = -T v \left( \alpha^2 + \left. \frac{\partial \alpha}{\partial T} \right|_p \right) \quad \mbox{with} \quad \alpha = \frac{1}{v} \left. \frac{\partial v}{\partial T} \right|_p, \end{equation} where $\alpha$ is the thermal expansion coefficient. This equation leads to a linear approximation of the heat capacity with respect to the pressure: \begin{equation} c^\mathrm{liquid}_p(T,p) \approx c^\mathrm{liquid}_p(T,p_0) - T \left( \alpha^2 + \left. \frac{\partial \alpha}{\partial T} \right|_p \right)_{p=p_0} v(T,p_0)\,(p-p_0).
\end{equation} The pressure $p_0$ is a reference pressure. It is well known that the $p$-dependence of $c_p$ for liquids is suppressed for two reasons. First of all, the product $v p \sim 1\,\mbox{J}\, \mbox{mol}^{-1}$, where $v$ is the molar volume of the liquid and $p \sim 1\,\mbox{bar}$, is rather small. Secondly, the thermal expansion coefficient $\alpha$ of a liquid is small as well; for instance, for mercury $\alpha \approx 1.8 \times 10^{-4}$\,K$^{-1}$ at 1\,bar. Thus, the third assumption is very well justified. However, in general the heat capacity of a liquid depends on the temperature, although not drastically. For mercury it drops by $4\%$ between $-38.84^\circ\,\mbox{C}$, which is the melting point, and $200^\circ\,\mbox{C}$~\cite{crc1}. \section{Our fit of Planck's constant to mercury data} \label{fit} It is worthwhile to use the thermodynamic data on mercury available at present and employ a slight variation of the method of Sackur and Tetrode described in the previous section in order to check the accuracy with which Planck's constant can be determined in this way. We follow Tetrode's approach in replacing $h$ by $zh$ in equation~(\ref{vp}). In the following we will plug in the modern value of $h$ and determine $z$ from the data. The best modern value of $h$, recommended by CODATA~\cite{nist-fc}, is \begin{equation}\label{h} h = 6.626\,069\,57(29) \times 10^{-34}\,\mathrm{J}\,\mathrm{s}\,. \end{equation} In order to account for the slight temperature dependence of the heat capacity of liquid mercury we make the ansatz \begin{equation}\label{cp-l} c_p^\mathrm{liquid}(T) = a_0 + a_1 T + a_2 T^2 \end{equation} and fit the coefficients $a_0$, $a_1$ and $a_2$ to the input data from the table presented in~\cite{crc1}. In this table one can also read off that from the melting point up to a temperature of about $200^\circ\,\mbox{C}$ the heat capacity of gaseous mercury agrees exactly with the ideal-gas value~(\ref{cp-v}). Thus we confine ourselves to the temperature interval from $-38.84^\circ\,\mbox{C}$ to $200^\circ\,\mbox{C}$, in which the ansatz~(\ref{cp-l}) should be sufficient. With equations~(\ref{L}) and~(\ref{cp-l}), and taking into account Kirchhoff's equation, we obtain \begin{equation} L(T) = L_0 + \frac{5}{2}\,R (T-T_0) - a_0 \left( T-T_0 \right) -\frac{1}{2} a_1 \left( T^2 - T_0^2 \right) - \frac{1}{3} a_2 \left( T^3 - T_0^3 \right), \end{equation} while inserting equation~(\ref{cp-l}) into the entropy formula~(\ref{s-liquid}) gives \begin{equation} s_\mathrm{liquid}(T) = s_0 + a_0 \ln \frac{T}{T_0} + a_1 (T-T_0) + \frac{1}{2} a_2 \left( T^2 - T_0^2 \right). \end{equation} As a reference temperature we take $T_0 = 298.15\,\mbox{K}$, which allows us to use the enthalpy of formation and the standard molar entropy from the CODATA Key Values for Thermodynamics~\cite{key}: \begin{equation} L_0 = 61.38 \pm 0.04\,\, \mbox{kJ}\,\mbox{mol}^{-1}, \quad s_0 = 75.90 \pm 0.12\,\, \mbox{J}\,\mbox{K}^{-1}\,\mbox{mol}^{-1}. \end{equation} The value of $s_0$ saves us from the non-trivial task of determining the integral in equation~(\ref{s-liquid}) with the boundaries $T=0$ and $T=T_0$. We take the input data for the vapor pressure of mercury from the table in~\cite{crc2}. In the legend of this table estimated uncertainties of the vapor pressure values are given, which we use in the method of least squares in order to fit the parameter $z$. A further input parameter is the atomic weight of mercury, $A = 200.59(2)$~\cite{aw}.
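For readers who wish to redo such a fit, the following Python outline sketches how the least-squares determination of $z$ can be organized. The sketch is schematic only: the heat-capacity coefficients $a_0$, $a_1$, $a_2$ are hypothetical placeholders, and the ``data'' are generated from the model itself at $z=1$, so the fit recovers $z=1$ by construction; to reproduce our actual analysis, the tables from~\cite{crc1,crc2} must be substituted.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize_scalar

kB, h, amu, R = 1.380649e-23, 6.62607015e-34, 1.66053907e-27, 8.314462618
m = 200.59 * amu                     # atomic mass of mercury [kg]
T0, L0, s0 = 298.15, 61.38e3, 75.90  # reference data [K], [J/mol], [J/(K mol)]
a0, a1, a2 = 28.0, -1.0e-2, 1.0e-5   # HYPOTHETICAL c_p fit coefficients

def latent_heat(T):
    # Kirchhoff's equation integrated with the quadratic c_p ansatz (cp-l)
    return (L0 + 2.5 * R * (T - T0) - a0 * (T - T0)
            - 0.5 * a1 * (T**2 - T0**2) - a2 * (T**3 - T0**3) / 3.0)

def s_liquid(T):
    return s0 + a0 * np.log(T / T0) + a1 * (T - T0) + 0.5 * a2 * (T**2 - T0**2)

def ln_p(T, z):
    # equation (vp) with h replaced by z*h
    lam = z * h / np.sqrt(2.0 * np.pi * m * kB * T)
    return np.log(kB * T / lam**3) + 2.5 - s_liquid(T) / R \
           - latent_heat(T) / (R * T)

# Synthetic "measurements" generated from the model itself at z = 1;
# real vapor-pressure tables would replace these two lines.
T_data = np.linspace(234.3, 473.15, 25)
lnp_data = ln_p(T_data, 1.0)
err = 0.01                           # assumed uncertainty on ln(p)

chi2 = lambda z: float(np.sum(((ln_p(T_data, z) - lnp_data) / err) ** 2))
res = minimize_scalar(chi2, bounds=(0.5, 2.0), method="bounded")
print(f"fitted z = {res.x:.4f}")     # -> 1.0000 by construction
\end{verbatim}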
The mass value for mercury is then $m = Au$, where $u$ is the atomic mass unit. For the determination of $h$ from mercury data we can safely neglect errors in the physical constants $R$, $k$ and $u$. With the above input, our best fit value for $z$ is $\bar z = 1.003$ at $\chi^2_\mathrm{min} = 4.2$. Since we have at our disposal vapor pressure measurements at 75 temperatures~\cite{crc2} in the considered interval, but we determine only one parameter, the number of degrees of freedom is 74. For such a large number of degrees of freedom the above value of the minimal $\chi^2$ tells us that the fit is perfect. We take into account the following sources of uncertainties in $z$: the statistical error determined by $\chi^2(z) = \chi^2_\mathrm{min} + 1$, the errors in $A$, $L_0$ and $s_0$, and an error in $c_p$. We obtain the uncertainties $\pm 0.0002$ for the statistical error and $\pm 0.0005$ for the error in $A$. These errors are one order of magnitude smaller than the errors originating from $L_0$ and $s_0$, which are $\pm 0.004$ and $\pm 0.005$, respectively. We have no information on the error in the heat capacity of liquid mercury in~\cite{crc1}. Therefore, we simply vary $a_0$ by $\pm 1\%$ as a generous error estimate~\cite{giauque}; the resulting uncertainty, however, is smaller than the statistical error. In summary, our value of $z$ is \begin{equation} z = 1.003 \pm 0.004\,(L_0) \pm 0.005\,(s_0). \end{equation} Of course, the error estimate above is not a sound statistical computation, but we can safely argue that, with existing thermodynamic data on the equilibrium of liquid and gaseous phases of mercury, Planck's constant can be determined with an accuracy of about one percent. Improving the accuracy of $L_0$ and $s_0$ might improve the determination of $h$, but due to the approximations pointed out in the previous section, thermodynamic data can most probably never compete with quantum physics data for this purpose. \section{Conclusions} \label{conclusions} Planck's quantum hypothesis in 1900 was a revolutionary step which he justified by referring to Boltzmann, because in this way he could count the number of different photon states and compute the entropy of a photon gas by using formula~(\ref{SW}). The importance of the quantum hypothesis became clear only gradually. In the beginning, Planck's constant played a role in loosely connected or seemingly unconnected phenomena. The unified perspective was achieved only later with quantum mechanics and quantum field theory. However, the importance of the quantum hypothesis for atomic and molecular physics, including thermodynamic quantities like heat capacities, was suspected quite early, for instance, by Sommerfeld~\cite{sommerfeld}, who connected Planck's constant with the ``action\footnote{Here action has the usual meaning of the time integral over the Lagrangian.} in pure molecular processes.'' In the beginning, apart from black-body radiation, the phenomena to which the quantum hypothesis could be applied were scarce. In 1905 it was used by Einstein to explain the photoelectric effect. A bit later, with the help of the quantum hypothesis, Johannes Stark could interpret features of the light spectrum emitted by canal rays and of the X-ray spectrum produced by the impact of electrons.
In 1907 Einstein put forward the ``Einstein model'' of the heat capacity of solids, where $h \nu$ was now associated with the energy of vibrations of a crystal; this theory could account for the observed deviations from the Dulong--Petit law, but gave the wrong quantitative behavior at very low temperatures. This flaw was cured by Debye~\cite{debye}, who developed his model practically at the same time as Sackur and Tetrode derived their equation. The Bohr model of the atom was to follow in 1913. As a side remark, Ernest Rutherford's paper on the atomic nucleus appeared in 1911, the same year in which Heike Kamerlingh Onnes discovered superconductivity. For an extensive account of the evolution of the ``old quantum theory'' we refer the reader to~\cite{rechenberg}. Just as Planck more than ten years earlier, Sackur and Tetrode referred to Boltzmann in the derivation of their equation. One can view the Sackur--Tetrode equation and its successful test with thermodynamic data as one of the very first confirmations of Planck's quantum hypothesis. This equation was a quite fundamental step towards modern physics, as it demonstrated the ubiquity of Planck's constant in statistical physics. We stress once more that the outstanding feature of the papers of Sackur and Tetrode was the combination of theoretical ideas with an ingenious usage of experimental data. One may speculate why the work of Sackur and Tetrode is not as well known in the physics community as one would expect from its importance in the development of quantum theory and statistical physics. One reason is certainly that both died rather young. Sackur (1880--1914), who was actually a physical chemist, died in an explosion in the laboratory of Fritz Haber, only two years after the Sackur--Tetrode equation had been established. On the other hand, Tetrode (1895--1931) was a wunderkind who published his first research paper, namely the paper on the Sackur--Tetrode equation, at the age of 17. Later on he lived rather in seclusion, though he did publish a few papers which were appreciated by the community\footnote{Tetrode published a total of six papers~\cite{dieks}.} and kept some contact with eminent contemporary physicists, before he died prematurely of tuberculosis. \paragraph{Acknowledgements:} The author thanks E.R.\ Oberaigner for useful discussions and P.O.\ Ludl for a critical reading of the manuscript.
\section{Introduction} The goal of this paper is to study the possible fate of a nonlocal diffusion strategy for a biological population in the presence of a highly oscillating distribution of resources. The study of dispersal strategies and the comparison between local and nonlocal diffusive behaviors have recently attracted great attention, and several studies have been developed both in terms of experiments and from the purely mathematical point of view (see for instance \cite{Vis_al, Hum_al, Fried, Mon_Pel_Ver} and references therein). Remarkably, the phenomenon of possibly nonlocal hunting strategies has also attracted the attention of the mass media, and related news can be found in popular newspapers and magazines (see e.g. \cite{VEN}). In this framework, even the distinction between local and nonlocal strategies is somehow a delicate issue and it is still not exactly clear in all situations what factors favor one behavior over the other. Of course, in general, as we know even from experience in our everyday life, it may be very difficult to deduce from overall principles\footnote{By ``overall principles'' we mean the availability of a general method, depending on the measurement of some parameters in the environment, which allows a population to choose an optimal strategy. We are referring to the impossibility of having a satisfactory and complete model for population dynamics, due to the complexity of the biological world.} the optimal strategy to follow in each complex situation. Therefore, it is not surprising that the question of detecting the optimal strategy in a logistic mathematical model cannot have just a simple answer that is valid in every situation, and, concretely, very different dispersal strategies have been directly observed in nature.\medskip Detecting, analyzing and understanding the differences between diffusive strategies is therefore a difficult, but important, task in biology. One of the possible distinctions among the different strategies lies in rigorously defining the concept of ``locality'' (when a predator, roughly speaking, diffuses randomly in the neighborhood looking for an available prey) versus ``nonlocality'' (the short periods of hunting activity are followed by rather long journeys of the predator in the search for food). As expected, hunting strategies of predators are definitely influenced by the distribution of the resources. When the resources are ``easily'' available, it is conceivable that predators do not need to develop a nonlocal hunting strategy; indeed, it can be more convenient not to drift too much, in order to take advantage of the rather abundant resource in their neighborhood. Conversely, when the prey is sparse, it may be worthwhile for predators to alternate local hunting activity with suitable nonlocal travels to different possible regions. Of course, the more sophisticated the species involved in the hunt, the more likely the latter phenomenon is to occur: namely, an intelligent species of prey will run away from the danger, thus making the distribution of resources for the predator sparse, and therefore making a nonlocal hunting strategy possibly more favorable. However, in the model considered in this paper the resource~$\sigma$ is independent of the distribution of the populations, so this effect is not taken into consideration by the setting discussed here.
\medskip It is also evident that the distinction between local and nonlocal strategies is a mathematical abstraction based on the consideration of different space/time scales: i.e., the ambient space that the population has at its disposal is not infinitely large in the real cases, and species cannot really perform discontinuous, nonlocal jumps. Nevertheless, a good mathematical model in which different scales are taken into account may furnish a justification for the diffusive strategy in a ``large enough'' environment in which the time scales of travel and hunting activities can be somehow distinguished in practice. \medskip We will try to give a rigorous mathematical framework to these na\"{\i}ve considerations by showing the possible advantages of the long-jump dispersal strategies (i.e. the ones based on nonlocal diffusion) in regimes where the distribution of resources may be considerably different at different points of the ambient space. Not too surprisingly, having in mind the concrete applications, we will use for this purpose the mathematical framework of linearized systems and scaling properties of the eigenvalues, which encode the stability properties of equilibrium configurations. \medskip Our mathematical framework can be discussed as follows. Reaction-diffusion systems provide an effective continuous model for the biological problem of competition between different species. The typical example of a local reaction-diffusion equation is \begin{equation}\label{cl_rd} u_t=\Delta u+(\sigma-u)u\quad{\rm in}\ (0,T)\times\Omega\,. \end{equation} We study here the case of Dirichlet boundary conditions. Though other boundary conditions may also be taken into account to model different situations, our focus on the Dirichlet data is motivated by biological considerations (for instance, prescribing the solution to vanish outside a given domain corresponds to a confinement situation, such as a hostile environment). In this model, the environment is represented by the open bounded set $\Omega\subset\mathbb{R}^n$, with~$n\ge2$, and a heterogeneous resource $\sigma:\Omega\to[0,+\infty)$ is given (stationary in time). The growth of the population density $u$ depends on a dispersal differential operator and on the reproductive rate of the population itself, which is proportional to the temporary availability of the resource $(\sigma-u)$. Dirichlet boundary conditions model a lethal environment for the population $u$ outside the domain $\Omega$. A reaction-diffusion system involves at least two species, with distributions $u$ and~$v$, whose behavior is ruled by a reaction-diffusion equation like \eqref{cl_rd}. The two competing species differ in some special features: indeed, \eqref{cl_rd} has to be modified in order to describe the foraging and reproductive habits of the species and further data concerning the environment. As is customary in Adaptive Dynamics (see \cite{Diek} and \cite{Ha}), the first step in the study of the evolution of a given feature is to single it out and then assume that the two populations differ in this feature only. For instance, in our main\footnote{Up to Section \ref{sec:lin} we investigate the opposite situation, too, that is, when the resident population has a nonlocal dispersal strategy and the mutant population has a local one.} case the resident population has a local dispersal strategy and the mutant population has a nonlocal one.
In \cite{Fried} and \cite{Diek}, one can find a comprehensive survey of the problem and of the standard approach in Adaptive Dynamics; many different features have been studied and compared in \cite{Doc_Hut_Mi_Per, Can_Co_Hu1, Can_Co_Hu2} (different dispersal rates and genetic mutations), in \cite{Hu_Mi_Po} (time-periodic sources) and in \cite{Can_Co_Lou1,Can_Co_Lou2,Chen_Ham_Lou} (addition of a chemotactic component depending on the gradient of the resource). We are interested in the comparison of the dispersal strategies: in particular, we focus on the competition between a population with ``standard'' diffusion and a second population with nonlocal dispersal. Therefore, our model is \begin{equation}\label{our_model} \left\{ \begin{array}{l} u_t=\hphantom{-(-)^s}\Delta u +\left(\sigma-(u+v)\right)u\\ v_t=-(-\Delta)^s v\,+\left(\sigma-(u+v)\right)v\,.\\ \end{array} \right. \end{equation} At a discrete level, the ``standard'' assumption is that the motion of the population is governed by a random walk, and this obviously leads to a Laplacian operator in the continuous model. Analogously, since our interest is focused on a second population with nonlocal dispersal, we adopt the fractional Laplacian operator as dispersal operator for the second distribution. The choice of such a nonlocal diffusion operator is motivated by the fact that the fractional Laplacian has good stability properties in terms of the associated stochastic processes (it is the ``continuous version'' of the discrete motion governed by L\'evy flights, see e.g.~\cite{Val1} for a simple motivation and~\cite{Be} for more advanced material), it possesses natural scaling features and it also seems to appear in real experiments (see e.g.~\cite{Vis_al, Hum_al}). The present literature on the subject of nonlocal dispersal mostly considers convolution operators (see \cite{Doc_Hut_Mi_Per,Ka_Lou_Shen1,Ka_Lou_Shen2,Can_Co_Lou_Ryan,Co_Da_Ma}). In particular, in \cite{Ka_Lou_Shen1}, the model under investigation is \begin{equation}\label{conv_model} \left\{ \begin{array}{l} u_t=\mu\Delta u +\left(\sigma-(u+v)\right)u\\ v_t=\nu\left(\delta^{-n}\int_D k\left(\frac{\cdot-y}{\delta}\right)v(y)\,dy-v\right)+\left(\sigma-(u+v)\right)v\,,\\ \end{array} \right. \end{equation} where $\mu,\nu$ are the dispersal rates of the two populations, respectively, and $\delta$ is the dispersal distance of the second population. Of course, it is a delicate business to decide, in concrete situations, which models better describe the dispersion of a real biological population, and many nonlocal terms have been taken into account in order to incorporate long-range effects. In general, we believe that fractional equations may be an important tool to further understand the complex problems arising in the mathematical modeling of biological species and we hope that the framework given in this paper can lead to a further development of the subject. \medskip In Section \ref{sec:model} we provide details and further explanations about the model considered here and some basic facts about the fractional Laplacian operator. We study the stability of a stationary solution $(\tilde u,0)$ of the aforementioned system, by means of a formal linearization at $(\tilde u,0)$, that we explain in Subsection~\ref{sec:lin}.
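Although the analysis in this paper is purely analytical, the evolution in \eqref{our_model} can be explored numerically. The following one-dimensional Python sketch (with a hypothetical, concentrated resource profile) uses a sine basis adapted to homogeneous Dirichlet conditions and operator splitting; note that, for simplicity, it implements the \emph{spectral} fractional Laplacian ($k$-th sine mode multiplied by $k^{2s}$), which is only a convenient numerical stand-in for the integral operator with exterior condition considered in this paper.
\begin{verbatim}
import numpy as np

s, dt, n_steps, M = 0.5, 1e-3, 5000, 128
j = np.arange(1, M + 1)
x = j * np.pi / (M + 1)                  # interior grid points of (0, pi)
k = np.arange(1, M + 1)                  # sine-mode indices
Phi = np.sin(np.outer(k, x))             # Phi[k-1, j-1] = sin(k x_j)

def to_modes(w):                         # discrete sine analysis
    return (2.0 / (M + 1)) * Phi @ w

def to_field(c):                         # discrete sine synthesis
    return Phi.T @ c

sigma = 50.0 * ((x > 1.0) & (x < 1.3))   # hypothetical concentrated resource
u = 0.10 * np.sin(x)                     # resident population (local)
v = 0.01 * np.sin(x)                     # mutant population (nonlocal)

decay_u = np.exp(-k.astype(float) ** 2 * dt)        # exact heat flow
decay_v = np.exp(-k.astype(float) ** (2 * s) * dt)  # spectral fractional flow

for _ in range(n_steps):
    # reaction step (explicit Euler), then exact diffusion step (splitting)
    u, v = u + dt * (sigma - (u + v)) * u, v + dt * (sigma - (u + v)) * v
    u = to_field(decay_u * to_modes(u))
    v = to_field(decay_v * to_modes(v))

dx = np.pi / (M + 1)
print(f"final masses: u = {u.sum() * dx:.4f}, v = {v.sum() * dx:.4f}")
\end{verbatim}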
The complete understanding of the global dynamics of a general system of diffusive and competing populations is beyond the scope of this paper and it seems, at first glance, very challenging from a mathematical point of view, since a variety of possible situations may occur. Nevertheless, let us stress that even the analysis of the stability of a stationary solution (also called ``invasibility'' analysis) is interesting and meaningful from an evolutionary point of view, as suggested by the principal literature in Adaptive Dynamics (again, see \cite{Diek}). In fact, a small perturbation around $(\tilde u,0)$ mirrors the occurrence of a genetic mutation in the first population, involving the dispersal strategy. At $(\tilde u,0)$ the first population benefits from an equilibrium state, while the second one does not even exist. Then a small portion of the first population (with density $\tilde u$) undergoes a genetic mutation, which starts a second population (with very small density $v$) which competes for the resource with the former. Of course, the genetic mutation of this theoretical experiment involves only the hunting/dispersal strategy, passing from a local to a nonlocal one. In this context, the expected outcome of the analysis of the stationary solution is, in most of the cases experienced in practice, stability: the second population does not find the right conditions to develop and rapidly becomes extinct. On the contrary, (even partial) instability of this type of equilibria is rather surprising and interesting, since in this case the new dispersal strategy is convenient enough to allow a short-term survival of the second species and to provide a situation of coexistence of two different populations. \medskip The core of this paper is Section \ref{sec:ciccia}, where we show how the stability of $(\tilde u,0)$ (namely, the sign of the eigenvalues associated with the linearized system) depends on the distribution of the resource $\sigma$. In particular, we will show that if a certain relationship between the variation of $\sigma$ and the fractional Poincar\'e-Sobolev constant in $\Omega$ is fulfilled (see Definition \ref{def:sscat}), then the linearized system has a positive eigenvalue and $(\tilde u,0)$ is unstable. It is transparent from Definition \ref{def:sscat} that the distributions leading to instability of $(\tilde u,0)$ (and suggesting convenience of a nonlocal dispersal strategy) are those with a ``huge variation''. The last part of Section \ref{sec:ciccia} is devoted to showing that such a distribution $\sigma$ may occur. Summarizing, the result asserting that the local dispersal strategy may become unstable in the presence of a new population endowed with nonlocal diffusive strategies can be formally stated as follows: \begin{theorem}\label{MAIN} Let $\Omega\subset\mathbb{R}^n$ be an open set with Lipschitz boundary and let $s\in (0,1)$. There exist bounded functions $\sigma:\Omega\to[0,+\infty)$ and~$\tilde u:\Omega\to[0,+\infty)$ such that $(u,v):=(\tilde u,0)$ is a linearly unstable equilibrium for the system \begin{equation}\label{90} \left\{ \begin{array}{rcll} u_t= &\Delta u &\!\!\!\!+(\sigma-(u+v))u & \quad\text{in }\Omega\\ v_t= &\!\!\!\!-(-\Delta)^s v &\!\!\!\!+(\sigma-(u+v))v & \quad\text{in }\Omega\\ u\hphantom{_t}= & \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! 0 & & \quad \text{on }\partial\Omega\\ v\hphantom{_t}= & \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! 0 & & \quad \text{in }\mathbb{R}^n\setminus\Omega\,.
\end{array} \right. \end{equation} More precisely, the function~$\tilde u$ is a solution of \begin{equation}\label{90X} \left\{ \begin{array}{ll} \Delta\tilde u(x)+(\sigma(x)-\tilde u(x))\tilde u(x)=0 & {\mbox{ in }}\Omega,\\ \tilde u=0 & {\mbox{ on }}\partial\Omega, \end{array} \right. \end{equation} and the linearization of the system in~\eqref{90} at~$(\tilde u,0)$ has a negative and a positive eigenvalue. \end{theorem} The existence of distributions of resource $\sigma$ which support the phenomenon described in Theorem \ref{MAIN} is motivated by concrete models. To be precise, in this paper we examine two particular cases: \begin{enumerate} \item[1.] a rescaled resource $\sigma_\lambda$ on a sufficiently small domain $\Omega_\lambda$ in Section \ref{subsec:ra}; \item[2.] a large multiple of the characteristic function of a ball in Section \ref{subsec:cpt}. \end{enumerate} We remark that Theorem~\ref{MAIN} states that~$\tilde u$ is a linearly stable solution of the autonomous, scalar Fisher-KPP equation in~\eqref{90X}, but~$(\tilde u,0)$ is a linearly unstable equilibrium for the system in~\eqref{90}. More explicitly, the positive eigenvalue of the linearized system takes into account the fact that if the density of the first population undergoes a small variation without the appearance of the second species, then the system has the tendency to return to the original position. Conversely, the negative eigenvalue shows that if a second population appears, then the system does not go back to the original situation, and the second species has indeed chances to survive and colonize the environment. It is interesting to contrast this result with those obtained in \cite{Ka_Lou_Shen1}: when the dispersal rates are equal\footnote{In our model, we do not even take into account different dispersal rates $\mu$ and $\nu$.}, that is $\mu=\nu$, if the dispersal distance $\delta$ in \eqref{conv_model} is sufficiently small, then $v$ can invade the local population $u$ but $u$ cannot invade the nonlocal population $v$. This suggests, as a general principle, that the smaller spreader may be favored by evolution, especially in hostile environments. Our approach is rather different: in some sense, we consider the dispersal distance as already fixed (in the definition of the fractional Laplacian) and we investigate how the possibility of an invasion depends on the availability of the resource $\sigma$. Our aim is to highlight the role of the environment (more precisely, the fact that Definition \ref{def:sscat} is fulfilled) in the selection of the dispersal strategy. \medskip Roughly speaking, the condition (in Definition \ref{def:sscat}) which allows the instability of the system records the fact that the first population, with local diffusion, cannot saturate the given resource and leaves enough ``leftovers'' for the second species to survive. \medskip In this sense, a natural question is to determine whether a population exhausts the resource. For this, as a second result, we provide an example of a purely nonlocal phenomenon in population modeling.
We show that, for any fixed, arbitrarily small~$\varepsilon>0$ and given any resource~$\sigma\in C^k(B_1,\,[0,+\infty))$, there exists a resource~$\sigma_\varepsilon\in C^k(B_1,\,[0,+\infty))$ that is~$\varepsilon$-close to~$\sigma$ in the norm of~$C^k(B_1)$, a radius~$R_{\varepsilon,\sigma}>1$ and a function~$u_\varepsilon$ which vanishes outside~$B_{R_{\varepsilon,\sigma}}$, which is~$s$-harmonic in~$B_1$, which equals~$\sigma_\varepsilon$ in~$B_1$ and which therefore satisfies $$ (-\Delta)^s u_\varepsilon =(\sigma_\varepsilon - u_\varepsilon)u_\varepsilon \quad{\mbox{ in~$B_1$.}}$$ That is, up to an arbitrarily small error, a nonlocal population can locally adapt to any given resource (provided that the density of the population is artificially and appropriately regulated in a suitable region). The formal statement of this result goes as follows. \begin{theorem}\label{NNC} Let~$k\in\mathbb{N}$ and~$\sigma\in C^k(B_1,\,[0,+\infty))$. Fix~$\varepsilon>0$. Then there exists $\sigma_\varepsilon \in C^k(B_1)$ with \begin{equation}\label{CU1} \|\sigma-\sigma_\varepsilon\|_{C^k(B_1)}\le\varepsilon \end{equation} and there exist $R_{\varepsilon,\sigma}>1$ and~$u_\varepsilon \in C^k(B_1)\cap C^s(\mathbb{R}^n)$ such that \begin{eqnarray} && u_\varepsilon (x) =\sigma_\varepsilon (x)\quad \text{for any }x\in B_1\label{CU4}\\ && (-\Delta)^s u_\varepsilon(x) =0 \quad \text{for any }x\in B_1\label{CU2}\\ && u_\varepsilon (x) =0 \quad \text{for any }x\in \mathbb{R}^n\setminus B_{R_{\varepsilon,\sigma}}\,.\label{CU3} \end{eqnarray} In particular \begin{equation}\label{CU5} (-\Delta)^s u_\varepsilon(x) =(\sigma_\varepsilon(x) - u_\varepsilon(x))\,u_\varepsilon(x) \quad \text{for any }x\in B_1\,. \end{equation} \end{theorem} It is worth mentioning that Theorem~\ref{NNC} heavily relies on the nonlocal feature of the equation and it does not have any local counterpart (this will be clearly explained in Section~\ref{sec:cur}). Let us stress the fact that Theorem~\ref{NNC} does not prove (and cannot prove, since this would be false in general) that a nonlocal population always completely exhausts the resource, since a small error~$\varepsilon$ has to be taken into account. In a sense, the solution given by Theorem~\ref{NNC} differs from the original one, since it does not attain the homogeneous Dirichlet boundary datum: it has prescribed, non-homogeneous (but compactly supported) Dirichlet boundary datum outside the strategic region in which the equation is satisfied\footnote{The fact that the Dirichlet boundary condition is not homogeneous reflects mathematically the practical condition of performing an effective distribution plan for the population outside the strategic region.}. We observe that Theorem~\ref{NNC} has important (though socially not embraceable!) practical consequences. For instance, a given population may have a strong intention to consume all the given resource in a region of particular strategic importance (say, a region contained in the ball~$B_1$ in our example). Indeed, in concrete cases, this strategic area might be favorable for generating a new competing species, or might be easily accessible by a similar population coming from abroad which can be considered dangerous or undesired by the local population, and the possible leftover of the resource might obviously favor the newcomers. For these reasons, a ``socially conservative'' (and rather unkind!) population may wish to avoid leaving available resources in strategic regions which can be used by unwanted competitors.
The result in Theorem~\ref{NNC} says, roughly speaking, that in this case, a nonlocal population is able to find a suitable, somehow ``artificial'', distribution of population far away in order to consume the resource in the strategic region and thus penalize the newcomers (vice versa, a local population cannot do that). Notice that this suitable distribution of the conservative population may require a modification of the conditions far away: indeed the supporting ball $B_{R_{\varepsilon,\sigma}}$ in Theorem~\ref{NNC} may become larger and larger for small $\varepsilon$: that is, in a sense, the conservative population may need to change its plan ``close to infinity'' in order to consume more efficiently the inner resource (in this sense, a ``global plan'' for the population distribution is in order, and it is indeed conceivable that an optimal use of resources may involve strategic plans on the distribution of the population in the large). \medskip The rest of this paper is organized as follows. In Section~\ref{sec:model} we recall the basic notation about the population dynamics model that we study. The linearized dynamics of the system is then analyzed in Section~\ref{sec:ciccia}, where we will also give two examples that establish Theorem~\ref{MAIN}. Finally, in Section~\ref{sec:cur} we will prove Theorem~\ref{NNC} and show that it is a new phenomenon, which only arises in nonlocal dispersion models. \section{Biological models and mathematical tools}\label{sec:model} \subsection{Population dynamics} Let us denote by $u,v:[0,T)\times\Omega\to[0,+\infty)$ the densities of two species coexisting in the same domain $\Omega$ and competing for a common resource $\sigma:\Omega\to\mathbb{R}$. Here and in the rest of the paper we consider as a domain an open, bounded set $\Omega\subset\mathbb{R}^n$ with Lipschitz boundary $\partial\Omega$. The resource $\sigma$ belongs to the space of measurable, essentially bounded functions $L^\infty(\Omega)$. We study the linear stability of a stationary solution of the reaction-diffusion system with Dirichlet boundary conditions \begin{equation}\label{syst_dyn} \left\{ \begin{array}{ll} u_t=\hphantom{-(-)^s}\Delta u +\left(\sigma-(u+v)\right)u\quad &\text{in }[0,T)\times\Omega\\ v_t=-(-\Delta)^s v\,+\left(\sigma-(u+v)\right)v\quad &\text{in }[0,T)\times\Omega\\ u(t,\cdot)=0 & \text{on }\partial\Omega,\,\forall\,t\in[0,T)\\ v(t,\cdot)=0 & \text{in }\mathbb{R}^n\setminus\Omega,\,\forall\,t\in[0,T)\,. \end{array} \right. \end{equation} For this, we perform a formal linearization around a stationary point $(\tilde u,0)$ of \eqref{syst_dyn} and then we focus only on the corresponding linearized system, that is \[ \left\{ \begin{array}{ll} \hphantom{(}-\Delta u\hphantom{)^s}\!\!\!=(\sigma-2\tilde u)u-\tilde uv \quad & {\rm in}\ \Omega\\ (-\Delta)^sv=(\sigma-\tilde u)v \quad & {\rm in}\ \Omega\\ \hphantom{(}u=0 \quad & {\rm on}\ \partial\Omega\\ \hphantom{(}v=0 \quad & {\rm in}\ \mathbb{R}^n\setminus\Omega\,. \end{array} \right. \] \begin{remark}{\rm Though the global dynamics is beyond the scope of this paper, we recall that there is a detailed and specialized literature about the well-posedness of the initial value problem associated to \eqref{syst_dyn} with $u(0,x)=u_0(x)$ and $v(0,x)=v_0(x)$ for some given functions $u_0,v_0$ (see \cite{Mo} for the well-posedness of the problem in $C^k(\overline\Omega)$, for instance).
The analysis of the global dynamics of semilinear parabolic systems is performed through the theory of Monotone Dynamical Systems (see \cite{Hirsch}, \cite{Hirsch2} and \cite{Smith}). For the fractional Fisher-KPP equation one should see \cite{Ca_Ro1} and \cite{Be_Ro_Ro}, for instance. See also Section \ref{sec:glob} for further remarks. }\end{remark} Before focusing on the aforementioned linearized system, let us recall some useful definitions and facts about the pseudodifferential operator $(-\Delta)^s$ that is involved in \eqref{syst_dyn}. \subsection{The nonlocal dispersive strategy and the fractional Laplacian}\label{subsec:fl} Given an open set $\Omega\subset\mathbb{R}^n$ and $s\in(0,1)$, the Gagliardo seminorm of a measurable function $u$ is defined as \[ [u]_{H^s(\mathbb{R}^n)}:=\left(\int_{\mathbb{R}^n} \int_{\mathbb{R}^n} \frac{|u(x)-u(y)|^2}{|x-y|^{n+2s}}\,dx\,dy\right)^{\frac 12}\,. \] The fractional Sobolev space that we denote here by~$H^s_0(\Omega)$ is the linear set containing all the measurable functions~$u:\mathbb{R}^n\to\mathbb{R}$ such that: \begin{itemize} \item $\|u\|_{L^2(\Omega)}<+\infty$, \item $[u]_{H^s(\mathbb{R}^n)}<+\infty$, and \item $u(x)=0$ for a.e.~$x\in\mathbb{R}^n\setminus\Omega$. \end{itemize} The Gagliardo seminorm is naturally related to the fractional Laplacian, since \[ (-\Delta)^su(x):=\left(\frac{\Gamma(n/2+s)}{\pi^{2s+n/2}\Gamma(-s)}\right)\lim_{\varepsilon\to 0}\int_{\mathbb{R}^n\setminus B_\varepsilon(x)}\frac{u(x)-u(y)}{|x-y|^{n+2s}}\,dy\,, \] where~$\Gamma$ is Euler's Gamma function. For an introduction to the fractional Laplacian and the fractional Sobolev spaces see for instance~\cite{Di_Nez_Pa_Val}. In our framework, the scalar version of \eqref{syst_dyn}, that is \[ v_t=-(-\Delta)^sv+(\sigma-v)v\,, \] is known as the Fisher-KPP equation with fractional diffusion; for the many established results one can see, for instance, \cite{Ca_Ro1} and \cite{Stan_Vaz}. In this section we summarize only the results needed in this paper. \begin{theorem}[Fractional Poincar\'e-Sobolev embedding theorem]\label{thm_sobineq} Fix $s\in(0,1)$ and an open bounded set $\Omega\subset\mathbb{R}^n$ with Lipschitz boundary. There exists a positive constant $C_\sharp=C_\sharp(s,\Omega)$ such that \begin{equation}\label{fsi} \forall\,\phi\in H^s_0(\Omega)\,,\quad\|\phi\|^2_{L^2(\Omega)}\le C_\sharp[\phi]^2_{H^s(\mathbb{R}^n)}\,. \end{equation} This means that $H^s_0(\Omega)$ is continuously embedded in $L^2(\Omega)$. \end{theorem} \begin{proof} We give the proof, which is of classical flavor, for the convenience of the reader. We argue by contradiction, supposing that there exists a sequence~$\phi_k\in H^s_0(\Omega)$ such that~$\|\phi_k\|_{L^2(\Omega)}\ge k[\phi_k]_{H^s(\mathbb{R}^n)}$. We define $$ \psi_k := \frac{\phi_k}{\|\phi_k\|_{L^2(\Omega)} }.$$ Then~$\psi_k\in H^s_0(\Omega)$ and \begin{equation}\label{p0o1} \left( \int_{\mathbb{R}^n}\int_{\mathbb{R}^n} \frac{|\psi_k(x)-\psi_k(y)|^2}{ |x-y|^{n+2s}}\,dx\,dy\right)^{\frac12}= [\psi_k]_{H^s(\mathbb{R}^n)} = \frac{ [\phi_k]_{H^s(\mathbb{R}^n)} }{ \|\phi_k\|_{L^2(\Omega)} }\le \frac{1}{k}.\end{equation} Therefore $$ \left(\int_{\Omega}\int_{\Omega} \frac{|\psi_k(x)-\psi_k(y)|^2}{ |x-y|^{n+2s}}\,dx\,dy\right)^{\frac12}\le \frac{1}{k}.$$ Also, $\|\psi_k\|_{L^2(\Omega)}=1$. Therefore, by compactness (see e.g. Theorem~7.1 in~\cite{Di_Nez_Pa_Val}, used here with~$p=q=2$), we obtain that, up to a subsequence, $\psi_k$ converges to some~$\psi$ in~$L^2(\Omega)$ and a.e. in~$\Omega$.
Defining~$\psi(x):=0$ for any~$x\in\mathbb{R}^n\setminus\Omega$, we have that~$\psi_k=\psi=0$ a.e. in~$\mathbb{R}^n\setminus\Omega$, and consequently~$\psi_k$ converges to~$\psi$ a.e. in~$\mathbb{R}^n$. Thus, by taking the limit in~\eqref{p0o1} and using Fatou's Lemma, \begin{eqnarray*}&&\int_{\mathbb{R}^n}\int_{\mathbb{R}^n} \frac{|\psi(x)-\psi(y)|^2}{ |x-y|^{n+2s}}\, dx\,dy\le \liminf_{k\to+\infty} \int_{\mathbb{R}^n}\int_{\mathbb{R}^n} \frac{|\psi_k(x)-\psi_k(y)|^2}{ |x-y|^{n+2s}}\, dx\,dy\\&&\qquad\le \liminf_{k\to+\infty}\frac{1}{k^2}=0.\end{eqnarray*} Accordingly~$\psi$ must be constant in~$\mathbb{R}^n$ and therefore identically equal to zero (up to sets of null measure). This implies that $$ 1= \lim_{k\to+\infty} \|\psi_k\|_{L^2(\Omega)} =\lim_{k\to+\infty} \|\psi_k-\psi\|_{L^2(\Omega)}=0.$$ This is a contradiction and it proves the desired result. \end{proof} In the following, we will always assume $C_\sharp(s,\Omega)$ to be the sharp constant such that \eqref{fsi} holds, namely \begin{equation}\label{CP} C_\sharp^{-1}(s,\Omega) = \inf_{\substack{\phi\in H^s_0(\Omega) \\ \phi\not\equiv0}} \frac{[\phi]^2_{H^s(\mathbb{R}^n)}}{\|\phi\|^2_{L^{2}(\Omega)} } = \inf_{\substack{\phi\in H^s_0(\Omega) \\ \phi\not\equiv0}} \frac{[\phi]^2_{H^s(\mathbb{R}^n)}}{\|\phi\|^2_{L^{2}(\mathbb{R}^n)} }\,. \end{equation} \begin{remark}\label{rmk:rs}{\rm If~$r>0$ and~$\phi\in H^s_0(B_1)$, one can consider the rescaled function~$\phi_r(x):=r^{-n/2}\phi(x/r)$. Then~$\phi_r$ vanishes a.e. outside~$B_r$. Moreover, $\|\phi_r\|_{L^{2}(\mathbb{R}^n)}=\|\phi\|_{L^{2}(\mathbb{R}^n)}$ and~$ [\phi_r]_{H^s(\mathbb{R}^n)}=r^{-s} [\phi]_{H^s(\mathbb{R}^n)}$. Accordingly, \[ C_\sharp(s,B_r)=r^{2s} C_\sharp(s,B_1)\,. \] }\end{remark} \subsection{Linearization of the system}\label{sec:lin} Let $\Omega\subset\mathbb{R}^n$ and $\sigma\in L^\infty(\Omega)$ be as in Section \ref{sec:model}. Our purpose is a qualitative study of an equilibrium state of the following system \begin{equation}\label{syst} \left\{ \begin{array}{l} u_t=\hphantom{-(-}\Delta u\hphantom{)^s}\!+\left(\sigma-(u+v)\right)u\\ v_t=-(-\Delta)^sv+\left(\sigma-(u+v)\right)v \end{array} \right. \end{equation} More precisely, we look for an equilibrium state of the form $(\tilde u,0)$ with $\tilde u\in H^1_0(\Omega)$ and $\tilde u\ge 0$. For the sake of completeness, we also investigate the existence of an equilibrium state of the form $(0,\tilde v)$, with $\tilde v\in H^s_0(\Omega)$ and $\tilde v\ge 0$. The linearization of \eqref{syst} at $(0,\tilde v)$ and further conclusions are postponed to Section \ref{sec:cur}. \begin{definition}\label{def:nontriv}{\rm Given a bounded function~$\sigma:\Omega\to[0,+\infty)$, we say that~$\sigma$ satisfies a reverse Poincar\'e-Sobolev condition if \begin{equation}\label{poscond} \sup_{u\in H^1_0(\Omega)} \int_\Omega\sigma(x)u(x)^2\,dx-\int_\Omega|\nabla u|^2\,dx>0\,. \end{equation} Furthermore, $\sigma$ satisfies a reverse fractional Poincar\'e-Sobolev condition with parameter $s$ if \begin{equation}\label{poscond_frac} \sup_{v\in H^s_0(\Omega)}\int_\Omega\sigma(x)v(x)^2\,dx-[v]^2_{H^s(\mathbb{R}^n)}>0\,. \end{equation} }\end{definition} In order to make computations easier, we give a sufficient condition that ensures~\eqref{poscond}. \begin{lemma}\label{COMP} Let~$\lambda_1(\Omega)$ be the first eigenvalue of the Laplacian in~$\Omega$ with Dirichlet boundary condition and let~$\phi_1\in H^1_0(\Omega)$ be the corresponding eigenfunction.
If \begin{equation}\label{hp_exist} \lambda_1(\Omega)\int_\Omega \phi_1(x)^2\,dx<\int_\Omega\sigma(x)\phi_1(x)^2\,dx\,, \end{equation} then the reverse Poincar\'e-Sobolev condition in~\eqref{poscond} is satisfied. \end{lemma} \begin{proof} By construction \begin{equation*} \left\{ \begin{array}{ll} -\Delta\phi_1=\lambda_1(\Omega)\phi_1\quad & \text{in }\Omega\\ \phi_1=0 & \text{on }\partial\Omega\,, \end{array} \right.\end{equation*} and so, by~\eqref{hp_exist}, $$ \int_\Omega |\nabla\phi_1(x)|^2\,dx= \lambda_1(\Omega)\int_\Omega \phi_1(x)^2\,dx<\int_\Omega\sigma(x)\phi_1(x)^2\,dx$$ which proves~\eqref{poscond}.\end{proof} \begin{remark}\label{0oUU} {\rm It is worth noticing that condition~\eqref{hp_exist} is satisfied, for a fixed domain~$\Omega$, for any resource~$\sigma$ that is sufficiently large in an open subset of~$\Omega$. Hence, for fixed~$\Omega$, there are many examples of smooth resources satisfying~\eqref{hp_exist} and therefore~\eqref{poscond}.} \end{remark} \begin{remark} {\rm We also observe that the converse of Lemma~\ref{COMP} does not hold true, i.e. the reverse Poincar\'e-Sobolev condition in~\eqref{poscond} does not necessarily imply~\eqref{hp_exist}: as an example, one may consider $\Omega=(0,\pi)$, $\sigma(x) = \varepsilon^{-29/10}\chi_{ (0,\varepsilon)}(x)$ and~$u(x)=|x|^{2/3}$, with~$\varepsilon>0$ suitably small. Then~$u\in H^1_0(\Omega)$ and~\eqref{poscond} holds true, since \begin{eqnarray*} && \int_\Omega\sigma(x)u(x)^2\,dx-\int_\Omega|\nabla u|^2\,dx = \varepsilon^{-29/10} \int_0^{\varepsilon}x^{4/3}\,dx -\frac49\int_0^\pi x^{-2/3}\,dx\\&&\qquad=\frac{3}{7} \varepsilon^{-29/10} \varepsilon^{7/3} -\frac{4\pi^{1/3}}{3}>0.\end{eqnarray*} On the other hand, in this case~$\phi_1(x)=\sin x$, $\lambda_1(\Omega)=1$, and \begin{align*} \hphantom{=} & \,\lambda_1(\Omega)\int_\Omega \phi_1(x)^2\,dx- \int_\Omega\sigma(x)\phi_1(x)^2\,dx\\ = & \,\int_0^\pi \sin^2 x\,dx -\varepsilon^{-29/10} \int_0^\varepsilon \sin^2 x\,dx \\ = & \,\frac{\pi}{2} -\frac{\varepsilon^{-29/10}}{2}\big( \varepsilon-\sin\varepsilon\cos\varepsilon\big)\,. \end{align*} Thus, since, by a Taylor expansion, $$ \sin\varepsilon\cos\varepsilon = (\varepsilon +O(\varepsilon^3))(1+O(\varepsilon^2)) = \varepsilon +O(\varepsilon^3)$$ it follows that $$ \varepsilon^{-29/10} \big( \varepsilon-\sin\varepsilon\cos\varepsilon\big) = O(\varepsilon^{1/10}) $$ and so $$ \lambda_1(\Omega)\int_\Omega \phi_1(x)^2\,dx- \int_\Omega\sigma(x)\phi_1(x)^2\,dx =\frac{\pi}{2}-O(\varepsilon^{1/10}) >0,$$ which shows that~\eqref{hp_exist} is not satisfied in this case. }\end{remark} The fractional equivalent of Lemma \ref{COMP} is stated in the following lemma. We omit its proof, which would be a repetition of the proof of Lemma \ref{COMP}. \begin{lemma}\label{lem:comp_frac} Let $\phi_s\in H^s_0(\Omega)$ be a minimizer for the Rayleigh quotient \eqref{CP}. If \[ C_\sharp^{-1}(s,\Omega)\|\phi_s\|^2_{L^2(\Omega)}<\int_\Omega\sigma(x)\phi_s(x)^2\,dx\,, \] then the reverse fractional Poincar\'e-Sobolev condition with parameter $s$ in \eqref{poscond_frac} is satisfied. \end{lemma} \begin{remark}\label{rmk:mangime}{\rm As we noticed in Remark \ref{0oUU} for the local analog of the reverse fractional Poincar\'e-Sobolev condition, if the resource $\sigma$ is sufficiently abundant in the domain $\Omega$, then condition \eqref{poscond_frac} is satisfied. } \end{remark} The reverse Poincar\'e-Sobolev conditions in~\eqref{poscond} and~\eqref{poscond_frac} are a useful tool to obtain non-trivial solutions of the stationary equation, as stated in the following result.
\begin{theorem}\label{thm:sta_sol} Consider $s\in(0,1]$ and a bounded function $\sigma:\Omega\to[0,+\infty)$ satisfying either \begin{itemize} \item[{\rm (1)}] the reverse Poincar\'e-Sobolev condition in \eqref{poscond} (when $s=1$) or \item[{\rm (2)}] the reverse fractional Poincar\'e-Sobolev condition in \eqref{poscond_frac} (when $s<1$). \end{itemize} Then there exists a non-trivial, non-negative function $\tilde u\in H^s_0(\Omega)$ (i.e.~$\tilde u\ge 0$ and $\tilde u\not\equiv 0$) satisfying \begin{equation}\label{eig0} \left\{ \begin{array}{ll} (-\Delta)^s\tilde u(x)=(\sigma(x)-\tilde u(x))\tilde u(x)\quad & \text{in }\Omega\\ \tilde u=0 & \text{on }\partial\Omega\,. \end{array} \right. \end{equation} When $s<1$, the boundary condition is understood as $\tilde u=0$ in $\mathbb{R}^n\setminus\Omega$, consistently with the setting of Subsection~\ref{subsec:fl}. \end{theorem} \begin{proof} The proof is a minimization argument, based on coercivity and energy methods. Though the idea of the proof is rather standard, see e.g. \cite{Can_Co}, we provide the necessary details for the convenience of the reader. The proof also gives us the possibility of a comparison between the local and the nonlocal case. First, we prove the theorem for the local case $s=1$ and then we provide the suitable changes in order to prove the nonlocal case, too. \begin{itemize} \item[(1)] Consider the following energy \[ E(u):=\int_\Omega \frac{|\nabla u|^2}{2}-\sigma\frac{u^2}{2}+\frac{|u|^3}{3} \] defined on $H^1_0(\Omega)$. Notice that the Euler-Lagrange equation for $E$ gives \[ -\Delta u=(\sigma-|u|)u\,. \] We show that the energy $E$ is coercive in $H^1_0(\Omega)$, that is \begin{equation}\label{COE} E(u)\to +\infty\ \text{as }\|u\|_{H^1_0(\Omega)}\to+\infty\,. \end{equation} For this, we use the Young inequality with exponents~$3/2$ and~$3$ to see that, for any~$a$, $b\ge0$, \[ ab\le \frac 23 a^{\frac 32}+\frac 13 b^3\,. \] In particular, taking~$a:=2^{-2/3} u^2 $ and~$b:=2^{-1/3} \|\sigma\|_{L^\infty(\Omega)}$, we obtain that $$ \sigma\frac{u^2}{2} \le \|\sigma\|_{L^\infty(\Omega)}\frac{u^2}{2} \le \frac{|u|^3}{3}+\frac{ \|\sigma\|_{L^\infty(\Omega)}^{3} }{6},$$ hence $$ -\sigma\frac{u^2}{2} +\frac{|u|^3}{3} \ge -c_0,$$ for some~$c_0>0$ independent of~$u$. Accordingly, $$ E(u)\ge \int_\Omega \frac{|\nabla u(x)|^2}{2}\,dx-c_0|\Omega|,$$ which establishes~\eqref{COE}. As a consequence of~\eqref{COE}, we have that~$E$ has a global minimum $\overline u\in H^1_0(\Omega)$, satisfying \[ -\Delta\overline u=(\sigma-|\overline u|)\overline u\,. \] Since $\overline u$ is a minimum, then $\tilde u:=|\overline u|$ is a minimum too, because $E(\overline u)=E(|\overline u|)$. Thus we can consider a non-negative function $\tilde u\ge 0$ satisfying \[ -\Delta\tilde u=(\sigma-\tilde u)\tilde u\,. \] We conclude the proof by showing that condition \eqref{poscond} guarantees that $E(\tilde u)<0$ and then $\tilde u\not\equiv 0$. By~\eqref{poscond}, there exists a function $u\in H^1_0(\Omega)$ with \[\int_\Omega\sigma(x)u(x)^2\,dx-\int_\Omega|\nabla u|^2\,dx>0\,.\] By density, we can suppose that~$u\in C^\infty_0(\Omega)$. For every $\varepsilon>0$ we can rewrite the energy $E$ evaluated at $\varepsilon u$ as \[ E(\varepsilon u)=\varepsilon^2\left(\int_\Omega\frac{|\nabla u|^2}{2}-\sigma\frac{u^2}{2}+\varepsilon\frac{|u|^3}{3}\right)\,, \] hence $E(\tilde u)\le E(\varepsilon u)<0$ provided $\varepsilon$ is small enough.
\item[(2)] The energy \[ E_s(v):=\int_{\mathbb{R}^n\times\mathbb{R}^n}\frac{|v(x)-v(y)|^2}{|x-y|^{n+2s}}\,dx\,dy-\int_\Omega\sigma\frac{v^2}{2}+\frac{|v|^3}{3} \] is well defined\footnote{For the sake of simplicity, we omit the multiplicative normalization constants.} and coercive in $H^s_0(\Omega)$ and the proof is the same as in the local case. Moreover, the Euler-Lagrange equation for $E_s$ is \[ (-\Delta)^sv=(\sigma-|v|)v\,. \] Consequently, $E_s$ has a global minimum $\overline v$ and $\tilde v=|\overline v|$ is a minimum, too, because \begin{align*} E_s(\tilde v) & =\int_{\mathbb{R}^n\times\mathbb{R}^n}\frac{||\overline v(x)|-|\overline v(y)||^2}{|x-y|^{n+2s}}\,dx\,dy-\int_\Omega\sigma\frac{|\overline v|^2}{2}+\frac{|\overline v|^3}{3}\\ & \le \int_{\mathbb{R}^n\times\mathbb{R}^n}\frac{|\overline v(x)-\overline v(y)|^2}{|x-y|^{n+2s}}\,dx\,dy-\int_\Omega\sigma\frac{\overline v^2}{2}+\frac{|\overline v|^3}{3}=E_s(\overline v)\,. \end{align*} As we proved in part (1), condition \eqref{poscond_frac} ensures that $E_s(\tilde v)<0$ and thus $\tilde v\not\equiv 0$.\qedhere \end{itemize} \end{proof} The result in Theorem~\ref{thm:sta_sol} and several variations of it are rather of classical flavor: with slightly different assumptions on $\sigma$ (take, for instance, $\sigma>0$ in $\Omega$) and a branching condition matching \eqref{poscond} for the existence of non-trivial solutions, it can be found in \cite{Am_Pro} and in \cite{Be}. In view of Theorem \ref{thm:sta_sol} and comparing with Remark \ref{0oUU} and Remark \ref{rmk:mangime}, we obtain the fact that the richer the environment is, the easier the survival of a population. This fact, which matches the intuition, finds a detailed quantification in the following observation. \begin{remark}{\rm Since the reverse (fractional) Poincar\'e-Sobolev inequalities \eqref{poscond} and \eqref{poscond_frac} seem to play a symmetric role in Theorem \ref{thm:sta_sol}, let us compare them more carefully. In some sense, the Dirichlet boundary conditions being equal for \eqref{eig0} when $s=1$ and when $s<1$, the nonlocal population has an advantage when the diameter of the domain tends to $0$. More precisely, as we remarked in Remark \ref{0oUU}, a resource $\sigma$ needs to be sufficiently large in order to meet \eqref{hp_exist}, which implies \eqref{poscond}. How large should $\sigma$ be is proportional to the first eigenvalue of the Laplacian $\lambda_1(\Omega)=C_\sharp(1,\Omega)^{-1}$. Now, if $\Omega=B_r$, we observe that \[ \lambda_1(B_r)=\frac{1}{r^2C_\sharp(1,B_1)}\longrightarrow +\infty\qquad\text{as }r\to 0\,, \] that is, the environment becomes more and more lethal for the local population, because \eqref{hp_exist} is very difficult to satisfy. The situation is milder for the nonlocal population, because, thanks to Remark \ref{rmk:rs} \[ \frac{1}{\lambda_1(B_r)C_\sharp(s,B_r)}=\frac{r^2C_\sharp(1,B_1)}{r^{2s}C_\sharp(s,B_1)}\longrightarrow 0\qquad\text{as }r\to 0\,. \] This means that the criticality of the domain size is slower to prevail on a nonlocal population. In this sense, the lethal property of the boundary (as described by the homogeneous Dirichlet datum outside the domain) has a different influence on local and nonlocal populations, depending on the scale of the domain. For instance, for small balls (when~$1/r\gg 1/r^s$), nonlocal populations are favored. Conversely, for large balls (when~$1/r\ll 1/r^s$), local populations are favored (heuristically, because the local diffusion has little chance to reach the deadly boundary). 
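To fix ideas on these scalings with a purely illustrative computation: halving the radius of the ball multiplies the local survival threshold, which by the above is proportional to $\lambda_1(B_r)=r^{-2}C_\sharp(1,B_1)^{-1}$, by a factor $4$, whereas the nonlocal threshold, proportional to $C_\sharp(s,B_r)^{-1}=r^{-2s}C_\sharp(s,B_1)^{-1}$, is only multiplied by $2^{2s}$; for instance, when $s=1/2$, \[ \frac{\lambda_1(B_{r/2})}{\lambda_1(B_r)}=4 \qquad{\mbox{while}}\qquad \frac{C_\sharp(s,B_{r/2})^{-1}}{C_\sharp(s,B_r)^{-1}}=2^{2s}=2\,, \] so each halving of the domain penalizes the local population twice as much as the nonlocal one.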
In the perspective of an applied analysis, one can find explicit fractional Sobolev constants in \cite{Co_Ta}. }\end{remark} In the remaining part of this section, we focus on the local case, that is, on the properties of a stationary point for the system \eqref{syst} of type $(\tilde u,0)$. This is motivated by the evolutionary point of view of studying the effect of the advent of a new population (this approach is indeed often adopted in the literature, see e.g. \cite{Doc_Hut_Mi_Per}, \cite{Ha}). Of course, we think that it would also be an interesting problem to investigate the cases of a dominant nonlocal population (corresponding to a stationary point of type $(0,\tilde v)$) and of the possible coexistence of two different populations, namely provide concrete assumptions on the resources and the domains that allow the existence of equilibria~$(u^*,v^*)$ with both~$u^*$ and~$v^*$ nontrivial. Of course, an easier approach to the existence of mixed states~$(u^*,v^*)$ may be taken by studying the case of different resources in the two logistic equations, but we do not address this problem in the present paper. \begin{remark}{\rm As a byproduct of the proof of Theorem~\ref{thm:sta_sol}, we have that the solution found is an energy minimizer. That is, if~$\tilde u$ is the solution obtained in Theorem~\ref{thm:sta_sol}, then~$E(\tilde u+\varepsilon u)\ge E(\tilde u)$, for any~$u\in H^1_0(\Omega)$ and any~$\varepsilon\in\mathbb{R}$. Accordingly, the map $$ \varepsilon \mapsto {\mathcal{E}}(\varepsilon):=E(\tilde u+\varepsilon u)$$ attains its minimum at~$\varepsilon=0$ and therefore \begin{equation}\label{MiM} 0\le {\mathcal{E}}''(0)= \int_\Omega |\nabla u|^2 -\sigma u^2 +2\tilde u u^2\,dx.\end{equation} In particular, the solution is linearly stable, i.e. the second derivative of the energy is a non-negative quadratic form. }\end{remark} The energy functional is quite useful to capture the stability of the pure states, such as the ones of the type~$(\tilde u,0)$. For related approaches, also based on the linearization of semilinear systems, see e.g.~\cite{Can_Co}. Also, it is useful to recall that the population~$\tilde u$ cannot beat the resource~$\sigma$, as stated in the following result: \begin{lemma}\label{TAP} Consider a bounded function $\sigma:\Omega\to[0,+\infty)$ and a non-negative solution~$\tilde u\in H^1_0(\Omega)$ of~\eqref{eig0}. Then~$\tilde u(x)\le \|\sigma\|_{L^\infty(\Omega)}$, for any~$x\in\Omega$. \end{lemma} \begin{proof} Let~$\Theta:= \|\sigma\|_{L^\infty(\Omega)}$. We test equation~\eqref{eig0} against~$v:=\max \{ \tilde u-\Theta,\,0\}$ and we see that $$ \int_{\Omega} |\nabla v|^2 =\int_\Omega \nabla \tilde u \cdot \nabla v=\int_\Omega (\sigma-\tilde u)\tilde u v =\int_{\{\tilde u\ge\Theta\}} (\sigma-\tilde u)\tilde u(\tilde u-\Theta) \,.$$ Now observe that, in~$\{\tilde u\ge\Theta\}$, we have~$\sigma-\tilde u\le \Theta-\tilde u\le0$, which shows that $$ \int_{\Omega} |\nabla v|^2\le 0.$$ Accordingly, $v$ vanishes identically and so~$\tilde u\leq\Theta$. \end{proof} \begin{corollary}\label{0dj789js} Consider a bounded function $\sigma:\Omega\to[0,+\infty)$ and a non-negative solution~$\tilde u\in H^1_0(\Omega)$ of~\eqref{eig0}. Then~$\tilde u$ is continuous inside~$\Omega$. \end{corollary} \begin{proof} By Lemma~\ref{TAP}, the function~$\tilde u$ is bounded, hence the right hand side~$g:=(\sigma-\tilde u)\tilde u$ of~\eqref{eig0} belongs to~$L^\infty(\Omega)$. Since~$-\Delta\tilde u=g$ in~$\Omega$, the interior regularity theory for uniformly elliptic equations (see e.g. Theorem~8.22 in~\cite{Gil_Tru}, or \cite{Can_Co}) gives that~$\tilde u$ is locally H\"older continuous, and in particular continuous, inside~$\Omega$.
\end{proof} {F}rom now on, we focus on the stability of the system around the stationary point $(\tilde u,0)$, where the distribution of resources $\sigma$ satisfies \eqref{poscond} and $\tilde u\in H^1_0(\Omega)$ is a non-trivial, non-negative solution of \eqref{eig0}. The linearization of the system \eqref{syst} at $(\tilde u, 0)$ gives, as a result, the linear operator \begin{equation}\label{lin_syst}\begin{split} L_{(\tilde u,0)}(u,v)\,&=\left( \begin{array}{cc} \Delta +(\sigma-2\tilde u) & -\tilde u \\ 0 & -(-\Delta)^s +(\sigma-\tilde u) \end{array} \right)\left( \begin{array}{c} u \\ v \end{array} \right)\\ &= \left( \begin{array}{c} \Delta u+(\sigma-2\tilde u)u -\tilde u v \\ -(-\Delta)^s v +(\sigma-\tilde u)v \end{array} \right) \,,\end{split} \end{equation} for any~$(u,v)\in H^1_0(\Omega)\times H^s_0(\Omega)$. The associated quadratic form, with respect to the duality in~$H^1_0(\Omega)\times H^s_0(\Omega)$, is \begin{equation}\label{Q Def} Q_{(\tilde u,0)}(u,v) =-[u]_{H^1(\mathbb{R}^n)}^2 -[v]_{H^s(\mathbb{R}^n)}^2 +\int_\Omega (\sigma-2\tilde u)u^2 -\tilde u uv +(\sigma-\tilde u)v^2 \,dx\,,\end{equation} for any~$(u,v)\in H^1_0(\Omega)\times H^s_0(\Omega)$. {F}rom the triangular form of~$L_{(\tilde u,0)}$, the relevant information is concentrated on the signs of the principal eigenvalues of the pseudodifferential operators on the diagonal of~\eqref{lin_syst}. In this spirit, we first point out that the direction~$(\tilde u,0)$ is always linearly stable. This is pretty obvious if we think of the biological model, since~$(\tilde u,0)$ is the stationary configuration of just one population, and slightly and proportionally modifying the density of this population without letting any new population come into the environment should not drive the system too far from the previous equilibrium. The formal statement goes as follows: \begin{lemma}\label{9dv77} As long as there exists a solution $\tilde u$ for \eqref{eig0}, we have that $$ Q_{(\tilde u,0)}(\tilde u,0)<0.$$ \end{lemma} \begin{proof} By testing~\eqref{eig0} against~$\tilde u$, we obtain that $$ [\tilde u]_{H^1(\mathbb{R}^n)}^2=\int_\Omega(\sigma-\tilde u)\tilde u^2\,dx.$$ As a consequence, $$ Q_{(\tilde u,0)}(\tilde u,0)= -[\tilde u]_{H^1(\mathbb{R}^n)}^2 +\int_\Omega (\sigma-2\tilde u)\tilde u^2\,dx = -\int_\Omega \tilde u^3\,dx.$$ The latter term is strictly negative, thanks to Theorem~\ref{thm:sta_sol} and so we obtain the desired result. \end{proof} We point out that Lemma~\ref{9dv77} is a particular case of a more general stability result. Namely, the stationary configuration~$(\tilde u,0)$, which corresponds to the local population colonizing the whole of the environment, is also linearly stable with respect to all the perturbations in which only the density of the local species varies (i.e. the possible source of instability in this setting may only come from the advent of a nonlocal population). The formal result goes as follows: \begin{lemma} As long as there exists a solution $\tilde u$ for \eqref{eig0}, we have that $$ Q_{(\tilde u,0)}(u,0)\le0$$ for any~$u\in H^1_0(\Omega)$. \end{lemma} This lemma is well-known, due to the variational characterization of the associated eigenvalue problem. We include a proof for the convenience of the reader. \begin{proof} {F}rom~\eqref{Q Def}, $$ Q_{(\tilde u,0)}(u,0) =-\int_\Omega |\nabla u|^2\,dx +\int_\Omega (\sigma-2\tilde u)u^2 \,dx,$$ hence the claim follows from~\eqref{MiM}.
\end{proof} In view of Lemma~\ref{9dv77}, we obtain that a good way to detect the possible linear instability of the point $(\tilde u, 0)$ is to rely upon the perturbations of the form~$(0,v)$, i.e. on the possible advent of a new population with a different diffusive strategy. The purpose of the next section is therefore to understand when it is possible to obtain that $$ Q_{(\tilde u,0)}(0,v_\star)>0,$$ for a suitable choice of~$v_\star\in H^s_0(\Omega)$. \section{Linear instability}\label{sec:ciccia} Our aim in this section is to highlight the connection between the distribution of resources $\sigma$ and the possible instability of the system, which would suggest some convenience in a nonlocal dispersal strategy of the second species $v$. For this, we introduce the following notation: \begin{definition}\label{def:sscat}{\rm Let $\sigma:\Omega\to[0,+\infty)$ satisfy the reverse Poincar\'e-Sobolev condition of Definition \ref{def:nontriv}. Let $\tilde u\ge 0$ be a non-trivial solution of the non-linear equation \eqref{eig0}, provided by Theorem \ref{thm:sta_sol}. We say that the pair~$(\sigma,\tilde u)$ is mismatched in~$\Omega$ if there exist~$x_0\in\Omega$ and~$r>0$ with~$B_r(x_0)\subset\Omega$ and \begin{equation}\label{cond2} \inf_{x\in B_r(x_0)}\big( \sigma(x)-\tilde u(x)\big) >\frac{1}{C_\sharp(s,B_r(x_0))}\,. \end{equation} In this formula, the constant~$C_\sharp(s,B_r(x_0))$ is the sharp fractional Poincar\'e-Sobolev constant with respect to the ball~$B_r(x_0)$ provided by Theorem \ref{thm_sobineq}. }\end{definition} Roughly speaking, condition~\eqref{cond2} says that the solution~$\tilde u$ is not capable of exhausting the resource~$\sigma$ in the whole of the domain: that is, at least in the region~$B_r(x_0)$, the population does not manage to take advantage of all the resource at its disposal and there is at least a quantity~$C_\sharp(s,B_1)^{-1}r^{-2s}$ as a leftover. In Subsection~\ref{subsec:cpt} we will see an example of a mismatched pair $(\sigma,\tilde u)$ and it will be clear in that case that the mismatch condition depends basically on $\sigma$ only. In our setting, condition~\eqref{cond2} is sufficient to ensure linear instability, as given by the following result. \begin{proposition}\label{thm:pos_eig} If the mismatch condition in~\eqref{cond2} is satisfied, then there exists~$v_\star\in H^s_0(\Omega)$ such that~$ Q_{(\tilde u,0)}(0,v_\star)>0$. \end{proposition} \begin{proof} By~\eqref{CP} and~\eqref{cond2}, we know that there exist~$x_0\in\Omega$ and~$r>0$ such that \begin{equation}\label{BB} B_r(x_0)\subset\Omega\end{equation} and $$ \inf_{x\in B_r(x_0)} \big( \sigma(x)-\tilde u(x)\big)>\frac{1}{ C_\sharp(s,B_r(x_0))} = \inf_{\substack{\phi\in H^s_0(B_r(x_0)) \\ \phi\not\equiv0}} \frac{[\phi]_{H^s(\mathbb{R}^n)}^2}{\|\phi\|_{L^{2}(B_r(x_0))}^2}.$$ As a consequence, there exists~$v_\star\in H^s_0(B_r(x_0))$ such that~$v_\star\not\equiv0$ and \begin{equation}\label{BB2} \inf_{x\in B_r(x_0)} \big( \sigma(x)-\tilde u(x)\big)> \frac{[v_\star]_{H^s(\mathbb{R}^n)}^2}{\|v_\star\|_{L^{2}(B_r(x_0))}^2}.\end{equation} Now notice that~$\|v_\star\|_{L^{2}(B_r(x_0))}= \|v_\star\|_{L^{2}(\Omega)}$ and~$v_\star$ vanishes a.e. outside~$\Omega$, thanks to~\eqref{BB}. This gives that~$v_\star\in H^s_0(\Omega)$.
Moreover, by~\eqref{Q Def} and~\eqref{BB2}, \begin{align*} Q_{(\tilde u,0)}(0,v_\star) & =-[v_\star]_{H^s(\mathbb{R}^n)}^2+\int_{B_r(x_0)} (\sigma-\tilde u)v_\star^2 \,dx\\ & >-[v_\star]_{H^s(\mathbb{R}^n)}^2 + \frac{[v_\star]_{H^s(\mathbb{R}^n)}^2}{\|v_\star\|_{L^{2}(B_r(x_0))}^2} \,\int_{B_r(x_0)} v_\star^2 \,dx=0\,, \end{align*} which gives the desired result. \end{proof} \begin{remark}{\rm Proposition~\ref{thm:pos_eig} proves the linear instability of the point $(\tilde u, 0)$ with respect to perturbations of the type~$(0,v_\star)$ (compare with the theory of Monotone Dynamical Systems in \cite{Smith} or see \cite{Can_Co}). Indeed, \[ Q_{(\tilde u,0)}(0,v_\star)\le\|v_\star\|_{L^2}^2\,\lambda(\Omega)\,, \] where $\lambda(\Omega)$ is the principal eigenvalue of the linear pseudodifferential operator $-(-\Delta)^s+(\sigma-\tilde u)$ (see the characterization of the principal eigenvalue by Rayleigh quotient in \cite{Be_Ro_Ro}), thus $L_{(\tilde u, 0)}$ has a negative eigenvalue and a positive one, and the stability of a stationary state is determined by the spectrum of the linearization (for this general principle see \cite{Mo}). Heuristically, this can be understood as follows: by formally plugging~$(u,v)=(\tilde u, 0)+ \varepsilon (0,v_\star)+o(\varepsilon)$ into~\eqref{90} we obtain $$ v_t= -(-\Delta)^s v+(\sigma-(u+v))v =-\varepsilon(-\Delta)^s v_\star +\varepsilon (\sigma-\tilde u-\varepsilon v_\star) v_\star+o(\varepsilon).$$ Thus, since~$v_t=\varepsilon\partial_t v_\star+o(\varepsilon)$, we formally obtain $$ \partial_t v_\star = -(-\Delta)^s v_\star +(\sigma-\tilde u) v_\star+o(1).$$ Hence $$ \partial_t \|v_\star\|_{L^2(\mathbb{R}^n)}^2 = 2\int_{\mathbb{R}^n} v_\star \partial_t v_\star\,dx = Q_{(\tilde u,0)}(0,v_\star) +o(1),$$ which is positive by Proposition~\ref{thm:pos_eig}. Therefore, Proposition~\ref{thm:pos_eig} states that the size of the new population (measured in the $L^2$-norm) has chances to increase (at least for short times). }\end{remark} These types of linearization arguments in the neighborhood of equilibria that correspond to only one biological species are widely used in Adaptive Dynamics, see for instance \cite{Diek}, \cite{Ha}, \cite{Hut_Mar_Mi_Vic} and the references therein. The rest of this section is devoted to showing that the assumptions of Proposition \ref{thm:pos_eig} hold for some $\sigma:\Omega\to\mathbb{R}$. \subsection{Rescaling arguments}\label{subsec:ra} We propose here a rather simple rescaling argument which gives the existence of a domain $\Omega_\lambda$ and a distribution of resources $\sigma_\lambda$ satisfying the assumptions in Proposition \ref{thm:pos_eig}. The main drawback of this argument is the fact that the domain $\Omega_\lambda$ changes with the parameter. On the other hand, it is immediately evident that the resource $\sigma_\lambda$ leads to instability at $(\tilde u_\lambda,0)$ when it starts being sparse and far from being homogeneous. We consider here a smooth function $\sigma:\Omega\to[0,+\infty)$ satisfying the reverse Poincar\'e-Sobolev condition in~\eqref{poscond} (recall Remark~\ref{0oUU}) and the corresponding stationary solution $\tilde u$ given by Theorem \ref{thm:sta_sol}. We see that, in this case, the population~$\tilde u$ does not exhaust the resource~$\sigma$ in the whole of~$\Omega$.
More precisely, we have: \begin{lemma}\label{CON90} Let~$\sigma:\Omega\to[0,+\infty)$ be a smooth function satisfying the reverse Poincar\'e-Sobolev condition in~\eqref{poscond} and let~$\tilde u$ be the corresponding stationary solution given by Theorem \ref{thm:sta_sol}. Then there exist~$x_0\in\Omega$, $r>0$ and~$c_0>0$ such that~$B_r(x_0)\subset\Omega$ and $$ \sigma(x)-\tilde u(x)\ge c_0$$ for any~$x\in B_r(x_0)$. \end{lemma} \begin{proof} By testing~\eqref{eig0} against~$\tilde u$, we obtain that $$ 0<\int_\Omega |\nabla \tilde u(x)|^2\,dx= \int_\Omega (\sigma(x)-\tilde u(x))\tilde u^2(x)\,dx.$$ This implies that there exists~$x_0\in\Omega$ such that~$ \sigma(x_0)-\tilde u(x_0)>0$. The desired result follows from the continuity of~$\tilde u$ given by Corollary~\ref{0dj789js}. \end{proof} In the notation of Lemma~\ref{CON90}, by possibly translating the domain, we can assume that $x_0=0$, and so \begin{equation}\label{0cty}{\mbox{$\sigma-\tilde u\ge c_0>0$ in $B_r$. }}\end{equation} Then we consider the family of rescaled domains \[ \Omega_\lambda:=\{\lambda^{-\frac 12}y:\,y\in\Omega\} \] and rescaled functions \[ \sigma_\lambda(x):=\lambda\sigma(\sqrt\lambda x)\,,\quad\forall\,x\in\Omega_\lambda \] with $\lambda\ge 1$. Then \[ \tilde u_\lambda(x):=\lambda\tilde u(\sqrt\lambda x)\,,\quad\forall\,x\in\Omega_\lambda \] is a positive stationary solution for the equation \eqref{eig0} with resource $\sigma_\lambda$, since \[ \left(\Delta\tilde u_\lambda+(\sigma_\lambda-\tilde u_\lambda)\tilde u_\lambda\right)(x)=\left(\lambda^2\Delta\tilde u+\lambda^2(\sigma-\tilde u)\tilde u\right)(\sqrt\lambda x)=0\,,\quad\forall\,x\in\Omega_\lambda\,. \] \begin{proposition}\label{thm:ex_source} There exists $\Lambda\ge 1$ such that, for every $\lambda\ge\Lambda$, the pair $(\sigma_\lambda,\tilde u_\lambda)$ is mismatched in the corresponding domain $\Omega_\lambda$, according to Definition~\ref{def:sscat}. \end{proposition} \begin{proof} We take~$r_\lambda:=\lambda^{-\frac 12}r$. By~\eqref{0cty}, \begin{equation}\label{bfd5e} \begin{split} & \inf_{|x|<r_\lambda} \big(\sigma_\lambda(x)-\tilde u_\lambda(x)\big) = \inf_{|x|<\lambda^{-\frac 12}r } \lambda\, \big(\sigma(\sqrt{\lambda}x)-\tilde u(\sqrt{\lambda}x)\big) \\ &\qquad= \inf_{|y|<r } \lambda\, \big(\sigma(y)-\tilde u(y)\big)\ge c_0\lambda. \end{split}\end{equation} On the other hand, by Remark~\ref{rmk:rs}, $$ C_\sharp(s,B_{r_\lambda})=r_\lambda^{2s} C_\sharp(s,B_1) =\lambda^{-s}r^{2s} C_\sharp(s,B_1).$$ By comparing this with~\eqref{bfd5e}, we conclude that $$ \inf_{x\in B_{r_\lambda}} \big(\sigma_\lambda(x)-\tilde u_\lambda(x)\big) \ge c_0\lambda> \frac{\lambda^{s}}{r^{2s} C_\sharp(s,B_1)}= \frac{1}{C_\sharp(s,B_{r_\lambda})},$$ provided that \begin{equation*} \lambda>\left(c_0\,r^{2s}\,C_\sharp(s,B_1)\right)^{-\frac{1}{1-s}}\,. \qedhere \end{equation*} \end{proof} {F}rom Propositions~\ref{thm:ex_source} and~\ref{thm:pos_eig}, we obtain that there exists~$v_{\star,\lambda}\in H^s_0(\Omega_\lambda)$ such that~$ Q_{(\tilde u_\lambda,0)}(0,v_{\star,\lambda})>0$, as long as $\lambda$ is large enough, hence~$(\tilde u_\lambda,0)$ is linearly unstable. This is a first example that shows the validity of Theorem~\ref{MAIN} (a different one will be constructed in the remaining part of this paper). It is worth pointing out that the condition that~$\lambda$ is large translates into the fact that the domain~$\Omega_\lambda$ is small and the resource~$\sigma_\lambda$ is very unevenly distributed.
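To quantify this with a purely illustrative example: if the data happen to be normalized so that $c_0\,r^{2s}\,C_\sharp(s,B_1)=1/2$ and $s=1/2$, then the condition at the end of the proof of Proposition~\ref{thm:ex_source} reads \[ \lambda>\left(\tfrac12\right)^{-\frac{1}{1-s}}=\left(\tfrac12\right)^{-2}=4\,, \] so every rescaling with $\lambda>4$ produces a mismatched pair $(\sigma_\lambda,\tilde u_\lambda)$, and hence linear instability.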
In some sense, the nonlocal diffusion may allow the population to take advantage of the small region in which the resource is abundant, while a less diffusive population may starve in the portion of the environment with limited resource. \subsection{Branching arguments}\label{subsec:cpt} In this subsection we focus on a particular family of distributions: indeed, we assume $B_r(x_0)\subset\Omega$ and \[ \sigma_\tau(x):=\tau \chi_{B_r(x_0)}(x)=\left\{\begin{array}{lr} \tau \quad & x\in B_r(x_0)\\ 0 & x\notin B_r(x_0)\,. \end{array} \right. \] We show that there exist $\tau,r>0$ such that the assumptions of Proposition \ref{thm:pos_eig} hold. First of all, we have to deal with Definition \ref{def:nontriv}, which locates a branching point for solutions of \eqref{eig0}. For this, for any~$\tau\in\mathbb{R}$, $x_0\in\mathbb{R}^n$, $r>0$, such that~$B_r(x_0)\subset\Omega$, we introduce the quantity \begin{equation}\label{RE} e(\tau,x_0,r):=\sup_{\substack{u\in H^1_0(\Omega) \\ \|u\|_{L^2(\Omega)}=1}} \tau\int_{B_r(x_0)}u^2 - \int_\Omega|\nabla u|^2\,. \end{equation} We observe that if~$\tau\le 0$ then obviously~$ e(\tau,x_0,r)\le 0$. Thus we use the following notation. \begin{definition}\label{def:ciobar}{\rm We denote \[ \underline\tau(x_0,r):=\sup\left\{\tau\in\mathbb{R}\;:\; e(\tau,x_0,r)\le0 \right\}\,. \] }\end{definition} Now we discuss some basic properties of the quantities that we have just defined. \begin{lemma}\label{finite} The quantity introduced in Definition~\ref{def:ciobar} is finite, namely \[\underline\tau(x_0,r)\in [0,+\infty)\,.\] \end{lemma} \begin{proof} Let~$\phi\in C^\infty_0(B_r)$ with~$\|\phi\|_{L^2(B_r)}=1$, and let~$u(x):=\phi(x-x_0)$. Then~$\|u\|_{L^2(\Omega)}= \|u\|_{L^2(B_r(x_0))}= \|\phi\|_{L^2(B_r)}=1$, and $$ e(\tau,x_0,r)\ge \tau\int_{B_r(x_0)}u^2 - \int_{\Omega}|\nabla u|^2 =\tau- \int_{B_r}|\nabla\phi|^2 >0$$ provided that~$\tau>\int_{B_r}|\nabla\phi|^2$. \end{proof} \begin{lemma}\label{est} For any~$\tau_1\le \tau_2$ we have that $$ e(\tau_2,x_0,r)-e(\tau_1,x_0,r) \in [0,\tau_2-\tau_1].$$ \end{lemma} \begin{proof} Fix~$\varepsilon>0$. For any~$i\in\{1,2\}$, there exists~$u_{(i,\varepsilon)}\in H^1_0(\Omega)$, with $\|u_{(i,\varepsilon)}\|_{L^2(\Omega)}=1$, such that $$ e(\tau_i,x_0,r)\le \varepsilon+ \tau_i\int_{B_r(x_0)}u_{(i,\varepsilon)}^2 - \int_\Omega|\nabla u_{(i,\varepsilon)}|^2.$$ Therefore \begin{eqnarray*} e(\tau_2,x_0,r)-e(\tau_1,x_0,r) &\ge& \tau_2\int_{B_r(x_0)}u_{(1,\varepsilon)}^2 - \int_\Omega|\nabla u_{(1,\varepsilon)}|^2 -e(\tau_1,x_0,r) \\ &\ge& \tau_1\int_{B_r(x_0)}u_{(1,\varepsilon)}^2 - \int_\Omega|\nabla u_{(1,\varepsilon)}|^2 -e(\tau_1,x_0,r) \\ &\ge& -\varepsilon, \end{eqnarray*} and \begin{eqnarray*} e(\tau_1,x_0,r)-e(\tau_2,x_0,r)&\ge& \tau_1\int_{B_r(x_0)}u_{(2,\varepsilon)}^2 - \int_\Omega|\nabla u_{(2,\varepsilon)}|^2 -e(\tau_2,x_0,r) \\ &\ge& (\tau_1-\tau_2) \int_{B_r(x_0)}u_{(2,\varepsilon)}^2-\varepsilon \\ &\ge& -(\tau_2-\tau_1)\int_{\Omega}u_{(2,\varepsilon)}^2-\varepsilon \\ &=& -(\tau_2-\tau_1)-\varepsilon. \end{eqnarray*} The desired result now follows by taking~$\varepsilon$ as small as we wish. \end{proof} \begin{corollary}\label{cor e tau} If $\tau\downarrow \underline\tau(x_0,r)$, then~$e(\tau,x_0,r)\to0$. \end{corollary} \begin{proof} Suppose not, i.e. there exists a sequence \begin{equation}\label{PO}\tau_j\ge \underline\tau(x_0,r)\end{equation} with~$\tau_j\to \underline\tau(x_0,r)$ as~$j\to+\infty$, such that \begin{equation}\label{PO2} |e(\tau_j,x_0,r)|\ge a,\end{equation} for some~$a>0$.
We claim that \begin{equation}\label{PO2.1} e(\tau_j,x_0,r)\ge a.\end{equation} We prove it by contradiction: if not, by~\eqref{PO2}, we would have that~$e(\tau_j,x_0,r)\le-a$. Thus, we set $$\tau_a:=\underline\tau(x_0,r)+\frac{a}{2}.$$ We notice that~$\tau_a>\underline\tau(x_0,r)$, therefore, by Definition~\ref{def:ciobar}, we have that \[e(\tau_a,x_0,r)>0\,.\] In addition, we have that~$ \tau_a>\tau_j$ if~$j$ is large enough, thus we make use of Lemma~\ref{est} and we obtain that, for large~$j$, $$ 0+a \le e(\tau_a,x_0,r)-e(\tau_j,x_0,r)\le \tau_a -\tau_j.$$ Taking the limit in~$j$, we conclude that $$ a\le \tau_a-\underline\tau(x_0,r)=\frac{a}{2}.$$ This is a contradiction and~\eqref{PO2.1} is established. Also, by Definition~\ref{def:ciobar}, we know that there exists a sequence~$\tilde\tau_j\le \underline\tau(x_0,r)$ with~$\tilde\tau_j\to \underline\tau(x_0,r)$, such that~$e(\tilde\tau_j,x_0,r)\le0$. Accordingly, by~\eqref{PO2.1}, \begin{equation}\label{76} e(\tau_j,x_0,r)-e(\tilde\tau_j,x_0,r)\ge a. \end{equation} Notice that~$\tau_j\ge \underline\tau(x_0,r)\ge \tilde\tau_j$ and $$ \lim_{j\to+\infty} \tau_j-\tilde\tau_j= \underline\tau(x_0,r)-\underline\tau(x_0,r)=0.$$ Thus, by Lemma~\ref{est} $$ \lim_{j\to+\infty} e(\tau_j,x_0,r)-e(\tilde\tau_j,x_0,r) \le \lim_{j\to+\infty} \tau_j-\tilde\tau_j =0.$$ This is in contradiction with~\eqref{76} and so the desired result is proved. \end{proof} Before stating and proving the main theorem of this subsection, we investigate the behavior of $\underline\tau(x_0,r)$ under scaling. \begin{proposition}\label{prop:stima_ciobar} Fix $s'\in (0,1)$. There exists a constant $\tau_*:=\tau_*(s',\Omega)$ such that \[ \underline\tau(x_0,r)\ge r^{-2s'}\tau_*(s',\Omega) \] for every~$x_0\in\Omega$ and~$r>0$ such that~$B_r(x_0)\subset\Omega$. \end{proposition} \begin{proof} We claim that \begin{equation}\label{CLA} \int_{B_r(x_0)}u^2\le c(s',\Omega)\,r^{2s'}\|\nabla u\|^2_{L^2(\Omega)}, \end{equation} for some constant~$c(s',\Omega)>0$. Once~\eqref{CLA} is proved, one can finish the proof of the desired result by arguing as follows. One sets~$\tau_*(s',\Omega):=1/c(s',\Omega)$. Then, for every~$\tau\le r^{-2s'}\tau_*(s',\Omega)$ (i.e. for every~$\tau\le 1/(c(s',\Omega)\,r^{2s'})$), one has that \[ \int_\Omega |\nabla u|^2-\tau \int_{B_r(x_0)} u^2\ge \int_\Omega |\nabla u|^2-\frac{1}{c(s',\Omega)\,r^{2s'}} \int_{B_r(x_0)} u^2\ge 0\,, \] where the latter inequality is a consequence of the claim \eqref{CLA}. This gives that $e(\tau,x_0,r)\le0$ for any~$\tau\le r^{-2s'}\tau_*(s',\Omega)$, and so, by Definition~\ref{def:ciobar}, we have that~$\underline\tau(x_0,r)\ge r^{-2s'}\tau_*(s',\Omega)$, thus proving the desired result. Due to these observations, it only remains to prove~\eqref{CLA}. To this end, we observe that, given~$p>2$, by the H\"older inequality with exponents $\frac{p}{2}$ and~$\frac{p}{p-2}$, we have $$ \int_{B_r(x_0)}u^2\le \left(\omega_n r^n\right)^{\frac{p-2}{p}}\|u\|^2_{L^{p}(\Omega)}.$$ Therefore, the claim in~\eqref{CLA} is established if we show that there exists~$p>2$ such that \begin{equation}\label{CLA2} r^{\frac{(p-2)n}{p}} \|u\|^2_{L^{p}(\Omega)} \le C(s',\Omega,p)\, r^{2s'}\,\|\nabla u\|^2_{L^2(\Omega)}, \end{equation} for some~$C(s',\Omega,p)>0$. So, now it only remains to prove~\eqref{CLA2}. To this end, we deal separately\footnote{The case $n\ge 3$ is simpler because the Sobolev conjugate exponent $2^*=2n/(n-2)$ is finite. Indeed, in this case the parameter $s'$ does not play much role.} with the cases $n=2$ and $n\ge 3$.
We start with $n\ge 3$. In this case, we denote by $p:=\frac{2n}{n-2}>2$ the Sobolev conjugate exponent of 2. Notice that~${\frac{(p-2)n}{p}}=2$ and the Sobolev inequality (see e.g. formula~(7.26) in~\cite{Gil_Tru}) bounds~$\|u\|^2_{L^{p}(\Omega)}$ with~$C(\Omega)\,\|\nabla u\|^2_{L^2(\Omega)}$, for some~$C(\Omega)>0$. Hence, if we denote by~$D_0>0$ the diameter of~$\Omega$, we have that $$ r^{\frac{(p-2)n}{p}} \|u\|^2_{L^{p}(\Omega)} =r^2 \|u\|^2_{L^{p}(\Omega)}\le C(\Omega)\, r^{2s'} D_0^{2-2s'} \|\nabla u\|^2_{L^2(\Omega)},$$ and estimate~\eqref{CLA2} follows in this case. For the case $n=2$, we observe that $$ \lim_{p\to+\infty} \frac{p-2}{p} = 1> s',$$ so we can choose an even integer~$p=p(s')\in (2,+\infty)$ large enough such that \begin{equation}\label{ciobar_choice_p} \frac{p-2}{p}>s'\,. \end{equation} Also, the critical Sobolev embedding (see e.g. formula~(7.38) in~\cite{Gil_Tru}) yields that \begin{equation}\label{0dhjk} \int_\Omega {\rm exp}\,\left( \frac{|u(x)|}{c_1 \|\nabla u\|_{L^2(\Omega)} }\right)^2\,dx\le c_2\,|\Omega|,\end{equation} for suitable~$c_1$, $c_2>0$. Then, since $$ e^t=\sum_{k=0}^{+\infty} \frac{t^k}{k!}\ge \frac{t^{p/2}}{(p/2)!},$$ we deduce from~\eqref{0dhjk} that $$ \int_\Omega \left( \frac{|u(x)|}{\|\nabla u\|_{L^2(\Omega)} }\right)^p \,dx\le C(\Omega,p),$$ for some~$C(\Omega,p)>0$. Therefore $$ \|u\|^2_{L^{p}(\Omega)}\le C'(\Omega,p)\,\|\nabla u\|_{L^2(\Omega)}^2 ,$$ for some~$C'(\Omega,p)>0$. As a consequence, if~$D_0>0$ is the diameter of~$\Omega$, \begin{align*} r^{\frac{(p-2)n}{p}} \|u\|^2_{L^{p}(\Omega)} & = r^{2\left(\frac{(p-2)}{p}-s'\right)} \,r^{2s'}\,\|u\|^2_{L^{p}(\Omega)}\\ & \le C'(\Omega,p) \,D_0^{2\left(\frac{(p-2)}{p}-s'\right)}\,r^{2s'} \|\nabla u\|_{L^2(\Omega)}^2\,. \end{align*} This completes the proof of~\eqref{CLA2} when~$n=2$. \end{proof} \begin{theorem}\label{thm:percpt} Let~$r$, $\tau>0$. Consider the family of distributions $\sigma_\tau=\tau\chi_{B_r(x_0)}$ and a corresponding family of stationary solutions $\tilde u_\tau\in H^1_0(\Omega)$, that is \[ -\Delta\tilde u_\tau=(\sigma_\tau-\tilde u_\tau)\tilde u_\tau\,. \] If $\tau\downarrow \underline\tau(x_0,r)$, then $\tilde u_\tau\to 0$ uniformly. \end{theorem} \begin{proof} First of all, we notice that \begin{equation}\label{bound+} \tilde u_\tau\le \tau, \end{equation} thanks to Lemma~\ref{TAP}. Now we fix~$\varepsilon\in(0,1)$ and we claim that \begin{equation}\label{L3} \| \tilde u_\tau\|_{L^3(\Omega)}\le\varepsilon, \end{equation} provided that~$\tau$ is close enough to~$\underline\tau(x_0,r)$. To establish this, we test the equation against~$\tilde u_\tau$ itself, and we obtain that $$\int_\Omega| \nabla \tilde u_\tau|^2= \int_\Omega (\sigma_\tau-\tilde u_\tau)\tilde u_\tau^2 =\tau \int_{B_r(x_0)} \tilde u_\tau^2 -\int_\Omega \tilde u_\tau^3,$$ which in turn gives $$ \| \tilde u_\tau\|_{L^3(\Omega)}^3=\int_\Omega \tilde u_\tau^3= \tau \int_{B_r(x_0)} \tilde u_\tau^2-\int_\Omega |\nabla \tilde u_\tau|^2 \le e(\tau,x_0,r)\,\|\tilde u_\tau\|_{L^2(\Omega)}^2,$$ thanks to~\eqref{RE}, applied to the normalized function~$\tilde u_\tau/\|\tilde u_\tau\|_{L^2(\Omega)}$ (if~$\tilde u_\tau\equiv0$ there is nothing to prove). Since, by~\eqref{bound+}, $\|\tilde u_\tau\|_{L^2(\Omega)}^2\le\tau^2|\Omega|$ stays bounded as~$\tau\downarrow\underline\tau(x_0,r)$, this and Corollary~\ref{cor e tau} imply~\eqref{L3}. Now we set~$g(x):= (\sigma_\tau-\tilde u_\tau)\tilde u_\tau$. Notice that~$-\Delta \tilde u_\tau=g$ in~$\Omega$ and, by~\eqref{bound+} and Lemma~\ref{finite}, $$ |g|\le(\sigma_\tau+\tilde u_\tau)\,\tilde u_\tau\le 2\tau\,\tilde u_\tau\le 2(\underline\tau(x_0,r)+1)\, \tilde u_\tau\le C_0 \,\tilde u_\tau,$$ for some~$C_0>0$ independent of~$\tau$, as long as~$\tau$ is sufficiently close to~$\underline\tau(x_0,r)$.
In particular, by~\eqref{bound+} and~\eqref{L3}, \begin{equation}\label{SM} \|g\|_{L^{n+3}(\Omega)}\le C_0\,\left( \int_\Omega \tilde u_\tau^{n+3} \right)^{\frac{1}{n+3}} \le C_1 \,\left( \int_\Omega \tilde u_\tau^{3} \right)^{\frac{1}{n+3}} \le C_1\varepsilon^{\frac{3}{n+3}},\end{equation} for some~$C_1>0$. Moreover, using the H\"older inequality with exponents~$3$ and~$3/2$, $$ \|\tilde u_\tau\|_{L^2(\Omega)}^2 =\int_\Omega \tilde u_\tau^2 \le |\Omega|^{\frac13} \,\left(\int_\Omega \tilde u_\tau^3\right)^{\frac23}=|\Omega|^{\frac13}\, \|\tilde u_\tau\|_{L^3(\Omega)}^2,$$ therefore, recalling~\eqref{L3} and~\eqref{SM}, $$ \|\tilde u_\tau\|_{L^2(\Omega)}+\|g\|_{L^{n+3}(\Omega)} \le |\Omega|^{\frac16} \varepsilon+ C_1\varepsilon^{\frac{3}{n+3}} \le C_2\varepsilon^{\frac{3}{n+3}},$$ for some~$C_2>0$. We combine this information with Theorem~8.15 of~\cite{Gil_Tru} (used here with~$f:=0$ and~$q:=2(n+3)>n$), thus we obtain that $$ \|\tilde u_\tau\|_{L^\infty(\Omega)} \le C \,\big( \|\tilde u_\tau\|_{L^2(\Omega)}+\|g\|_{L^{n+3}(\Omega)}\big)\le C\,C_2\,\varepsilon^{\frac{3}{n+3}},$$ for some~$C>0$, as long as~$\tau$ is sufficiently close to~$\underline\tau(x_0,r)$, which is the desired claim. \end{proof} \begin{corollary}\label{cor:exist_sigma} Fix~$s'\in (s,1)$. Let~$r>0$. Assume that \begin{equation} \label{idjjsjsj} r< \left(\frac{ C_\sharp(s,B_1) \,\tau_*(s',\Omega)}{2}\right)^{\frac{1}{2(s'-s)}}, \end{equation} where~$C_\sharp(s,B_1)$ is the Poincar\'e-Sobolev constant in~\eqref{CP} and~$\tau_*(s',\Omega)$ is given by Proposition~\ref{prop:stima_ciobar}. Consider the family of distributions $\sigma_\tau=\tau\chi_{B_r(x_0)}$. Then there exists~$\tau>\underline\tau(x_0,r)$ such that both the reverse Poincar\'e-Sobolev condition in~\eqref{poscond} and the mismatch condition in~\eqref{cond2} are satisfied.\end{corollary} \begin{proof} By Definition~\ref{def:ciobar}, every~$\tau>\underline\tau(x_0,r)$ satisfies~$e(\tau,x_0,r)>0$, which is precisely the reverse Poincar\'e-Sobolev condition in~\eqref{poscond} for the resource~$\sigma_\tau$. In particular, by Theorem~\ref{thm:sta_sol}, we can consider the solution~$\tilde u_\tau$ corresponding to the resource~$\sigma_\tau$. Now we fix \begin{equation}\label{VE} \varepsilon\in\left(0,\,\frac{ \tau_*(s',\Omega)}{2\,r^{2(s'-s)} }\right).\end{equation} Thanks to Theorem \ref{thm:percpt}, we can choose $\tau$ sufficiently close to $\underline\tau(x_0,r)$ such that $\|\tilde u_\tau\|_{L^\infty(\Omega)}\le r^{-2s}\varepsilon$. Therefore, for every $x\in B_r(x_0)$, we have that \[ \sigma_\tau(x)-\tilde u_\tau(x)\ge\sigma_\tau(x)-r^{-2s}\varepsilon>\underline\tau(x_0,r)-r^{-2s}\varepsilon .\] {F}rom this and Proposition \ref{prop:stima_ciobar}, we have that, for every $x\in B_r(x_0)$, \[ \sigma_\tau(x)-\tilde u_\tau(x)\ge r^{-2s'}\tau_*(s',\Omega)-r^{-2s}\varepsilon\,. \] So, recalling~\eqref{VE}, $$ \inf_{x\in B_r(x_0)}\big( \sigma_\tau(x)-\tilde u_\tau(x)\big)>\frac{ r^{-2s'}\tau_*(s',\Omega) }{2}.$$ Thus, from Remark~\ref{rmk:rs} and~\eqref{idjjsjsj}, we obtain \begin{align*} \frac{1}{C_\sharp(s,B_r(x_0))} & = \frac{1}{r^{2s} C_\sharp(s,B_1)} = \frac{r^{-2s'}\,r^{2(s'-s)}}{ C_\sharp(s,B_1)}\\ & <\frac{ r^{-2s'}\, C_\sharp(s,B_1) \,\tau_*(s',\Omega) }{ 2\,C_\sharp(s,B_1)}\\ & =\frac{r^{-2s'}\tau_*(s',\Omega)}{2}<\inf_{x\in B_r(x_0)}\left(\sigma_\tau(x)-\tilde u_\tau(x)\right)\,. \end{align*} This establishes the mismatch condition in~\eqref{cond2}.
\end{proof} {F}rom Proposition~\ref{thm:pos_eig} and Corollary~\ref{cor:exist_sigma}, it follows that we have constructed another example for which the equilibrium~$(\tilde u_\tau,0)$ is linearly unstable, confirming again Theorem~\ref{MAIN}. Once again, this example corresponds to a resource that is unevenly spread in the environment, and the nonlocal diffusion may compensate for such an unbalanced distribution of resources. As a final observation, we would like to stress that most of the techniques discussed in this paper are of quite general nature and can be efficiently exploited in similar problems with different species and different dispersive properties. \section{A purely nonlocal phenomenon}\label{sec:cur} The goal of this section is to relate the stability of stationary points of type $(0,\tilde v)$ to Theorem \ref{NNC} and to show how, with our arguments, there is no hope of proving an analogue of Theorem \ref{MAIN} for $(0,\tilde v)$. We include the proof of Theorem~\ref{NNC} and clarify that it is a purely nonlocal feature. The linearization of the system \eqref{syst} at $(0,\tilde v)$ gives \[ L_{(0,\tilde v)}(u,v)=\left(\begin{array}{cc} \Delta+(\sigma-\tilde v) & 0 \\ -\tilde v & -(-\Delta)^s+(\sigma-2\tilde v) \end{array}\right) \left(\begin{array}{c} u\\ v \end{array}\right)\,. \] Thus, any instability result would be a consequence of an inequality of type \[ Q_{(0,\tilde v)}(u_\star,0)=-[u_\star]_{H^1(\mathbb{R}^n)}^2+\int_\Omega(\sigma-\tilde v)u_\star^2\,dx>0\,. \] This means that, if we want to run the same argument that we did in Section \ref{sec:ciccia} for $(\tilde u,0)$, then we have to find an analogue of the mismatch condition \eqref{cond2}. Roughly speaking, we need to know that, at least in certain circumstances, the amount of leftovers of the dominant population $\tilde v$ exceeds a given constant, depending on the size of the domain. But, around a stationary point of type $(0,\tilde v)$, the nonlocal population $\tilde v$ tends to exhaust all the available resource $\sigma$ in the domain $\Omega$. This claim is motivated by Theorem \ref{NNC}, because formula~\eqref{CU4} states that the population $u_\varepsilon$ locally fits with any given resource, up to an arbitrarily small error estimated by~\eqref{CU1}. Of course we are neglecting the Dirichlet boundary condition on $u_\varepsilon$. We are now left with the proof of Theorem \ref{NNC}. \begin{proof}[Proof of Theorem~\ref{NNC}] By Theorem 1.1 in~\cite{Di_Sa_Val}, we know that we can approximate~$\sigma$ by an $s$-harmonic function in~$B_1$: namely, we have that there exist~$R_{\varepsilon,\sigma}>1$ and~$u_\varepsilon \in C^k(B_1)\cap C^s(\mathbb{R}^n)$ satisfying~\eqref{CU2}, \eqref{CU3} and \begin{equation} \label{CU7} \|\sigma-u_\varepsilon\|_{C^k(B_1)}\le\varepsilon. \end{equation} Now we define \begin{equation} \label{CU8} \sigma_\varepsilon:=u_\varepsilon.\end{equation} In this framework, formula~\eqref{CU1} follows from~\eqref{CU7} and~\eqref{CU8}. Moreover, by~\eqref{CU2} and~\eqref{CU8}, $$ (\sigma_\varepsilon(x) - u_\varepsilon(x))\,u_\varepsilon(x)=0= (-\Delta)^s u_\varepsilon(x),$$ for any~$x\in B_1$, which proves~\eqref{CU5}.\end{proof} We stress that Theorem~\ref{NNC} is only due to the nonlocal feature of the equation and it does not have any local counterpart, as pointed out by the next result. \begin{proposition}\label{PCY} Let~$M>0$.
Let~$\sigma\in C^2(B_1)$ with \begin{align*} & \sigma(x)\ge M\quad \text{for any }x\in B_{1/16}\hphantom{\setminus B_1}\\ \text{and } \ & \sigma(x)\le 1\quad \text{for any } x\in B_1\setminus B_{1/10}\,. \end{align*} Then, there exist~$M_0>0$ and~$\varepsilon>0$ such that, if~$M\ge M_0$ and if $\sigma_\varepsilon \in C^2(B_1)$ satisfies \begin{equation}\label{DU1} \|\sigma-\sigma_\varepsilon\|_{C^2(B_1)}\le\varepsilon \end{equation} and~$u_\varepsilon \in C^2(B_1)$ satisfies \begin{equation}\label{DU5} -\Delta u_\varepsilon(x) =(\sigma_\varepsilon(x) - u_\varepsilon(x))\,u_\varepsilon(x) \quad \text{for any }x\in B_1\,, \end{equation} then \begin{equation}\label{DU2} \|u_\varepsilon-\sigma_\varepsilon\|_{C^2(B_1)}>\varepsilon\,. \end{equation} In particular, the local counterpart of Theorem~\ref{NNC} is false. \end{proposition} \begin{proof} Suppose by contradiction that for every~$\varepsilon>0$ there exist $\sigma_\varepsilon$ and~$u_\varepsilon$ satisfying not only \eqref{DU1} and \eqref{DU5}, but also \[ \|u_\varepsilon-\sigma_\varepsilon\|_{C^2(B_1)}\le\varepsilon\,. \] {F}rom~\eqref{DU1} and this contradiction assumption, we know that \begin{equation}\label{90gg} \|u_\varepsilon-\sigma\|_{L^\infty(B_1)}\le \|u_\varepsilon-\sigma\|_{C^2(B_1)}\le 2\varepsilon. \end{equation} As a consequence, \[ \|u_\varepsilon\|_{L^\infty(B_1)} \le 2+\|\sigma\|_{C^2(B_1)}\le C_\sigma,\] for some~$C_\sigma>0$, possibly depending on the fixed resource~$\sigma$. This, the contradiction assumption and~\eqref{DU5} give that, in~$B_1$, $$ |\Delta u_\varepsilon| \le |\sigma_\varepsilon- u_\varepsilon|\,|u_\varepsilon|\le C_\sigma \,\varepsilon.$$ Thus, the weak Harnack inequality (see e.g. Theorem~8.18 in~\cite{Gil_Tru}) gives that \begin{equation}\label{WH1} \|u_\varepsilon\|_{L^1(B_{1/4})}\le C_1\, \Big( \inf_{B_{1/8}}u_\varepsilon+C_\sigma\,\varepsilon\Big),\end{equation} for some constant~$C_1>0$. Now, by~\eqref{90gg} and~\eqref{DU1}, we see that $u_\varepsilon(x)\ge M-2\varepsilon$ in~$B_{1/16}$ and therefore \begin{equation}\label{WH2} \|u_\varepsilon\|_{L^1(B_{1/4})}\ge \int_{B_{1/16}} u_\varepsilon(x)\,dx \ge C_2\,(M-2\varepsilon),\end{equation} for some constant~$C_2>0$. Similarly, from~\eqref{90gg} and~\eqref{DU1}, we have that~$u_\varepsilon\le 1+2\varepsilon$ in~$B_1\setminus B_{1/10}$ and therefore \begin{equation}\label{WH3} \inf_{B_{1/8}}u_\varepsilon\le 1+2\varepsilon\le 2. \end{equation} By inserting~\eqref{WH2} and~\eqref{WH3} into~\eqref{WH1} we obtain that $$ M-2\varepsilon\le C_3\,( 2+C_\sigma\,\varepsilon),$$ for some~$C_3>0$. Thus, we take~$M\ge M_0:= 3C_3$. This fixes~$\sigma$ and gives that $$ C_3\le M-2C_3\le 2\varepsilon +C_3\,( 2+C_\sigma\,\varepsilon) -2C_3= (2+C_3 C_\sigma)\,\varepsilon.$$ By taking~$\varepsilon$ small, we obtain a contradiction and we complete the proof of Proposition~\ref{PCY}. \end{proof} \section{Further comments on stability and nontrivial solutions}\label{sec:glob} Of course, the results presented in this paper do not aim to exhaust the variety of scenarios offered by the analysis of local and nonlocal competing species. In particular, further investigations about existence and local/global stability of equilibrium solutions are desirable, also with the aim of establishing under which conditions local and nonlocal strategies are convenient for the evolution. In particular, while we focused here on the local stability (i.e.
whether or not a mutation of strategy turns out to be persistent for small times), the strategic question for biological populations in competition for large times is mostly related to global stability. The question of attractors for the global dynamics is related to the regularity properties of the semiflow and to the associated maximum and comparison principles. For this reason, though not directly used in this paper, we present here in detail a general comparison principle for a single fractional equation (the general case of systems deserves a separate analysis, also due to the lack of cooperativeness between biological species, see e.g. formula~(7) in~\cite{Boy}, and we plan further investigation along the lines of~\cite{HIR1, HIR2}): \begin{lemma}\label{COMP:LEM} Let $T>0$ and consider a locally Lipschitz function $f$. Let $v$ and $w$ be bounded and continuous functions satisfying \begin{equation}\label{le091} \partial_t v+(-\Delta)^s v +f(v)\ge \partial_t w+(-\Delta)^s w +f(w) \end{equation} on $\mathbb{R}^n\times(0,T]$, with $v(x,t)\ge w(x,t)$ for any $x\in\mathbb{R}^n\setminus\Omega$ and any $t\in[0,T]$, and $v(x,0)\ge w(x,0)$ for any $x\in\Omega$. Then $v(x,t)\ge w(x,t)$ for any $x\in\mathbb{R}^n$ and any $t\in [0,T]$. \end{lemma} \begin{proof} The proof is of classical flavor, see e.g. Proposition A.5 in \cite{delaLla_Val}. By possibly iterating the argument, it is enough to prove the result up to a small time, hence, without loss of generality we may assume that \begin{equation}\label{t179} T\le \frac1{4(M+1)},\end{equation} where $M\ge0$ is the (local) Lipschitz constant of $f$ -- more precisely, we take $M$ such that \begin{equation}\label{t178} {\mbox{$f(w(x,t)-\eta)-f(w(x,t))\le M|\eta|$ for any $\eta\in [-1,1]$.}} \end{equation} Now we suppose, by contradiction, that the claim is false. Then there would exist $(\bar x,\bar t)\in\Omega\times [0,T]$ such that $v(\bar x,\bar t)< w(\bar x,\bar t)$. We define \begin{equation}\label{t177} \varepsilon :=\min \left\{ \frac{ w(\bar x,\bar t)- v(\bar x,\bar t)}{4},\; \frac{T}{4}\right\}\end{equation} and $W:= v-w+\varepsilon t+\varepsilon^2$. Notice that \begin{eqnarray*} && W(x,0)=v(x,0)-w(x,0) +\varepsilon^2 >0\\ {\mbox{and }}&& W(\bar x,\bar t) = v(\bar x,\bar t) -w(\bar x,\bar t) +\varepsilon\bar t+\varepsilon^2 \le v(\bar x,\bar t) -w(\bar x,\bar t) +2\varepsilon <0, \end{eqnarray*} thanks to \eqref{t177}. Hence, there exists $z_*:=(x_*,t_*)\in\Omega\times[0,\bar t]\subseteq \Omega\times [0,T]$ such that $W(x,t)>0$ for any $x\in\Omega$ and any $t\in[0,t_*)$, with $W(x_*,t_*)=0$. In particular, $W(x,t_*)\ge 0=W(x_*,t_*)$, and so the integrodifferential definition of the fractional Laplacian gives that $(-\Delta)^s W(x_*,t_*)\le0$. Also, $W(x_*,t)\ge0=W(x_*,t_*)$, and thus $\partial_t W(x_*,t_*)\le0$. In addition, we have that $$v(z_*)= W(z_*)+w(z_*)-\varepsilon t_*-\varepsilon^2 = w(z_*)-\eta_*,$$ where $\eta_*:=\varepsilon t_* +\varepsilon^2 \in\left[0,\frac{\varepsilon}{2(M+1)}\right]$, thanks to \eqref{t179} and \eqref{t177}. As a consequence of these observations, and recalling \eqref{le091} and \eqref{t178}, we find that \begin{eqnarray*} 0&\ge& \partial_t W(z_*) +(-\Delta)^s W(z_*)\\ &=& \big( \partial_t v+(-\Delta)^s v\big)(z_*) -\big(\partial_t w+(-\Delta)^s w\big)(z_*) +\varepsilon \\ &\ge& f(w(z_*))-f(v(z_*)) +\varepsilon \\ &=& f(w(z_*))-f(w(z_*)-\eta_*)+\varepsilon\\ &\ge& -M \eta_* +\varepsilon \\ &>&0, \end{eqnarray*} which is a contradiction.
\end{proof} In addition, we remark that the theory developed in the previous pages also allows us to investigate the stability of nonlocal species. For instance, one sees that large resources allow both local and nonlocal populations to stem from the pure equilibria. More precisely, if~$\sigma$ is larger than the first classical and fractional eigenvalues, then the reverse Poincar\'e-Sobolev condition in~\eqref{poscond} and its fractional counterpart~\eqref{poscond_frac} are satisfied. So, in case system~\eqref{90} possesses two unstable pure equilibria~$(\tilde u,0)$ and~$(0,\tilde v)$, a positive mixed equilibrium~$ (u_* (x), v_* (x))$ may arise. Of course, a detailed analysis of all these circumstances in a general setting and a careful check of the nontrivial details involved in the dynamics associated with the flow go beyond the scope of this paper, but we refer also to~\cite{Caf_Di_Val} for a series of examples which carefully compare local and nonlocal behaviors of biological populations in terms of the size of the domain and of the sparseness of the resources. \addcontentsline{toc}{chapter}{Bibliography} \bibliographystyle{plain}
\section{Introduction} \label{sec:intro} Galaxy clusters grow by the gravitational accretion of smaller cosmic structures. These accreted structures vary in size, from individual galaxies, to galaxy groups containing tens or hundreds of member galaxies, to major cluster-cluster mergers involving thousands of galaxies \citep{moore1999,frenk2012}. Such hierarchical structure formation is one of the cornerstones of the Lambda cold dark matter ($\Lambda$CDM) model of the Universe \citep{white1978,navarro1996}. The wide range in sizes of their dark matter haloes, plus the existence of other structures such as cosmological filaments and walls \citep{bond1996,hahn2007}, results in a variety of cosmic environments in which galaxies can be found. It is now well-established that the properties of galaxies strongly depend on where they are located. An early study by \citet{dressler1980} revealed that cluster environments contain mostly early-type galaxies, whereas galaxies in field regions typically have late-type morphologies\footnote{This idea had been noted previously in other works, such as \citet{hubble1936} and \citet{zwicky1937}.}. Furthermore, cluster galaxies have quenched star formation rates \citep{balogh1999,mcnab2021} and lower gas fractions \citep{jaffe2015} compared to field galaxies, across a large range of redshifts \citep{quadri2012}. Numerous mechanisms can explain this difference in gas content and star formation rate, including slow quenching processes such as galaxy starvation \citep{larson1980,maier2016,maier2019}, and rapid processes such as ram pressure stripping \citep{gunn1972,abadi1999,zabel2019}. Although they represent the densest, most extreme galaxy environments, clusters are not the only structures that can dramatically impact galaxy evolution. Intermediate density environments can also play an important role: for instance, galaxy groups have been shown to enhance the rate of galaxy mergers, due to their combination of a high galaxy number density, and low velocity dispersion\footnote{The relative velocities of merging galaxies are usually \mbox{$<500$ km s$^{-1}$} \citep{lotz2008,an2019}, but dark matter haloes with masses greater than \mbox{$10^{14}\ M_{\odot}$} typically have velocity dispersions greater than \mbox{$500$ km s$^{-1}$} \citep{mcclintock2019,wetzell2021}. Consequently, mergers are more likely in group-sized haloes, with masses less than \mbox{$10^{14}\ M_{\odot}$}.} \citep{jian2012}. Mergers drastically impact the evolution of galaxies, altering their morphology and potentially triggering outflows and AGN feedback that can remove gas. A consequence of this connection between galaxies and their environments is that a galaxy's evolution is not just impacted by the environment in which it is currently found -- it can also be affected by the environments through which a galaxy has previously passed. In the context of clusters, `pre-processing' describes the environmental mechanisms that act on a galaxy before it is accreted by a cluster. For example, galaxies can enter a cluster through cosmological filaments, which can quench star formation similarly to clusters, albeit to a lesser degree \citep{kraljic2018,laigle2018}. This results in degeneracy, as it is not immediately clear whether cluster galaxies have been quenched by the cluster itself, or are quenched due to pre-processing. 
However, it is clear that these filaments are an important factor to consider: for instance, \citet{kuchner2022} found that $45\%$ of cluster galaxies are accreted via filaments. As galaxies can also enter clusters as members of galaxy groups, these are another contributor to pre-processing, although the exact degree of groups' contribution is debated. Some simulations \citep{mcgee2009,han2018} and observations \citep{dressler2013} find that close to half of all cluster members have been accreted as members of galaxy groups, while others \citep{arthur2019} find a much lower fraction. There are multiple explanations for this. For example, previous studies have shown that this fraction depends on the stellar mass of the accreted galaxies \citep{delucia2012}, and whether hydrodynamical or $N$-body simulations are used \citep{haggar2021}. Additionally, the definition of a `galaxy group' is not standardised, and different definitions can lead to different conclusions. Various studies have identified group members as galaxies that lie within the radius of a host group halo \citep{arthur2019,donnari2021} or that satisfy a boundness criterion \citep{han2018,choquechallapa2019}, or have selected them using a Friends-of-Friends algorithm \citep{benavides2020}, all of which can result in different selections of group members. Furthermore, \citet{berrier2009} found that, although $30\%$ of cluster members (with dark matter halo masses greater than \mbox{$10^{11.5}\ h^{-1}M_{\odot}$}) are accreted via group haloes, half of these `groups' only contain two or three galaxies. Clearly, the minimum (and maximum) size of what constitutes a group is also an important consideration. Both theoretical and observational studies have shown that the effects of a group environment on the evolutionary processes in galaxies can be enhanced even further when a group enters a cluster. Galaxy mergers \citep{benavides2020} and gas removal \citep{pallero2019,kleiner2021} are common in infalling groups, and multiple studies have connected this galaxy evolution to the external forces acting on a group, such as the effects of large-scale structure and clusters. \citet{vijayaraghavan2013} used cosmological simulations to show that mergers, ram pressure stripping, and tidal truncation of galaxy haloes are all enhanced further when their groups enter clusters, for a variety of reasons -- for example, the intra-group medium is shocked during a group-cluster merger, increasing its density and thus increasing the ram pressure stripping of the group members. Similar mechanisms have been described in previous works, such as \citet{mauduit2007}, who showed that near the centres of clusters lying in the core of the Shapley Supercluster, galaxies have lower radio loudness than galaxies elsewhere. They attributed this to the enhanced ram pressure stripping experienced by galaxies in shocked regions of merging clusters. In a related observational study, \citet{roberts2017} found that dynamically relaxed groups, which are typically isolated and slowly-growing, contain a smaller fraction of star-forming galaxies than unrelaxed groups. Again, this indicates that the processing of galaxies in groups is dependent on the disturbance of these groups by the larger environment in which they are located \citep[see also][]{gouin2021}. All of this means that galaxies that have joined clusters as members of a group have experienced different evolutionary processes to those that have joined as individuals.
\citet{bahe2019} used the Hydrangea suite of hydrodynamical simulations \citep{bahe2017} to study the survival fractions of galaxies entering clusters -- in their case, galaxies that do not `survive' are no longer resolved in the simulations, meaning they have either merged into a more massive galaxy (often a group central), or have been stripped below the total mass limit of \mbox{$5\times10^{8}\ M_{\odot}$}. \citet{bahe2019} showed that, after an infalling group enters a cluster, only $\sim50\%$ of its member galaxies survive to $z=0$. In contrast, they found that more than $90\%$ of galaxies that have not experienced any pre-processing survive to $z=0$. This survival fraction is higher than in some other studies, although much of the prior work in this field has used $N$-body simulations \citep[e.g.][]{gill2004_survival}, in which substructure can be more easily stripped \citep{smith2016}. The results of \citet{bahe2019} show that group members are particularly strongly influenced within clusters, and that they can be very heavily disturbed during accretion onto a cluster. Moreover, previous work has hinted that galaxy groups themselves can be heavily disrupted when entering a cluster. \citet{choquechallapa2019} found that, using dark matter-only simulations and a similar group definition to that used in this work, over $90\%$ of group members become unbound after a group enters a cluster, and that these galaxies quickly form part of the cluster population of galaxies. Furthermore, \citet{gonzalezcasado1994} showed that tidal forces from clusters can rapidly increase the internal energy of infalling groups, by up to a factor of 10 for the smallest groups. This can allow these groups to be disrupted, although it should be noted that absorbing more energy than the binding energy does not necessarily lead to the complete disruption of groups \citep{vandenbosch2018}. However, beyond this, there is little work that has examined in detail how the dynamics of galaxy groups evolve when they are accreted by a cluster, particularly with large numbers of clusters in hydrodynamical simulations. While previous studies have looked at the overall disruption of groups that enter a cluster and the subsequent `post-processing' of their constituent galaxies, we do not currently have a detailed understanding of the timescales over which groups change, and how the evolutionary processes that galaxies experience are affected by the group dynamics \citep{cohn2012,bahe2019}. In this work, we use {\sc The Three Hundred}\ project, a mass-complete sample of 324 galaxy clusters taken from a \mbox{$1\ h^{-1}$ Gpc} cosmological volume. These are resimulated out to distances of several times the $R_{200}$ of the cluster, where $R_{200}$ is the radius within which the mean density of a cluster is equal to 200 times the critical density of the Universe. We use these simulations to study the evolution of groups as they enter galaxy clusters, and the processes that galaxies in these groups experience in their subsequent passage through the cluster halo. Specifically, we look at how the phase space of groups evolves: that is, how the positions and speeds of galaxies change, relative to the group that they are bound to. We make comparisons between groups before and after they pass through a cluster, to find the cumulative effect that a cluster has on the dynamics and structure of galaxy groups.
Then, we look at the fates of group galaxies, categorising them based on the processes they experience in the several Gyr after entering a cluster (such as mergers and stripping), and how this depends on the structure of groups. Finally, we discuss how this theoretical work can help observational studies. The paper is structured as follows: In \Sec{sec:methods} we introduce the simulation data and the methods that we use to analyse groups. In \Sec{sec:evolution} we show how the internal dynamics of groups change as they pass through a cluster, and in \Sec{sec:before_after} we focus on the state of galaxies and groups after passing through a cluster. Finally, we summarise our findings in \Sec{sec:conclusions}. \section{Simulations \& Numerical methods} \label{sec:methods} Below we detail the methods and data used in this work. Much of this, particularly \Sec{sec:simulations} and \Sec{sec:subsample}, builds on the analysis in our previous work, \citet{haggar2021}, in which we compare the substructure of galaxy groups and galaxy clusters in hydrodynamical and dark matter-only simulations. \subsection{Simulation data} \label{sec:simulations} This work utilises data from {\sc The Three Hundred}\ project, a suite of 324 hydrodynamical resimulations of large galaxy clusters. The simulations were produced by extracting the 324 most massive clusters from the dark matter-only MDPL2 MultiDark simulation \citep{klypin2016}\footnote{The MultiDark simulations are publicly available from the cosmosim database, \url{https://www.cosmosim.org}.}, and resimulating each from its initial conditions with baryonic physics. This was done by taking all dark matter particles within \mbox{$15\ h^{-1}$ Mpc} of the cluster centre at \mbox{$z=0$} (between \mbox{$7-10R_{200}$} for the range of cluster masses in the sample), tracing the particles back to their initial positions, and then splitting each one into a dark matter and a gas particle, with masses set by the baryonic matter fraction of the Universe. Lower-resolution particles were used beyond \mbox{$15\ h^{-1}$ Mpc} to model any tidal effects of the surrounding large-scale structure. The MDPL2 simulation involves a box with sides of comoving length \mbox{$1\ h^{-1}$ Gpc}, simulated using \textit{Planck} cosmology (\mbox{$\Omega_{\rm{M}}=0.307$}, \mbox{$\Omega_{\rm{B}}=0.048$}, \mbox{$\Omega_{\Lambda}=0.693$}, \mbox{$h=0.678$}, \mbox{$\sigma_{8}=0.823$}, \mbox{$n_{\rm{s}}=0.96$}) \citep{planck2016}. The same box size and cosmology are used for each of the cluster simulations in {\sc The Three Hundred}, so that each cluster is embedded in a comoving box of size \mbox{$1\ h^{-1}$ Gpc}, most of which is occupied by the low-resolution particles described in the previous paragraph. Consequently, the lengths and distances quoted throughout this work are also given in comoving coordinates. The hydrodynamical resimulations were carried out using the {\sc GadgetX} code. {\sc{GadgetX}} is a modified version of the {\sc{Gadget3}} code, which is itself an updated version of the {\sc{Gadget2}} code, and uses a smoothed-particle hydrodynamics scheme to fully evolve the gas component of the simulations \citep{springel2005_gadget2, beck2016}. The final dataset comprises a mass-complete cluster sample from \mbox{$M_{200}=5\times10^{14}\ h^{-1}M_{\odot}$} to \mbox{$M_{200}=2.6\times10^{15}\ h^{-1}M_{\odot}$}, where $M_{200}$ is the mass contained within a sphere of radius $R_{200}$.
The dark matter and gas particles in the simulations have masses of \mbox{$m_{\rm{DM}}=1.27\times10^{9}\ h^{-1}M_{\odot}$} and \mbox{$m_{\rm{gas}}=2.36\times10^{8}\ h^{-1}M_{\odot}$}, respectively. The simulations also contain stellar particles of variable masses, typically with \mbox{$m_{\rm{star}}\sim4\times10^{7}\ h^{-1}M_{\odot}$}, produced by the stochastic star-formation model that is implemented by {\sc{GadgetX}} \citep{tornatore2007,murante2010,rasia2015}. A Plummer equivalent gravitational softening length of \mbox{6.5 $h^{-1}$ kpc} is used for the dark matter and gas particles, and \mbox{5 $h^{-1}$ kpc} for the stellar particles. {\sc The Three Hundred}\ dataset is described in more extensive detail in \citet{cui2018}, and has been used in numerous previous studies to examine galaxy groups \citep{haggar2021}, environment \citep{wang2018}, cosmic filaments \citep{kuchner2020,rost2021,kotecha2022}, backsplash galaxies \citep{haggar2020} and ram pressure stripping \citep{arthur2019,mostoghiu2021}, among other areas. The full simulation suite also includes simulations with different physics models; however, in this work we only use the {\sc{GadgetX}} simulations. \subsubsection{Galaxy identification and tree-building} \label{sec:tree} The data for each cluster in {\sc The Three Hundred}\ consists of 129 snapshots saved between \mbox{$z=16.98$} and \mbox{$z=0$}, separated by approximately \mbox{$0.3$ Gyr} at low redshift. To identify the haloes and subhaloes, each snapshot was processed using the Amiga Halo Finder, {\sc ahf}\footnote{\url{http://popia.ft.uam.es/AHF}} (see \citet{gill2004_ahf} and \citet{knollmann2009} for further details). {\sc{ahf}} operates by identifying peaks in the matter density field, and returns the positions and velocities of haloes and subhaloes, as well as their radii, their mass in gas, stars and dark matter, and a host of other properties. The halo merger trees were built using {\sc mergertree}, a tree-builder that forms part of the {\sc ahf} package. For each halo in a given snapshot, this tree-builder calculates a merit function with respect to all haloes in previous snapshots; specifically, {\sc mergertree} uses the merit function $M_{\rm{i}}$, as described in Table B1 of \citet{knebe2013}. This merit function is then used to identify a main progenitor, plus other progenitors, based on the number of particles that they share with the halo of interest. The tree-builder has the ability to skip snapshots, and thus is able to `patch' over gaps in the merger tree, for example when a subhalo is near to the centre of its host halo and so is not easy to identify against the high background density \citep{onions2012}. We also place a limit on the change in mass permitted between successive snapshots, such that no halo can more than double in dark matter mass. This helps to prevent `mismatches', caused by a subhalo located close to the centre of a larger halo being detected as the main halo \citep[as shown in][]{behroozi2015}. Additional information on {\sc ahf} and {\sc mergertree} can be found in \citet{knebe2011_ahf} and \citet{srisawat2013}. \subsection{Sub-sample of clusters} \label{sec:subsample} Some of the clusters exhibit minor problems in the merger trees constructed by {\sc mergertree}, which we describe below. However, thanks to the large dataset that we are using, we can identify and remove these objects, and still be left with a large sample of simulated clusters.
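As a concrete illustration, the two sanity checks that we apply to the trees -- the mass-doubling cap described above, and the positional-jump test described below -- might be sketched in Python as follows. The function names, argument conventions and units are our own illustrative choices, and are not part of the {\sc ahf} or {\sc mergertree} codes:

\begin{verbatim}
import numpy as np

def mass_growth_ok(m_dm_prev, m_dm_next):
    # Cap on growth between successive snapshots: no halo is
    # permitted to more than double in dark matter mass.
    return m_dm_next <= 2.0 * m_dm_prev

def position_jump(pos_prev, pos_next, r200_next):
    # Displacement of the main cluster halo between successive
    # snapshots, in units of R200(z); values above 0.5 flag a
    # likely progenitor mismatch in the merger tree.
    delta = np.asarray(pos_next) - np.asarray(pos_prev)
    return np.linalg.norm(delta) / r200_next
\end{verbatim}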
In some cases the merit function used by {\sc mergertree} can incorrectly assign links between haloes in different snapshots. This can lead to an apparent `jump' in the position of a halo or subhalo (in box coordinates), as well as a sudden change in its properties, due to one halo being incorrectly labelled as the progenitor of another. These mismatching events are uncommon, typically only affect a small number of snapshots, and are fairly inconsequential when they affect individual galaxy haloes. However, the merger tree of the main cluster halo can also be affected in this way, leading to a sudden change in the position of the main halo. Such a change in position is particularly problematic in this work, because it will result in many galaxies and groups being erroneously tagged as members of a cluster. These merger tree mismatches are especially common during a major merger between two haloes. \citet{behroozi2015} showed that various halo finders experience this same problem, where two merging haloes of similar size can be accidentally switched by a tree-builder, leading to the sizes and positions of haloes appearing to change suddenly and dramatically. Many of the clusters in {\sc The Three Hundred}\ experience major mergers; a recent study, \citet{contrerassantos2022}, discusses cluster mergers in {\sc The Three Hundred}\ simulations in detail. In fact, we find that 59 of our 324 simulated clusters experience a change in position of \mbox{$>0.5R_{200}(z)$} between two snapshots after \mbox{$z=1$}. Given that the typical time elapsing between snapshots at this redshift is \mbox{$\sim0.3$ Gyr}, such a jump is non-physical, and so is likely due to these tree-builder issues. In some cases, the tree-builder instead misses a link in the merger tree, causing a branch of the merger tree to end prematurely and the history of the halo before this link to be lost. For 17 clusters, the central cluster halo is affected in this way, and the evolution of the cluster halo cannot be tracked back further than $z=0.5$. We choose to also remove these clusters from our analysis, in order to avoid affecting our results with clusters that do not have complete, reliable merger trees. Nine of these clusters also experience the halo mismatches described in the previous paragraph, resulting in a total of 67 clusters that we choose to remove from our sample. The remaining 257 clusters have $M_{200}$ masses (dark matter, gas and stars, including subhaloes) ranging from \mbox{$5\times 10^{14}\ h^{-1}M_{\odot}$} to \mbox{$2.6\times 10^{15}\ h^{-1}M_{\odot}$}, with a median value of \mbox{$8\times 10^{14}\ h^{-1}M_{\odot}$}. Their radii ($R_{200}$) range from \mbox{$1.3\ h^{-1}$ Mpc} to \mbox{$2.3\ h^{-1}$ Mpc}, with a median of \mbox{$1.5\ h^{-1}$ Mpc}. \subsection{Galaxy and group selection} \label{sec:galgroups} In this work, we place lower limits on the total mass (including dark matter, gas and stars) and the stellar mass of galaxies in the simulations, so that all the haloes we keep from our halo finder represent real, physical galaxies. We only examine galaxy haloes with a total mass of \mbox{$M_{200}\geq10^{10.5}\ h^{-1}M_{\odot}$}, which corresponds to approximately $100$ particles in the high-resolution regions containing the clusters. We also only use galaxies with a stellar mass $M_{\rm{star}}\geq10^{9.5}M_{\odot}$.
We consider these to be physical galaxies that have built up a substantial population of stars -- this cut is approximately equivalent to removing all galaxies with a luminosity \mbox{$L<10^{8}L_{\odot}$}, whilst keeping all galaxies with \mbox{$L>10^{9}L_{\odot}$}. This stellar mass cut also allows us to investigate a similar population of galaxies to upcoming observational studies, such as the WEAVE\footnote{\url{https://www.ing.iac.es//confluence/display/WEAV}} Wide-Field Cluster Survey, which will study cluster galaxies down to stellar masses of \mbox{$\sim10^{9}\ M_{\odot}$} \citep[e.g.][]{kuchner2020}. Finally, we remove all galaxies from our simulations that contain more than $30\%$ of their mass in stars. These objects are generally found extremely close to the centre of a larger halo, and so have been heavily stripped \citep{knebe2020}, leaving remnants with high stellar mass fractions, whose properties (such as their radii and masses) are not well-defined by our halo finder. These objects are very rare, and make up only $1\%$ of all haloes within $5R_{200}$ of the clusters, so we remove them from our analysis. By applying these three constraints to our simulations, we consider all remaining objects to be realistic galaxies with a significant population of stars at $z=0$. \subsubsection{Group identification} \label{sec:group_id} Throughout this work, we identify galaxy groups by taking each galaxy, assuming its halo to be the host halo of a galaxy group, and then determining if any other galaxies in the same snapshot are associated with it. We identify galaxies as being associated with a halo (and thus members of the group) using the same approach as \citet{han2018}. They assume that a group's dark matter halo follows a spherically symmetric NFW density profile \citep{navarro1996}, truncated at $R_{200}$. Using this to calculate the gravitational potential of the group halo, they identify group members as those that satisfy the criterion given below: \begin{equation} \frac{v^2}{2}+\Phi\left(r\right)<\Phi\left(2.5R_{200}^{\rm{grp}}\right)\,. \label{eq:bounded} \end{equation} Here, $v$ is the relative velocity of a galaxy with respect to its group host, $\Phi(r)$ is the gravitational potential due to the group host at a distance $r$ from its centre, and $R_{200}^{\rm{grp}}$ is the radius of the group host halo. It is important to note that this is different to the radius of the host cluster in each simulation, which is subsequently referred to by $R_{200}^{\rm{clus}}$. Any galaxies that are less massive than their group host and that satisfy \Eq{eq:bounded} are taken to be bound members of this group. Although we hereafter refer to these group members as being `bound' to their host group, we note that this definition is not technically equivalent to gravitational binding. Previous work \citep[e.g.][]{behroozi2013} has shown that halo particles can be gravitationally balanced against the Hubble flow out to $\sim4R_{200}$ from the halo centre. However, \Eq{eq:bounded} places an artificial radial limit on groups, so that galaxies can only be found as far as $2.5R_{200}^{\rm{grp}}$ from the centre of the group. This outer limit is the same as was used by \citet{han2018}: their choice was motivated by the work of \citet{mamon2004}, who showed that backsplash galaxies are typically found out to approximately $2.5R_{200}$ from their host halo, but rarely any further.
By setting this as the outer limit of a group, we include almost all galaxies that are on bound orbits around the group (having passed through its central halo at least once), whilst excluding galaxies that have not entered the group halo before. Furthermore, the relative velocity term in \Eq{eq:bounded} means that only slow-moving galaxies at large distances are included as group members. Galaxies moving at greater velocities are excluded from the group, as these are likely `fly-by' galaxies or `renegade subhaloes' \citep{knebe2011_renegade}, which happen to be passing near to the group, but are not bound to it. If a halo has four or more galaxies associated with it that each have a smaller total mass (including dark matter, gas, and stars) than the halo, we define this as a group, with the halo being the `group host' halo. Throughout this work we assume that the central group galaxy in each of these group host haloes exists at the centre of the halo, which has been shown to be the case in previous work. \citet{lin2004} used X-ray observations of groups and clusters with masses similar to those in this work (\mbox{$10^{13.5}\ M_{\odot}<$}\mbox{$\ M_{200}<\ $}\mbox{$10^{15.3}\ M_{\odot}$}) to show that in $75\%$ of these haloes, the brightest galaxy is located within $0.06R_{200}$ of the halo centre; this result is corroborated by both \citet{hwang2008} and \citet{stott2012}. Very small groups with fewer than five members are common, but for the mass constraints that we put in place in \Sec{sec:galgroups}, a collection of $\gtrsim5$ associated galaxies is typically required to define a group \citep[see][for example]{tully2015}. Additionally, we only study groups with 50 or fewer members, as detailed in the following section. Again, we stress that this limit applies to the number of group members that satisfy the mass constraints in \Sec{sec:galgroups}, as is the case throughout the rest of this work unless stated otherwise. \subsubsection{Infalling groups} \label{sec:infalling} The focus of this work is on the evolution of galaxy groups as they enter and pass through a galaxy cluster. In order to study this, we identify a sample of infalling galaxy groups at all redshifts, in the same way as our previous study \citep{haggar2021}, and other previous work \citep[e.g.][]{choquechallapa2019}. To do this, we identify all galaxies that have just fallen into the cluster; these are galaxies that are within $R_{200}^{\rm{clus}}$ of the cluster centre, having been outside of the cluster in the previous snapshot. These objects are referred to as the `infalling' galaxies. Note that we do not distinguish by the time at which the galaxies entered the cluster -- these infall events can happen at any time over a cluster's history. We then examine each of these galaxies using the method described in \Sec{sec:group_id}, to determine whether each object is the host halo of a galaxy group that has passed within the radius of the cluster. We keep groups with between five and 50 members (including the host object) that each satisfy the mass constraints given in \Sec{sec:galgroups}. Groups of this richness are considered to be small or intermediate sized groups \citep{tully2015}, but are large enough to provide an environment that can strongly impact galaxy evolution \citep{hester2006}. Because of the upper limit of 50 members on the group size, major cluster-cluster mergers are not included in this study. 
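To make this membership criterion concrete, a minimal Python sketch is given below. It evaluates \Eq{eq:bounded} for a single galaxy, but for simplicity uses the potential of an untruncated NFW halo, whereas the profile adopted above is truncated at $R_{200}$, so the sketch is only an approximation; the unit conventions, function names and the default concentration (the median group value quoted later, in \Sec{sec:tidal_radius}) are illustrative choices:

\begin{verbatim}
import numpy as np

G = 4.30e-9  # gravitational constant in Mpc (km/s)^2 / Msun

def nfw_potential(r, m200, r200, c):
    # Potential at radius r of an (untruncated) NFW halo with
    # mass m200, radius r200 and concentration c.
    rs = r200 / c
    norm = np.log(1.0 + c) - c / (1.0 + c)
    return -(G * m200 / r) * np.log(1.0 + r / rs) / norm

def is_group_member(r, v, m200_grp, r200_grp, c_grp=4.4):
    # Boundness test: v^2/2 + Phi(r) < Phi(2.5 * R200_grp).
    # Because Phi rises monotonically with r, this inequality
    # also enforces the outer limit r < 2.5 * R200_grp.
    phi_lim = nfw_potential(2.5 * r200_grp, m200_grp, r200_grp, c_grp)
    phi_r = nfw_potential(r, m200_grp, r200_grp, c_grp)
    return 0.5 * v**2 + phi_r < phi_lim
\end{verbatim}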
\Fig{fig:schematic} shows a schematic view of a galaxy group at the moment of infall, and its subsequent passage through a cluster. Finally, we exclude any groups that have passed through the cluster once previously, so that all of the groups in our sample are entering a cluster for the first time. Groups on a second (or subsequent) infall make up less than $1\%$ of the groups we identify, so we assume that this will not strongly impact our results. \begin{figure} \includegraphics[width=\columnwidth]{figures/schematic.pdf} \caption{Schematic of a galaxy group halo (dark circle) passing within $R_{200}^{\rm{clus}}$ of a cluster (light circle) for the first time. Red crosses represent galaxies that are members of this group; note that these are not limited to be within $R_{200}$ of the cluster or the group at infall, but are just defined based on \Eq{eq:bounded}. The position, $r$, and velocity, $v$, of one galaxy relative to its host group are also labelled. The subsequent path of this group through the cluster is shown by the thick, grey line, and the black squares on this line represent the moments of pericentre, apocentre, and second infall of the group (marked $P$, $A$ and $I_{\rm{2}}$ respectively), which are used extensively in \Sec{sec:before_after} of this work.} \label{fig:schematic} \end{figure} Overall, we identify 1340 infalling groups across the 257 clusters that we use in this work, with a median richness (number of galaxies) of \mbox{$8^{+7}_{-3}$} members ($1\sigma$ spread). This indicates that, although we permit groups to contain up to 50 members, groups of this richness are rare compared to the large number of poorer groups -- only $8\%$ of the groups contain more than 20 members. The average mass, $M_{200}^{\rm{grp}}$, of these groups at cluster infall is \mbox{$10^{13.5\pm0.4}\ h^{-1}M_{\odot}$} (median and $1\sigma$ spread). This means that the typical mass ratio between a group and cluster is roughly \mbox{$1:20$}, although this varies across the range of group and cluster masses, from approximately \mbox{$1:5$} to \mbox{$1:100$}. Finally, these groups enter the cluster over a wide range of redshifts, with a median value of \mbox{$z_{\rm{infall}}=0.4^{+0.6}_{-0.3}$}. \subsubsection{Tidal radius of groups} \label{sec:tidal_radius} Subhaloes passing through a larger halo can experience strong tidal stripping, and group-sized haloes can often lose a large fraction of their mass due to stripping from a cluster \citep{muldrew2011, bahe2019}. Similarly, galaxies can be tidally stripped from these groups \citep{gonzalezcasado1994, choquechallapa2019}, although the extent of this stripping varies between different studies. For instance, \citet{vijayaraghavan2015} found that the central regions of galaxy groups are largely unaffected by a cluster potential, and are only disrupted by dynamical friction after several Gyr. The tidal radius of a group or dark matter halo is an effective way to predict and explain tidal stripping. Generally, the tidal radius is defined as the distance from a smaller object at which the self-gravity of that object is less than the tidal force due to a larger object. However, the tidal radius is not precisely defined, and different definitions exist for different scenarios \citep[see][for a detailed summary]{vandenbosch2018}. Perhaps the simplest example is the Roche limit, the tidal radius of a point mass that is being tidally influenced by another point mass. 
More physically motivated scenarios such as an extended subhalo within a larger extended halo (as is used in this work) require more complex descriptions. Calculating a tidal radius is complicated further by the fact that subhalo properties are often poorly defined by a subhalo finder, and can be strongly dependent on the distance of a subhalo from the group centre. \citet{muldrew2011} test the ability of {\sc {ahf}} and another halo finder, {\sc {subfind}} \citep{springel2001_subfind}, to recover subhalo properties. They find that {\sc {ahf}} performs better at identifying all the particles of a subhalo, and thus constrains the subhalo mass more effectively. However, for subhaloes within \mbox{$\sim0.5R_{\rm{vir}}$ ($\sim0.7R_{200}$)}\footnote{\citet{muldrew2011} use the definition of virial radius presented in \citet{bryan1998}. For their cosmology, the mean density of a halo within the virial radius is $101\rho_{\rm{crit}}$, where $\rho_{\rm{crit}}$ is the critical density of the Universe. Hence, for the clusters used in their work and ours, $R_{\rm{vir}}=R_{101}\approx1.3R_{200}$, although it is important to note that this conversion depends on the concentrations and density profiles of dark matter haloes.}, both halo finders underestimate the number of particles in the subhalo. This makes it challenging to predict the mass, and therefore the radius, of subhaloes in these regions. Furthermore, in our work we wish to combine the data from multiple galaxy groups (of different sizes) in multiple galaxy clusters (also of different sizes). It is therefore convenient to have an expression for the group tidal radius that is independent of the cluster or group size, and solely depends on the separation between these two. We define the tidal radius of an infalling subhalo by adapting the descriptions in \citet{klypin1999} and \citet{vandenbosch2018}. Specifically, they give the tidal radius in terms of a function, $f(d)$, whose value is the minimum of two expressions: \begin{equation} R_{\rm{t}}=d\left(\frac{M_{\rm{<d}}^{\rm{grp}}(R_{\rm{t}})}{M_{\rm{<d}}^{\rm{clus}}(d)}\frac{1}{2-f(d)}\right)^{\frac{1}{3}}\,, \label{eq:tidalrad1} \end{equation} \begin{equation} f(d)={\rm{min}}\left[1,\ \left.\frac{{\rm{d(ln}}M_{\rm{<d}}^{\rm{clus}})}{{\rm{d(ln}}d)}\right|_{d}\right]\,. \label{eq:tidalradchoose} \end{equation} Here, $R_{\rm{t}}$ is the tidal radius of the group, $d$ is the distance from the group to the cluster centre, and $M_{\rm{<d}}^{\rm{grp}}$ and $M_{\rm{<d}}^{\rm{clus}}$ are the radial enclosed mass profiles of the group and the cluster. We assume that the radial density of the dark matter haloes follows an NFW profile \citep{navarro1996}, given by \begin{equation} \rho(d)=\frac{\rho_{0}}{x\left(1+x\right)^2}\,, \label{eq:nfw} \end{equation} \begin{equation} x=\frac{d}{R_{\rm{s}}}\,, \label{eq:define_x} \end{equation} where $\rho(d)$ is the radial density of the halo in terms of the distance to its centre, $\rho_{0}$ is a characteristic density, and $R_{\rm{s}}$ is the scale radius of the halo. We also define the quantity $x$ to make the equations in this section more easily readable. The concentration of a halo, $c$, is equal to the ratio between $R_{200}$ and $R_{\rm{s}}$: \begin{equation} R_{\rm{s}}=\frac{R_{200}}{c}\,. \label{eq:scaleradius} \end{equation} Integrating the NFW profile, \Eq{eq:nfw}, gives the enclosed mass in a sphere of radius $d$: \begin{equation} M_{\rm{<d}}=4\pi\rho_{0}R_{\rm{s}}^{3}\left[{\rm{ln}}\left(1+\frac{d}{R_{\rm{s}}}\right)-\frac{d}{d+R_{\rm{s}}}\right]\,.
\label{eq:nfw_enc} \end{equation} This can then be used to rewrite \Eq{eq:tidalradchoose}. For a general NFW profile, \mbox{$f(d)=1$} in the region \mbox{$d\lesssim2.2R_{\rm{s}}$}. However, \mbox{$f(d)<1$} outside of this region, and so must be calculated for each subhalo. Solving the derivative in the expression of $f(d)$ gives: \begin{equation} f(d)={\rm{min}}\left[1,\ \left(\frac{(x/(1+x))^{2}}{{\rm{ln}}(x+1)-x/(1+x)}\right)\right]\,, \label{eq:tidalradchoose_approx} \end{equation} where $x$ is defined the same as in \Eq{eq:define_x}. Also using \Eq{eq:nfw_enc}, we can produce an expression for $M_{200}$, as \mbox{$M_{\rm{<d}}(d=R_{200})=M_{200}$}. Substituting this into \Eq{eq:nfw_enc} gives \begin{equation} \begin{split} M_{\rm{<d}}=M_{200}&\left[{\rm{ln}}\left(1+x\right)-\frac{x}{1+x}\right]\\ &\hspace{11pt}\times\left[{\rm{ln}}\left(1+c\right)-\frac{c}{1+c}\right]^{-1}\,. \end{split} \label{eq:nfw_enc2} \end{equation} This expression can then be substituted into the equation for tidal radius, \Eq{eq:tidalrad1}, for the cluster enclosed mass, $M_{\rm{<d}}^{\rm{clus}}(d)$, and for the mass enclosed within the tidal radius of a group, $M_{\rm{<d}}^{\rm{grp}}(R_{\rm{t}})$. This gives the expression for tidal radius below, \begin{equation} \begin{split} \frac{R_{\rm{t}}}{R_{200}^{\rm{grp}}}=\frac{d}{R_{200}^{\rm{clus}}}\left(\frac{1}{2-f(d)}\right)^{\frac{1}{3}}\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \\ \times\left(\frac{\left[{\rm{ln}}\left(1+\frac{C_{\rm{c}}d}{R_{200}^{\rm{clus}}}\right)+\left(1+\frac{C_{\rm{c}}d}{R_{200}^{\rm{clus}}}\right)^{-1}-1\right]}{\left[{\rm{ln}}\left(1+C_{\rm{c}}\right)-\frac{C_{\rm{c}}}{1+C_{\rm{c}}}\right]}\right)^{-\frac{1}{3}}\\ \times\left(\frac{\left[{\rm{ln}}\left(1+\frac{C_{\rm{g}}R_{\rm{t}}}{R_{200}^{\rm{grp}}}\right)+\left(1+\frac{C_{\rm{g}}R_{\rm{t}}}{R_{200}^{\rm{grp}}}\right)^{-1}-1\right]}{\left[{\rm{ln}}\left(1+C_{\rm{g}}\right)-\frac{C_{\rm{g}}}{1+C_{\rm{g}}}\right]}\right)^{\frac{1}{3}}\,, \end{split} \label{eq:tidalrad_final} \end{equation} where $C_{\rm{c}}$ and $C_{\rm{g}}$ are the concentrations of the cluster and group haloes, respectively, and $f(d)$ is given by \Eq{eq:tidalradchoose_approx}. Finally, we take the halo concentrations to be constant for all of the clusters, and all of the groups. Specifically, we set the value of $C_{\rm{c}}$ equal to the median value for our clusters, $C_{\rm{c}}=3.9$, and $C_{\rm{g}}$ equal to the median value for our groups, $C_{\rm{g}}=4.4$. Approximating these concentrations as constant has a small effect because \Eq{eq:tidalrad_final} is not strongly dependent on them. For a group at a distance $d=0.2R_{200}^{\rm{clus}}$ from the cluster centre, the value of $R_{\rm{t}}$ varies from its median value by $20\%$ across the full range of cluster concentrations (from $C_{\rm{c}}=2.3$ to $C_{\rm{c}}=7.7$). At greater distances from the cluster centre, this variation is even smaller. Similarly, the $1\sigma$ deviation\footnote{There are a small number of groups with highly concentrated haloes, so we use the $1\sigma$ spread in $C_{\rm{g}}$ to avoid skewing our data.} in $C_{\rm{g}}$, between $2.6$ and $6.9$, leads to a variation in $R_{\rm{t}}$ of less than $18\%$. This variation is used as the uncertainty in the tidal radii that we calculate in \Sec{sec:tidal_friction}. By making these assumptions, we are able to reach an expression for the tidal radius of a group in units of $R_{200}^{\rm{grp}}$ that depends only on the distance from the group to the cluster centre. 
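Because $R_{\rm{t}}$ appears on both sides of \Eq{eq:tidalrad_final}, the equation must be solved numerically; a simple fixed-point iteration converges quickly. The Python sketch below (the variable names are our own, and $D$ denotes $d/R_{200}^{\rm{clus}}$) implements this with the median concentrations quoted above:

\begin{verbatim}
import numpy as np

def g(x):
    # NFW enclosed-mass function: ln(1 + x) - x / (1 + x).
    return np.log(1.0 + x) - x / (1.0 + x)

def f_of_D(D, c_c):
    # The logarithmic mass slope of the cluster halo at
    # x = C_c * d / R200_clus, capped at 1 (the min[1, ...] term).
    x = c_c * D
    return min(1.0, (x / (1.0 + x))**2 / g(x))

def tidal_radius(D, c_c=3.9, c_g=4.4, tol=1e-6):
    # Solve the tidal radius expression above for R_t / R200_grp,
    # for a group at cluster-centric distance D = d / R200_clus.
    const = (D * (1.0 / (2.0 - f_of_D(D, c_c)))**(1.0 / 3.0)
             * (g(c_c * D) / g(c_c))**(-1.0 / 3.0))
    y = 1.0  # initial guess: R_t = R200_grp
    while True:
        y_new = const * (g(c_g * y) / g(c_g))**(1.0 / 3.0)
        if abs(y_new - y) < tol:
            return y_new
        y = y_new
\end{verbatim}

With these default concentrations, the sketch gives \mbox{$R_{\rm{t}}\approx0.9R_{200}^{\rm{grp}}$} for a group at \mbox{$d=R_{200}^{\rm{clus}}$}, falling to \mbox{$R_{\rm{t}}\approx0.2R_{200}^{\rm{grp}}$} at \mbox{$d=0.2R_{200}^{\rm{clus}}$}.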
As $R_{200}^{\rm{grp}}$ of a group can change over the course of infall, the tidal radius could be scaled by this changing group radius. However, we instead choose to scale the tidal radius by $R_{200}^{\rm{grp}}$ at the moment of cluster infall, to allow us to stack groups and study their evolution more clearly -- this is explained in further detail in \Sec{sec:evol_infall}. \Eq{eq:tidalrad_final} is an ideal form of the tidal radius for our analysis, as it allows us to calculate the average tidal radius for all groups in a radial bin across many clusters. This form of the tidal radius may also be useful in future studies, both observational and theoretical, that wish to stack substructure on multiple different size scales. \section{Phase space evolution} \label{sec:evolution} Much of the work in this paper revolves around studying the phase space of galaxies within galaxy groups, as the groups enter and pass through a cluster, and how the distribution of galaxies within this phase space changes over time. This analysis follows the same basic process as in \citet{haggar2021}; the phase space consists of the radial distance of a galaxy from its host group halo in terms of the group halo radius, \mbox{$R_{200}^{\rm{grp}}$}, and the galaxy's velocity relative to the group halo, in units of \mbox{$v_{\rm{cir}}$}, the circular orbital velocity at \mbox{$r=R_{200}^{\rm{grp}}$}. It is important to stress that this work involves looking at the phase space of galaxies relative to their host group, not the cluster \citep[as has been done by numerous previous studies, such as][for example]{jaffe2015,arthur2019}. This method can provide detailed information, by showing both the spatial and velocity distribution of galaxies in groups, and telling us how the speed and acceleration of galaxies differ in different regions of the group. \Fig{fig:infalling_groups} shows the average distribution of galaxies in phase space, for an infalling group -- a group that has just passed within $R_{200}^{\rm{clus}}$ of the cluster centre for the first time, as shown in \Fig{fig:schematic}. Similarly to our previous work \citep{haggar2021}, we produce a smoothed distribution of galaxies using a 2D kernel density estimation (KDE) with an optimised bandwidth. In the remainder of this section, we examine how this phase space changes as a group passes through a cluster. \begin{figure} \includegraphics[width=\columnwidth]{figures/infalling_groups.pdf} \caption{Distribution of galaxies in group phase space, for groups at the moment of infall into the host cluster. Data for all groups from all 257 clusters that are used in this analysis are shown, stacked together. Lighter colours represent regions of phase space with more galaxies -- the maximum value is at \mbox{$r=0.65R_{200}^{\rm{grp}}$}, \mbox{$v=1.15v_{\rm{cir}}$}, representing the region of this phase space in which group members are most likely to be found. The red line represents the boundness criterion for galaxies \Eq{eq:bounded}; galaxies above this line are not considered group members, and so are excluded from this figure. Contours are at densities of [0.2, 0.4, 0.6, 1, 2, 4, 6] \mbox{$(R_{200}^{\rm{grp}}v_{\rm{cir}})^{-1}$}. 
The data in this figure, and subsequent phase space diagrams in this work, are smoothed using a 2D kernel density estimation (KDE) with an optimised bandwidth, typically $\sim0.2$ virial units.} \label{fig:infalling_groups} \end{figure} The 1340 infalling groups that we identify represent an average of 5.2 accreted groups per cluster -- this might appear to be a small number, but it is important to note that this is not the entire accreted group population, as this only accounts for intermediate-size groups (with between five and 50 members). Although they use different mass limits to this work, \citet{berrier2009} demonstrate that about half of galaxy groups contain only two or three members, and such groups are not included in our analysis. If we do include these poor groups, we find that approximately \mbox{$14\%$} of \mbox{$z=0$} cluster galaxies in our simulations were accreted as members of a group, comparable to the results from other studies presented in \Sec{sec:intro} \citep[see also][]{haggar2021}. \subsection{Groups beyond the cluster outskirts} \label{sec:evol_outskirts} Before studying groups passing through clusters, we first study how this phase space changes in groups that are not under the influence of a cluster, and are located far from the cluster centre (greater than \mbox{$3R_{200}^{\rm{clus}}$} from the cluster). This can then be used as a control, showing how the distribution of galaxies changes for a group evolving secularly, as an (approximately) isolated system. \Fig{fig:phasespace_outskirts} shows the direction and rate at which galaxies in groups move around this phase space, for groups between \mbox{$3R_{200}^{\rm{clus}}$} and \mbox{$10R_{200}^{\rm{clus}}$} from the centre of a cluster, between redshifts of \mbox{$z=0.1$} and \mbox{$z=0$}. We assume that these groups are isolated, as they are sufficiently far from a cluster that they are not subject to its strongest effects. We did not study groups at greater cluster distances because the resolution of the simulations decreases outside of this distance. This figure includes only galaxies that were bound to a group (i.e. that lay below the thick red line) at $z=0.1$, which we then follow until $z=0$ (about \mbox{1.3 Gyr}). \begin{figure} \includegraphics[width=\columnwidth]{figures/phasespace_outskirts.pdf} \caption{Motion of group members in phase space of host group, for groups located beyond the influence of a cluster. The thick red line shows the boundness criterion, providing an approximate measure of galaxies that have become unbound from their group. Colours of arrows represent the rate at which galaxies are moving in this phase space, with darker arrows indicating more rapid motion. This plot shows stacked data for 2769 groups, located between $3R_{200}^{\rm{clus}}$ and $10R_{200}^{\rm{clus}}$ from the centre of a cluster, between $z=0.1$ and $z=0$. This shows how galaxies move in the phase space of groups when the group is not affected by the external environment. All galaxies lie below the boundness line at $z=0.1$, but some move above the red line and become unbound, although many remain bound to the group.
The motion of the bound galaxies follows a characteristic pattern, rather than being in random directions.} \label{fig:phasespace_outskirts} \end{figure} Throughout this section, in order to study how the speeds and positions of group galaxies change, we examine the changes of these properties for bound group members, relative to \mbox{$R_{200}^{\rm{grp}}$} and \mbox{$v_{\rm{cir}}$} of their host group measured at a previous time. In \Fig{fig:phasespace_outskirts} and the following figures in \Sec{sec:evol_infall} we show how the phase space of groups changes over time. In these plots, the direction of arrows shows the average direction that galaxies in this region are moving in phase space, and darker arrows mean that the galaxies are moving across the phase space more quickly. For example, a galaxy going from \mbox{$[1.0R_{200}^{\rm{grp}}, 0.5v_{\rm{cir}}]$} to \mbox{$[2.0R_{200}^{\rm{grp}}, 1.5v_{\rm{cir}}]$} in \mbox{2 Gyr} would be represented by an arrow located at \mbox{$[1.5R_{200}^{\rm{grp}}, 1.0v_{\rm{cir}}]$}, pointing at a $45^{\circ}$ angle to the top-right, with a colour of \mbox{$\sim0.71$}. Note that the horizontal and vertical axes in \Fig{fig:phasespace_outskirts} are dimensionless, as they have been normalised to prior values of $R_{200}^{\rm{grp}}$ and $v_{\textrm{cir}}$, and so we describe the distance moved across this phase space in a given time with the term `virial units per Gyr'. The positions and velocities of the galaxies in \Fig{fig:phasespace_outskirts} are scaled relative to \mbox{$R_{200}^{\rm{grp}}$} and \mbox{$v_{\rm{cir}}$} of each group at \mbox{$z=0.1$}. Some regions of phase space do not contain any arrows because of a lack of data, indicating that almost no galaxies were found in this region across all the groups -- for example, there are no galaxies in the top-right of \Fig{fig:phasespace_outskirts}, because they were all below the red line just a short time previously. Some galaxies are still found above the line, because they have become unbound between \mbox{$z=0.1$} and \mbox{$z=0$}. The phase space of these groups is not in equilibrium, and bound galaxies in the centres of these groups appear to be moving downwards on this plot (i.e. losing speed but remaining a similar distance from the group centre). This indicates that energy is being dissipated during their orbits. Dynamical friction is strongest in the group centres, and so this is likely responsible for the loss of energy during these orbits. \mbox{Fig. 2} in \citet{arthur2019} shows analogous behaviour to this for the phase space of a galaxy cluster: subhaloes move horizontally in phase space when approaching the centre of their host halo, then move sharply downwards when they are near to the halo centre, resulting in the apparent `spiral' motion of galaxies in \Fig{fig:phasespace_outskirts}. This trend could also potentially be explained by the destruction of some inner galaxies by mergers before they have time to leave the group centre. However, we find that this is not the case, as the majority ($82\%$) of galaxies within \mbox{$0.5R_{200}^{\rm{grp}}$} of the group centre survive to $z=0$ without merging into another halo or being heavily stripped (see \Sec{sec:fates} for further discussion of the fates of group members). Furthermore, if we remove these galaxies from our analysis, there is a negligible change in the trends in \Fig{fig:phasespace_outskirts}.
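For reference, arrow maps of this kind can be computed along the following lines. The Python sketch below (using {\sc scipy}; the function name, bin counts and axis ranges are illustrative choices, and not necessarily those used to produce our figures) bins each galaxy's displacement in phase space between two snapshots, anchoring the arrow at the midpoint of the displacement as in the worked example above:

\begin{verbatim}
import numpy as np
from scipy.stats import binned_statistic_2d

def arrow_map(r1, v1, r2, v2, dt, bins=12, extent=[[0, 3], [0, 2]]):
    # r and v are arrays of galaxy distances and speeds in virial
    # units (r / R200_grp and v / v_cir at a reference time), at
    # two snapshots separated by dt Gyr.
    r_mid, v_mid = 0.5 * (r1 + r2), 0.5 * (v1 + v2)
    dr, dv = (r2 - r1) / dt, (v2 - v1) / dt
    mean_dr, xedges, yedges, _ = binned_statistic_2d(
        r_mid, v_mid, dr, statistic='mean', bins=bins, range=extent)
    mean_dv = binned_statistic_2d(
        r_mid, v_mid, dv, statistic='mean',
        bins=bins, range=extent).statistic
    # Arrow colour: rate of motion through phase space,
    # in 'virial units per Gyr'.
    speed = np.hypot(mean_dr, mean_dv)
    return xedges, yedges, mean_dr, mean_dv, speed
\end{verbatim}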
\subsection{Groups passing through clusters} \label{sec:evol_infall} To study groups falling into clusters, the phase space diagrams that we present are instead scaled relative to \mbox{$R_{200}^{\rm{grp}}$} and \mbox{$v_{\rm{cir}}$} of each group at $z_{\rm{infall}}$, the moment of cluster infall. We scale the positions and speeds of galaxies by these values, even in subsequent snapshots after $z_{\rm{infall}}$. This approach is not perfect, because the radius and circular velocity of a host group halo change as the group approaches the centre of a cluster, likely due to tidal stripping. Despite this, we choose to measure these properties only at the moment of infall because, in the central regions of a large halo, the mass and radius of a subhalo are not well-defined; due to the high background density in the centre of the cluster halo, it can be challenging for a halo finder to identify the overdensity of a subhalo. Consequently, near the centre of a cluster, the mass and radius of a group ($M^{\rm{grp}}_{200}$ and $R^{\rm{grp}}_{200}$) are not reliable \citep{muldrew2011}. Scaling by the values of $R_{200}^{\rm{grp}}$ and $v_{\rm{cir}}$ at $z_{\rm{infall}}$ allows us to visualise how the absolute values of the distance and speed of galaxies relative to their groups are changing. This means that galaxies lying below the line of boundness after infall are not strictly bound to the group, but the approach still provides a good approximation. In this section we consider groups that are entering the cluster for the first time, and so have not previously experienced a cluster potential. We also only include groups at times between their first infall and their first apocentric passage after entering the cluster (the turnaround in their cluster orbit). It is important to note that this is not necessarily the true `first apocentre' of an orbit, as haloes are not accreted onto clusters in perfectly radial orbits. Instead, they have some tangential component to their velocity, meaning that some haloes will pass an apocentre before their entry to the cluster \citep{ghigna1998,tollet2017}. However, as the focus of this paper is on the evolution of groups after their cluster infall, we will hereafter refer to the first apocentric passage post-infall as the `first apocentre'. Finally, we do not separate groups by redshift -- for example, some of these groups have passed their first pericentre by $z=0$, but some have not and so are absent from the post-pericentre analysis. \Fig{fig:phasespace_two-phase} shows how the phase space of these groups changes as they pass through a cluster. We find that the behaviour of groups as they enter and pass through a cluster can be approximately split into two main phases, with the group dynamics changing suddenly as a group makes its closest approach to the cluster centre, as shown by the two panels in this figure. The left panel of \Fig{fig:phasespace_two-phase} shows groups on their infall, moving from the cluster outskirts towards their first pericentric passage, near to the cluster centre. Generally, the galaxies in these groups move upwards on this plot, showing an increase in their velocity relative to their host group. These data are for galaxies that are bound to groups at the moment of infall ($z_{\rm{infall}}$), but some of these move above the red line and so become unbound from their host group.
As in \Fig{fig:phasespace_outskirts}, the direction of the arrows shows the average direction in which galaxies in each region of phase space are moving. It is important to note that the arrows in the left panel (`pre-pericentre') are pointing vertically upwards, with a very small horizontal component. This shows that, although these galaxies have a large change in speed, their distance to the group centre does not change very much; galaxies within $R_{200}^{\rm{grp}}$ remain within $R_{200}^{\rm{grp}}$. \begin{figure*} \includegraphics[width=\textwidth]{figures/phasespace_two-phase.pdf} \caption{Same as \Fig{fig:phasespace_outskirts}, but showing motion of group members in phase space of host group for two epochs, before and after pericentre. Data shown are for galaxies that are bound to groups at the moment of infall, for groups on their first passage through the cluster. The red line shows the boundness criterion at the moment of infall, and so provides an approximate measure of galaxies that have become unbound from their group. Left panel shows data for groups before reaching their first pericentric passage of the cluster, moving between $R_{200}^{\rm{clus}}$ and the cluster centre, and the right panel shows groups moving between the cluster centre and $R_{200}^{\rm{clus}}$, which have passed their pericentre and are now receding from the cluster, moving towards their first apocentric passage. For pre-pericentre groups, the bulk motion of the galaxies is upwards, representing an increase in their group-centric speed, but little change in the spatial separation of galaxies from their host group. In contrast, for groups that have passed pericentre and are now receding from the cluster, group members are moving approximately horizontally in phase space, increasing their distance from the group to which they were previously bound.} \label{fig:phasespace_two-phase} \end{figure*} This behaviour is different for groups that have passed the pericentre of their orbit, shown in the right panel of \Fig{fig:phasespace_two-phase}. This panel shows data for groups at snapshots when they have passed pericentre, but have not yet reached their first apocentre, and so are receding from the cluster centre. Groups are also only included here at stages of their orbit when they are between the cluster centre and $R_{200}^{\rm{clus}}$, to allow us to compare the two panels in \Fig{fig:phasespace_two-phase}. Instead of increasing their velocity, most galaxies in these post-pericentre, receding groups keep a fairly constant relative velocity, and instead move horizontally on this plot, becoming spatially separated from the centre of their host group. This behaviour is stronger for galaxies that have become unbound from the group, moving above the boundness line -- these move to greater distances from the group centre, often with an accompanying slight increase in relative speed. Galaxies that are still bound to a group instead experience a drop in their relative speed, as well as an increase in separation from the group centre. In summary, \Fig{fig:phasespace_two-phase} shows that there are two phases of evolution for a galaxy group passing through a large cluster. First, galaxies are given a kinetic energy kick, increasing their speed relative to their host group.
Second, after the group passes pericentre (which typically occurs $\sim0.5$ Gyr after entering the cluster), this kinetic energy is converted into potential energy as the galaxies recede from the group centre. \subsubsection{Tidal effects and dynamical friction} \label{sec:tidal_friction} In \Fig{fig:phasespace_arrows} we break down the results from \Sec{sec:evol_infall} into individual steps, separating the infalling groups into bins based on their cluster-centric distance, both before and after passing pericentre. This gives a much more detailed view of how this phase space changes over the average course of a group through a cluster. We note that each panel does not represent an identical sample of groups, as most groups will not have a snapshot in all of these radial bins, and so these data represent the evolution of all groups that are found in this radial range. If we instead select only groups that have passed through each of these bins, there is only a minimal impact on our results, but large amounts of noise are introduced due to the small number of groups. In each panel, the tidal radius (based on the approximations detailed in \Sec{sec:tidal_radius}) for a group in the centre of this bin is also marked, in units of the group radius at infall. The closer a group is to the cluster centre, the stronger the effect of the cluster will be, and this is demonstrated by the decrease and subsequent increase of the tidal radius as groups pass through the cluster. Interestingly, across the eight panels, the tidal radius appears to mark a transition, such that the group dynamics evolve differently within the tidal radius, compared to beyond the tidal radius. Outside the tidal radius, galaxies first experience a kinetic kick and then recede from the group centre, as detailed in the previous section. However, inside the tidal radius, galaxies generally behave in a way similar to that seen in the centres of isolated groups in \Fig{fig:phasespace_outskirts} -- they mostly move downwards on these plots, showing a decrease in speed. Physically, this distinction indicates how the dynamics in some regions of the group are dominated by the group itself, whilst others are dominated by the effects of the cluster. As described in \Sec{sec:evol_outskirts}, galaxies in isolated groups experience dynamical friction due to the group's halo \citep{vijayaraghavan2015}. This is particularly strong in the dense central regions of the group, where dynamical friction will cause galaxies to slow down and spiral inwards, dominating over the effect of the cluster. However, beyond the tidal radius, tidal effects from the cluster dominate this dynamical friction, meaning that the movement of galaxies in phase space is dictated by the cluster, not the group. The change in the tidal radius means that the two phases of group evolution are clearer in the outskirts of a group, as the dynamics of these regions are dominated by the cluster for much of the group's journey. Conversely, galaxies in the group centres ($r<0.5R_{200}^{\rm{grp}}$) decrease in velocity at almost all times, as they are almost always within the tidal radius. The only exception to this is in the very deepest parts of the cluster (such as in panel~(d) in \Fig{fig:phasespace_arrows}).
\citet{dekel2003} showed that at the very centre of a dark matter halo, tidal forces can become fully compressive -- this could explain why all group galaxies change their orbits around their host groups, with their speeds increasing and their distances either remaining the same or decreasing. Finally, as the change from an increase in $v$ to an increase in $r$ is dependent on the tidal radius, this switch in behaviour is not instantaneous as it might appear to be in \Fig{fig:phasespace_two-phase}. Once a group reaches a distance of approximately $0.3R_{200}^{\rm{clus}}$ beyond pericentre (panel~(f) in \Fig{fig:phasespace_arrows}), the motion of galaxies away from the group begins in the centre, and then spreads throughout the group as it once again dominates over the cluster. Eventually, for groups that are long past pericentre (panel~(h) of \Fig{fig:phasespace_arrows}), all galaxies are either decreasing in relative speed, or their speed is staying the same. All the galaxies remaining in groups at this stage are also moving away from the group centre, towards the bottom-right of the phase space, which is characteristic of galaxies approaching the apocentre of a bound orbit around a group. To help visualise this behaviour, \App{sec:appendix_example_cluster} shows how group galaxies move around phase space for a single example group as it passes through a cluster, clearly showing the two main phases of group evolution. \section{Groups after cluster infall} \label{sec:before_after} The results in \Sec{sec:evolution} show how the dynamics of galaxy groups change as they pass through a cluster. In this section, we discuss the differences in the properties of a group before and after it passes through a cluster, in order to understand how distinguishable these two classes of groups are. \afterpage{ \begin{landscape} \begin{figure} \vspace{36pt} \includegraphics[width=\columnwidth]{figures/phasespace_arrows.pdf} \caption{Same as \Fig{fig:phasespace_two-phase}, but showing motion of group members in phase space of host group at multiple stages of the group's passage through a cluster. Each panel shows groups at different stages of their journey, showing the groups as they enter a cluster, approach the cluster centre, and recede from the cluster out to a distance of $R_{200}^{\rm{clus}}$. Top row (panels (a)-(d)) shows groups before reaching pericentre, binned by their distance from the cluster centre. Bottom row ((e)-(h)) shows groups after passing pericentre, before reaching their first apocentre. Data are shown for groups on their first infall only. Vertical black line on each plot represents the tidal radius for a group in this radial bin, and the shaded region represents the variation of this radius due to the $1\sigma$ spread in the concentrations of clusters and groups -- these have median halo concentrations of \mbox{$C_{\rm{c}}=3.9^{+1.5}_{-1.1}$} and \mbox{$C_{\rm{g}}=4.4^{+2.5}_{-1.8}$}, respectively. A small schematic is shown in the top-right of each panel, showing the point at which the groups (small black circle) are on their journey through the cluster halo (large grey circle) -- these schematics show a radial orbit, although most infalling group orbits also have some tangential component to their velocity.
The transition between the two phases of group evolution shown in \Fig{fig:phasespace_two-phase} can be seen, as well as the corresponding change in the tidal radius discussed in \Sec{sec:tidal_friction}.} \label{fig:phasespace_arrows} \end{figure} \end{landscape}} \subsection{Orbits of galaxy groups} \label{sec:orbits} The data used in \Sec{sec:evolution} are for groups on their first passage through a cluster, but not all of these groups will follow the same path. Just as some galaxies that are accreted by a cluster can become `backsplash galaxies' \citep{balogh2000,gill2005,haggar2020}, some groups will pass through the cluster and exit $R_{200}^{\rm{clus}}$ again, becoming `backsplash groups' that can enter the cluster for a second time. Others will `stick' to the cluster, remaining bound and not leaving $R_{200}^{\rm{clus}}$ once they have entered. We find that, across the 257 clusters used in this work, most groups ($92\%$) that fall into a cluster do not leave it again. By \mbox{$z=0$}, only 42\% of the groups that enter a cluster have reached their first turnaround (apocentre) in their cluster orbit, while $58\%$ do not reach this stage. These groups do not reach apocentre for multiple reasons: either they have merged with the cluster halo before reaching apocentre rather than remaining a substructure of the cluster, they have been heavily stripped by the cluster and so fall below the resolution limit before reaching apocentre, or they have simply not had time to reach apocentre by \mbox{$z=0$}. Of the groups that do reach the apocentre of their orbit, 20\% have left the cluster after entering $R_{200}^{\rm{clus}}$, while 80\% reach apocentre within $R_{200}^{\rm{clus}}$ of the cluster centre, and so remain `stuck' to the cluster potential. We hereafter refer to these as `backsplash groups' and `sticky groups', respectively. Overall, just 8\% of all infalling groups go on to leave the cluster again. Finally, 81\% of the backsplash groups in our sample later experience a second cluster infall, and 19\% are still outside of the cluster at \mbox{$z=0$}. The paths that groups can take through a cluster can be described in terms of the distance from a group to the cluster centre at pericentre and apocentre. Interestingly, we find that the distance of a group halo from the cluster centre at pericentre is very consistent, regardless of the group's later behaviour. Groups that do not reach apocentre have a median pericentric distance of \mbox{$0.36^{+0.14}_{-0.09}R_{200}^{\rm{clus}}$} from the cluster centre, which is very similar to the pericentre of groups that do later reach apocentre: backsplash groups and sticky groups have median pericentric distances of \mbox{$0.38\pm0.13R_{200}^{\rm{clus}}$} and \mbox{$0.36^{+0.11}_{-0.08}R_{200}^{\rm{clus}}$}, respectively. This justifies our decision to normalise the figures throughout this paper by the group radius at infall, as most groups pass well within \mbox{$0.7R_{200}^{\rm{clus}}$}, where \citet{muldrew2011} showed that subhalo sizes cannot be reliably measured. This shows that most groups take a similar trajectory into clusters, passing by the cluster centre at a similar distance. However, the subsequent orbits of these groups can vary dramatically, with groups reaching a wide range of apocentric distances, and some never being tracked as far as their apocentre at all. By definition, the post-infall apocentric cluster distances of backsplash groups and sticky groups are very different.
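The orbital eccentricities quoted below follow from these pericentric and apocentric distances; we assume the standard definition
\begin{equation}
    e = \frac{r_{\rm{apo}} - r_{\rm{peri}}}{r_{\rm{apo}} + r_{\rm{peri}}} \, .
\end{equation}
As a consistency check, inserting the median backsplash distances ($r_{\rm{apo}} \approx 1.16R_{200}^{\rm{clus}}$, $r_{\rm{peri}} \approx 0.38R_{200}^{\rm{clus}}$) gives $e \approx 0.51$, close to the quoted median of $0.53$; the two need not agree exactly, as the median of per-group eccentricities is not the eccentricity of the median distances.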
Backsplash groups have a median apocentric distance of \mbox{$1.16^{+0.29}_{-0.12}R_{200}^{\rm{clus}}$}, and sticky groups of \mbox{$0.63^{+0.22}_{-0.16}R_{200}^{\rm{clus}}$}, which correspond to median orbital eccentricities of \mbox{$0.53\pm0.12$} and \mbox{$0.25^{+0.19}_{-0.15}$}, respectively. \subsection{Removal of galaxies from groups} \label{sec:removal_groups} The sample of backsplash groups that exit a cluster and then re-enter allows us to directly compare how a single passage through a cluster permanently affects the properties of a group. Comparing the same sample of groups at the moment of first infall and second infall means that the groups are in approximately the same configuration (at a distance of $\sim{R}_{200}^{\rm{clus}}$, falling towards the cluster). Overall, we find that groups on a second infall contain far fewer galaxies, when compared to groups infalling for the first time. On their first infall, the median number of galaxies in these groups was $6^{+3}_{-1}$ (note that this is slightly smaller than the value of \mbox{$8^{+7}_{-3}$} quoted in \Sec{sec:evolution}, which includes groups that do not exit and re-enter the cluster). By their second infall, the median richness of these same groups is $2\pm{1}$ members. In fact, 46\% of the groups that are infalling for the second time contain only one member. Physically, these objects are not actually groups at all: a `group' with one member instead represents a single galaxy that has no other galaxies bound to it, having previously been the central galaxy in a group. This shows that, in a single passage through a cluster, almost all galaxies become unbound from groups. Often this process completely disrupts a group, resulting in no galaxies remaining bound together. Similarly, the dark matter haloes of these groups are heavily stripped during their passage through the cluster. At first infall, the median radius, $R_{200}^{\rm{grp}}$, of a group was \mbox{$0.51^{+0.15}_{-0.10}\ h^{-1}$ Mpc}. By their second infall, these same groups had a median radius of \mbox{$0.32^{+0.10}_{-0.07}\ h^{-1}$ Mpc}. Similarly, the median mass\footnote{The infall mass of groups that later have a second infall is slightly smaller than the average mass of all groups, \mbox{$10^{13.5}\ h^{-1}M_{\odot}$} (\Sec{sec:infalling}). However, as we discuss in \Sec{sec:fates}, we still consider these to be a representative sample of all infalling groups.} of these groups, $M_{200}^{\rm{grp}}$, decreases by a factor of three in this time, from \mbox{$10^{13.2}\ h^{-1}M_{\odot}$} to \mbox{$10^{12.7}\ h^{-1}M_{\odot}$}, consistent with the decrease in the number of galaxies. This is comparable to the results of previous studies, which have found that dark matter subhaloes are heavily stripped; \citet{muldrew2011} found that a halo passing through the centre of a cluster has approximately half of its mass stripped away, and \citet{taylor2004} used semi-analytic models to show that subhaloes on orbits similar to our groups lose $>40\%$ of their mass with each pericentric passage of a cluster. Some studies find even more extreme evidence of this removal of dark matter: \citet{smith2016} used hydrodynamical simulations to show that a cluster halo can strip away $\sim80\%$ of the dark matter in galaxy-sized subhaloes. \subsection{The fates of group galaxies} \label{sec:fates} We can investigate the removal of galaxies from groups further, by comparing these groups at different stages of their infall and journey through a cluster.
As shown in \Sec{sec:evolution}, the speed of galaxies relative to their host group increases before they reach the pericentre of their cluster orbit, and their group-centric distance increases post-pericentre. Therefore, although groups become spatially separated after pericentre, it is not clear when the galaxies become unbound from these groups. In backsplash groups that also have a second infall, member galaxies are removed from their host group very quickly. Of those galaxies that are bound to a group at first cluster infall (i.e. that satisfy \Eq{eq:bounded}), $60^{+20}_{-35}\%$ are no longer bound to the group by the first pericentre, $76^{+24}_{-6}\%$ are removed by apocentre, and $89^{+11}_{-29}\%$ by the second infall into the cluster (median and $1\sigma$ spread for backsplash groups). These numbers are almost identical for backsplash groups that do not have a second infall. For groups that reach apocentre but do not leave the cluster (`sticky groups'), $75\pm{25}\%$ of previously bound galaxies are no longer group members at pericentre, and $73^{+27}_{-33}\%$ at apocentre. Although it appears that the number of unbound galaxies drops slightly between pericentre and apocentre, this can actually be explained by the fact that the radius (and thus mass) of a subhalo is artificially suppressed in the centre of a large halo, making more galaxies appear to be unbound. However, although these galaxies are no longer members of the group, this is not necessarily because they have become gravitationally unbound from their host group. In this section, we analyse the final fates of these galaxies after their group enters a cluster. To do this, we separate the galaxies' states into five categories: \begin{itemize} \item Bound: galaxy is still bound to its host group, according to \Eq{eq:bounded}. \item Unbound: galaxy does not satisfy \Eq{eq:bounded}, and so is no longer bound to its host group. \item Disrupted: no descendent of a group member has been found by the halo finder, typically because its dark matter halo has been heavily stripped. \item Merged with group: galaxy has been absorbed by the halo of its host group, effectively merging with the Brightest Group Galaxy. \item Other merger: galaxy merges with another, larger object (e.g. a more massive satellite galaxy). Alternatively, the galaxy may be absorbed by the cluster halo, effectively merging with the Brightest Cluster Galaxy. \end{itemize} The `disrupted' galaxies in our sample represent a class of objects that have physical similarities. However, because of the nature of the simulations and tree-builder that we use in this work, the branches of their merger trees are cut off prematurely, meaning that they appear to have no descendent halo in the simulations and so their final fate cannot be determined. Before their branches end, the dark matter masses of these galaxies are changing rapidly -- in their final ten snapshots before they are removed from the merger tree, $76\%$ of these galaxies experience at least one drop of $>30\%$ in their halo mass between two snapshots \mbox{($\sim0.3$ Gyr)}, and $37\%$ have a measured drop of $>40\%$. However, {\sc{mergertree}} does not allow for the dark matter mass of an object to change by more than a factor of two between snapshots (see \Sec{sec:tree} for an explanation of this).
Consequently, if a galaxy's dark matter halo mass drops by $>50\%$ between snapshots, this change will not be recorded, no descendent for the halo will be added to the catalogue, and this branch in the merger tree will end. Despite this heavy stripping of dark matter, very few of the disrupted galaxies violate the mass limits that are imposed in \Sec{sec:tree}; if we remove these mass limits, the median final mass of these galaxies before their merger tree ends is \mbox{${\rm{log}}_{10}(M_{200}/h^{-1}M_{\odot})=11.3^{+0.7}_{-0.6}$}, with a median stellar mass of \mbox{${\rm{log}}_{10}(M_{\rm{star}}/h^{-1}M_{\odot})=10.4^{+0.5}_{-0.4}$}, and a ratio between these of \mbox{$0.14^{+0.12}_{-0.08}$}. Consequently, few of these galaxies are removed from the merger trees due to violating these imposed mass limits. \Fig{fig:galaxy_fates} shows the status of group member galaxies as their host group passes through a cluster. These data are averaged across all groups that become backsplash groups and then have a second cluster infall, meaning that we have data for their entire passage through a cluster. Overlaid as solid, dashed, dot-dashed and dotted lines are the boundaries between the coloured regions when all groups are included. For example, this indicates the states of galaxies at pericentre for all groups that reach their first pericentre, regardless of what subsequently happens to the group. Similarly, the apocentre data show the fates of all galaxies in groups at apocentre, whether or not this apocentre is outside of the cluster. These data closely follow the data for groups that have a second infall, showing that these second infallers are representative of the entire group sample. We therefore only discuss these groups that later have a second infall, allowing us to make comparisons of the same sample of groups at different stages of their orbit. \begin{figure} \includegraphics[width=\columnwidth]{figures/galaxy_fates.pdf} \caption{Status of galaxies that were bound to groups at cluster infall, as their host group passes through a cluster and begins its second infall. These data are averaged across all groups that become backsplash groups and experience a second infall. All galaxies are bound at first infall, by definition. Areas representing galaxies that are bound, or have become unbound, disrupted, or merged with the group halo, are labelled. The small, black region represents other mergers, which is unlabelled for clarity. Solid/dashed/dot-dashed/dotted lines show the boundaries between these regions for all groups that reach this stage of their orbit, regardless of whether they go on to reach apocentre or have a second infall.} \label{fig:galaxy_fates} \end{figure} As stated above, only approximately $40\%$ of group members are still members of the group at the pericentric passage of the cluster centre (note that here we use the mean behaviour of each group, as opposed to the median used earlier on in this section, and so the quantities differ slightly). However, of the $61\%$ of galaxies that are no longer group members at pericentre, most ($45\%$ of all members) have become unbound from their host group, while the other $16\%$ have experienced one of the other fates described above. As these groups exit the cluster and re-enter, the fraction of galaxies that have become unbound increases slightly (to $53\%$), but the fraction of galaxies leaving the group for another reason doubles, to $32\%$, showing that these other processes are more important after a group's initial infall.
It is also important to note that these four stages in the group orbit -- infall, pericentre, apocentre and second infall -- are not equally spaced in time. For the groups shown in \Fig{fig:galaxy_fates} (backsplash groups with a second infall), pericentre, apocentre and the second infall occur an average of \mbox{$0.5\pm0.2$ Gyr}, \mbox{$2.6^{+0.4}_{-0.7}$ Gyr}, and \mbox{$3.5^{+1.2}_{-1.0}$ Gyr} after the first infall, respectively. Consequently, not only do most of the unbound galaxies leave the group between infall and pericentre, but this process takes place in just \mbox{$\sim0.5$ Gyr}, compared to the \mbox{$\sim2$ Gyr} between pericentre and apocentre. We note that these timescales are redshift dependent: the time for a galaxy entering a cluster to reach pericentre at $z=0$ can typically range from $1-2$ Gyr \citep[see Fig. B1 in][for further details]{tollet2017}, but the time taken decreases at higher redshifts. Our method consequently returns an average infall-to-pericentre time of less than $1$ Gyr, because we stack data from groups at numerous different redshifts \citep[for some additional discussion of cluster crossing times, see][]{contrerassantos2022}. \Fig{fig:galaxy_fates} represents all group members at infall; however, \Fig{fig:phasespace_arrows} shows that galaxies in different regions of the group phase space will experience different processes, and so the likelihood of each outcome is not the same for all galaxies in a group. Accordingly, we also find that the evolution and fates of group galaxies are strongly dependent on their position within the phase space of their host group. \Fig{fig:galaxy_fates_inner} and \Fig{fig:galaxy_fates_outer} show the evolution of members of groups that pass through and re-enter a cluster, in the bottom-left\footnote{This selection specifically examines slow-moving galaxies near the group centre, as fast-moving galaxies near the group centre exhibit different behaviour. We elaborate on this in \Sec{sec:fates_phasespace}.} \mbox{($r<0.5R_{200}^{\rm{grp}}$}, \mbox{$v<0.5v_{\rm{cir}}$)} and bottom-right \mbox{($r>0.8R_{200}^{\rm{grp}}$)} of the phase space shown in \Fig{fig:infalling_groups}. These represent the slow-moving galaxies deep within the group's potential well, and loosely-bound galaxies in the group outskirts, respectively. \begin{figure} \includegraphics[width=\columnwidth]{figures/galaxy_fates_inner.pdf} \caption{Same as \Fig{fig:galaxy_fates}, but for slow-moving galaxies in the centre of groups ($r<0.5R_{200}^{\rm{grp}}$, $v<0.5v_{\rm{cir}}$). Again, the small `other mergers' region is unlabelled for clarity. Galaxies in this region are much more likely to become heavily disrupted and have an incomplete merger tree, although a substantial fraction merge with the group halo, mostly between pericentre and apocentre.} \label{fig:galaxy_fates_inner} \end{figure} \begin{figure} \includegraphics[width=\columnwidth]{figures/galaxy_fates_outer.pdf} \caption{Same as \Fig{fig:galaxy_fates}, but for galaxies in the outskirts of groups \mbox{($r>0.8R_{200}^{\rm{grp}}$)}. The `group mergers' and `other mergers' regions are unlabelled for clarity. Galaxies in the outskirts of the groups are highly likely to become unbound from their host group, which usually happens between infall and pericentric passage.} \label{fig:galaxy_fates_outer} \end{figure} Clearly, galaxies in the central (\Fig{fig:galaxy_fates_inner}) and outer (\Fig{fig:galaxy_fates_outer}) regions of a group have vastly different evolutionary histories.
Slow-moving galaxies in the centres of groups almost never become unbound from the group -- instead, the majority of them are disrupted by the time the group re-enters the cluster, although a sizeable fraction ($17\%$) of them merge with the group halo. Dynamical friction likely plays a role in this, by causing these galaxies to spiral in towards the group centre, making them likely to merge with their host group's halo. This is in contrast to the outskirts of the groups, where the vast majority of group members become unbound from the group almost immediately after the group enters the cluster, and only a small fraction are heavily disrupted. In both cases, the black lines on the figures show that galaxies in other infalling groups experience similar evolution, although slightly more galaxies are disrupted in groups that become backsplash groups. \subsubsection{Galaxy fates across group phase space} \label{sec:fates_phasespace} Finally, we can take a more general approach than in \Sec{sec:fates} by looking at the phase space of the infalling groups, to determine the typical fates of galaxies at the second cluster infall, as a function of their initial position in this phase space. \Fig{fig:phasespace_difference} shows how common different outcomes are for group members, as a function of their relative position and speed at cluster infall; this is in effect a generalisation of \Fig{fig:galaxy_fates_inner} and \Fig{fig:galaxy_fates_outer}. For example, in the bottom-left region of the phase space, there is a high density of `disrupted' galaxies, showing that galaxies here during infall later became disrupted, in agreement with \Fig{fig:galaxy_fates_inner}. \begin{figure*} \includegraphics[width=\textwidth]{figures/phasespace_difference.pdf} \caption{Of the galaxies that are bound to a group at its first infall, each panel shows the fraction of these in each state at the moment of second infall. Phase space is defined by the position/speeds of the galaxies at the first infall. Top-left panel shows the fraction of galaxies that remain bound to the group. Top-right panel shows the fraction that become unbound from the group. Bottom-left shows the fraction that are `disrupted'. Bottom-right shows the fraction that merge with the group halo. Lighter colours represent regions of the phase space with a greater number of galaxies. White regions either represent the `unbound' region, or regions where the number of galaxies is very low.} \label{fig:phasespace_difference} \end{figure*} The top-left and top-right panels of \Fig{fig:phasespace_difference} show a substantial decrease in the number of galaxies that remain bound to a group outside of \mbox{$r\sim0.7R_{200}^{\rm{grp}}$} from the group centre. This indicates that, for almost all groups, virtually all galaxies outside of this radius are removed. As in \Sec{sec:evol_infall}, the tidal stripping of groups can explain this sharp cut. According to \Eq{eq:tidalrad_final}, a tidal radius of \mbox{$0.7R_{200}^{\rm{grp}}$} corresponds to a group that is approximately \mbox{$0.7R_{200}^{\rm{clus}}$} from the cluster centre. This distance is the maximum typical pericentric distance that we find for groups in our sample -- almost all groups ($95\%$) have a pericentric passage of \mbox{$r\leq0.7R_{200}^{\rm{clus}}$}. Consequently, almost all groups will have had a tidal radius of \mbox{$R_{\textrm{t}}=0.7R_{200}^{\rm{grp}}$} at some point in their orbit, but not all groups will have experienced a tidal radius less than this.
This explains why some galaxies remain in the groups within $0.7R_{200}^{\rm{grp}}$, but none remain beyond this distance. Generally, only galaxies near to the group centre with high velocities remain as bound group members. These are on longer, eccentric orbits -- galaxies with lower velocities spend more time nearer the group centre, and so are more likely to merge with the group, or to be disrupted. Furthermore, the bottom-left and bottom-right panels show that the disrupted galaxies inhabit different parts of phase space, compared to those that later merge with the group halo. Disrupted galaxies have large amounts of their dark matter stripped in a short period of time: for two-thirds of these galaxies, in the snapshot immediately after they are `disrupted', more than $50\%$ of their dark matter particles appear either in the halo of their host group or (less often) their host cluster. This disruption by a larger halo is similar to how galaxy harassment can occur in clusters \citep{moore1996_harassment}. However, the galaxies in the centre of these disrupted haloes do not immediately become associated with the group halo -- if this were the case, these objects would be tagged as merging with the group halo, which they are not. This implies that a tidal disruption is occurring, in which large amounts of material are removed from the galaxy, forming a substructure in the group such as a tidal stream. This substructure will most likely merge with the group halo at some later time \citep{moore1998}, effectively making this process a merger with the group halo, but over a longer time period. Disruption is more likely for galaxies in the centres of groups that are slow-moving at the moment of infall, while galaxies with greater speeds are somewhat more likely to merge with the group halo. One explanation for this lies in the left panel of \Fig{fig:phasespace_two-phase}, showing pre-pericentre groups. Before a group reaches pericentre, galaxies in the group centre with high speeds move downwards in phase space, indicating that their speed is decreasing due to dynamical friction, and they are slowly spiralling into the group centre where they merge. Slow-moving galaxies are instead moving upwards on this plot, indicating that they are experiencing strong, accelerating forces that can disrupt their structure. Additionally, high-speed galaxies are on radial, eccentric orbits, meaning that they pass the group centre infrequently. In contrast, a low speed and low group-centric distance indicates that a galaxy is on a small, circular orbit, and so will be able to make multiple orbits of the group in a short period of time, providing more opportunities for heavy stripping by the central group galaxy. To more clearly show the differences between these classes of galaxies (those that are bound to the group, unbound, disrupted, or have merged with the group at second infall), we combine the four panels from \Fig{fig:phasespace_difference} into a single figure, \Fig{fig:phasespace_contour}. The contours in this figure show, for galaxies in each class, where in phase space they were located at the moment of infall. It is important to note that each contour is located at half of the maximum value for that class, and so they are not scaled in the same way, as some galaxy fates are more common than others. Consequently, this plot does not show what outcome is most likely for galaxies in each part of phase space.
For example, many more galaxies become unbound than remain bound to a group, so those in the top-left of this diagram are far more likely to become unbound than remain bound -- this is more apparent when we compare the top two panels of \Fig{fig:phasespace_difference}. Instead, \Fig{fig:phasespace_contour} allows us to take a single class of galaxies (say, those that are disrupted by second infall), and see from where they originated (in this case, the low-velocity, inner regions of the group). Some regions are the source of multiple classes of galaxies, while some are the source of only one. For instance, group members that are later either bound or unbound can originate from the low-$r$, high-$v$ region of phase space, but only unbound galaxies originate from the high-$r$, low-$v$ region. These results from \Fig{fig:phasespace_difference} and \Fig{fig:phasespace_contour} broadly agree with the findings of \citet{choquechallapa2019}, who use dark matter-only simulations to study the fates of galaxies in infalling groups as a function of their position in group phase space. Among other results, they show that outside of \mbox{$r\sim0.8R_{200}^{\rm{grp}}$} there is a sharp increase in the fraction of members becoming unbound from their host group, and that galaxies lying near to the boundness line are more likely to be stripped from their groups. \begin{figure} \includegraphics[width=\columnwidth]{figures/phasespace_contour.pdf} \caption{Overlay of four panels from \Fig{fig:phasespace_difference}, to aid with comparison of different phase space distributions, for galaxies bound to groups at cluster infall. One highlighted region is shown for each of the four galaxy fates at the time of second infall. These show the region from which each class of galaxy most commonly originated -- that is, where they were previously found at the moment of first infall. Contour surrounding each region is placed at a value equal to half of the maximum, from each panel in \Fig{fig:phasespace_difference}. Grey regions either represent areas of phase space that contain few galaxies, or are not an important producer of any of these four classes of galaxies. From this, it is clear that galaxies in different regions of phase space later experience different environments, and different evolutionary processes.} \label{fig:phasespace_contour} \end{figure} \subsubsection{Observational analogues} \label{sec:obs} This work focuses on simulations of groups and clusters, but these simulations can be used to inform and interpret future observational studies. It might appear that this preferential removal of outer group members could lead to the formation of very dense, centrally-concentrated groups such as Hickson Compact Groups \citep{hickson1982} in and around clusters. As \Fig{fig:phasespace_arrows} shows, though, the galaxies that remain bound to a group do not remain in the same region of phase space. Instead, the remaining galaxies are redistributed throughout the group until they follow a similar distribution to that shown in \Fig{fig:infalling_groups}. These group remnants are no more centrally concentrated than the infalling groups. Most importantly, \Fig{fig:galaxy_fates} and the top-left panel of \Fig{fig:phasespace_difference} show that, of the galaxies that are bound to a group when it approaches a cluster, almost none are still bound to a group after just a single crossing of the cluster ($\sim2$ Gyr later).
Typically, only a very small number of galaxies remain in a group, and so the remnant `groups' are usually either single galaxies or galaxy binaries. Groups with five or more members are extremely unlikely to have previously experienced a cluster environment; across all of our simulations, less than $1\%$ of such groups entering a cluster after $z=0.1$ have previously passed through a cluster. Because of this, galaxy groups nearby to a cluster (i.e. in the cluster outskirts, just outside of $R_{200}^{\rm{clus}}$) are very unlikely to contain backsplash galaxies, which typically make up about $50\%$ of the galaxies surrounding a cluster \citep{gill2005,haggar2020}. Instead, these groups represent a population of galaxies that are unprocessed by their host clusters, but have experienced group effects in their past. This may also partly explain why unrelaxed clusters, which contain more substructure and hence more galaxy groups, are typically surrounded by fewer backsplash galaxies than relaxed clusters \citep{haggar2020}. The fact that galaxy groups observed nearby to clusters are very likely to be on their first approach to the cluster has important implications for observational studies of galaxy evolution and environmental pre-processing. Additionally, we can infer greater detail about the histories of the galaxies in these groups. For example, cluster galaxies that are currently not in groups are unlikely to have previously experienced the dense, central regions of a group, as galaxies in group centres are much more likely to remain in their groups. Similarly, galaxies associated with groups inside clusters have almost certainly previously passed through the group centre, even if they now reside in the group outskirts. This means that they will have experienced the most extreme environmental impacts of the group. \citet{hester2006} showed that, in groups of a similar size to those used in this work \mbox{($M_{200}^{\rm{grp}}=10^{13}M_{\odot}$)}, a disk galaxy with a dark matter mass of \mbox{$10^{11}M_{\odot}$} at \mbox{$r=0.75R_{200}^{\rm{grp}}$} will have $\sim20\%$ of its disk gas removed, but if this galaxy passes within \mbox{$r=0.25R_{200}^{\rm{grp}}$} of the group centre, it can lose approximately $90\%$ of its gas. They attribute this to the stronger ram pressure stripping that takes place in group centres. \section{Conclusions} \label{sec:conclusions} In this work we use hydrodynamical simulations to study the evolution of intermediate-sized galaxy groups (five to 50 members with stellar masses $M_{\rm{star}}\geq10^{9.5}M_{\odot}$) in the vicinity of large galaxy clusters, specifically from the time at which the groups first pass within $R_{200}$ of the cluster. We begin by studying the positions and speeds of galaxies relative to their host group in order to characterise how this `phase space' of the group changes over time, before studying the fates of group members after the passage of their group through a cluster. Our findings are summarised below. \begin{itemize} \item On entering a cluster, galaxy groups typically pass within $0.6R_{200}^{\rm{clus}}$ of the cluster centre. Most of these groups remain permanently bound to the cluster, although a small fraction ($\sim10\%$) reach an apocentric distance outside of the cluster's radius, $R_{200}^{\rm{clus}}$. \item The dynamics of these groups change in two phases. First, the member galaxies increase their speeds relative to the group centre, often becoming gravitationally unbound.
Then, after the group passes the pericentre of its cluster orbit (which typically occurs after $\sim0.5$ Gyr in the cluster), the distances of galaxies from the group centre increase. \item The majority of galaxies bound to a group at its first cluster infall are no longer in the group after a full passage through the cluster. Many of these galaxies become unbound from the group, are heavily stripped, or merge with the Brightest Group Galaxy, and the fate of a galaxy depends strongly on its location within the group at the infall time. \item Consequently, the overwhelming majority ($>99\%$) of groups that enter a cluster are doing so for the first time in their histories. In observations, groups that are seen just outside of a cluster are very unlikely to have previously experienced a cluster environment. \end{itemize} Although the composition and structure of simulated galaxy groups are dependent on the physical models that are used, the results from this work still allow us to draw conclusions about groups that can be applied to observational work. Groups that are observed nearby to clusters are almost certainly recent infallers, particularly groups with low velocity dispersions, as galaxies in groups become gravitationally unbound almost immediately after entering a cluster. Furthermore, any galaxies that are observed in a group that is inside a cluster will have previously passed through the group centre, and so will be severely stripped by the tidal forces and ram pressure of their host group. In addition to the approach taken in this paper, which draws conclusions on galaxy groups that can be applied observationally, work remains to be done on the dynamics of these groups. In future work we plan to study these dynamics in greater detail. For example, the binding energy-angular momentum phase space, and the orbital parameters of galaxies, can tell us about the anisotropy of group members' orbits \citep[e.g.,][]{wojtak2008,lotz2019}, which can in turn be used to describe how virialised a group is. In our future work we will study the time evolution of these dynamical parameters. The work in this paper further strengthens the case that galaxy groups provide a unique way to study galaxy evolution, and particularly pre-processing. As they are almost all first-infallers, groups in the outskirts of clusters will have experienced no cluster processing, and will have a very low contamination by backsplash galaxies. Consequently, pre-processing effects will dominate over any effects from the cluster in these structures, and so studying these objects in more detail will allow us to further disentangle the environmental effects of clusters, and the effects of other cosmic environments. Processes such as gas removal and morphological changes in these galaxies will exclusively have occurred pre-infall in groups or cosmic filaments, and so ultimately the properties of galaxies in groups will help inform us of the role that environment plays in driving galaxy evolution. \section*{Acknowledgements} We thank the referee, Gary Mamon, for his helpful and thorough comments, which have helped to improve the quality of this paper. This work has been made possible by {\sc The Three Hundred}\ collaboration\footnote{\url{https://www.the300-project.org}}. This work has received financial support from the European Union's Horizon 2020 Research and Innovation programme under the Marie Sk\l{}odowska-Curie grant agreement number 734374, i.e.
the LACEGAL project\footnote{\url{https://cordis.europa.eu/project/rcn/207630\_en.html}}. {\sc The Three Hundred}\ simulations used in this paper have been performed in the MareNostrum Supercomputer at the Barcelona Supercomputing Center, thanks to CPU time granted by the Red Espa\~nola de Supercomputaci\'on. For the purpose of open access, the author has applied a creative commons attribution (CC BY) to any author accepted manuscript version arising. RH acknowledges support from STFC through a studentship. He also thanks Andrew Benson for useful suggestions relating to the tidal radius of groups, and Liza Sazonova for extremely helpful discussions relating to merger trees. AK is supported by the Ministerio de Ciencia, Innovaci\'{o}n y Universidades (MICIU/FEDER) under research grant PGC2018-094975-C21. This work makes use of the {\sc{SciPy}} \citep{virtanen2020}, {\sc{NumPy}} \citep{vanderwalt2011}, {\sc{Matplotlib}} \citep{hunter2007}, {\sc{SymPy}} \citep{meurer2017} and {\sc{pandas}} \citep{mckinney2010} packages for {\sc{Python}}\footnote{\url{https://www.python.org}}. The authors contributed to this paper in the following ways: RH, UK, MEG and FRP formed the core team. RH analysed the data, produced the plots and wrote the paper along with UK, MEG and FRP. AK produced the halo catalogues and merger trees. GY supplied {\sc The Three Hundred}\ simulation data. WC assisted with interpreting the time evolution of group members and group properties, particularly in \Sec{sec:fates}. All authors had the opportunity to comment on the paper. \section*{Data availability} The data underlying this work has been provided by {\sc The Three Hundred}\ collaboration. The data may be shared on reasonable request to the corresponding author, with the permission of the collaboration. \bibliographystyle{mnras}
\section{Introduction}\label{sec:introduction} Traditionally, machine learning treats the world as a \textit{closed} and \textit{static} space. In particular for classification, domain data is assumed to comprise pre-defined classes with stationary class-conditional distributions. Moreover, the datasets used to fit models before deployment are assumed to be available in a single chunk. Practitioners develop such models under controlled lab conditions, where they nowadays rely on tremendous computational resources. This scarcely applies to many real-world applications, as the world is an open space in many facets. For instance, classifiers might be confronted with classes unseen during training. Moreover, the distributions of previously learned classes might be non-stationary, or models may need to learn novel classes during operation. These aspects often occur simultaneously, as in image classification, where unknown image categories should be distinguished from known ones that exhibit \textit{concept drift} (e.g., new data captured with different cameras). It is also in the very nature of biometric systems, such as face or writer identification, to be confronted with known subjects undergoing concept drift (e.g., due to aging or environmental changes), novel subjects to enroll, and unknown subjects. There is also a steady quest for making the respective algorithms computationally efficient enough to be applicable on edge devices with limited resources. \Gls{owr}, as formalized by Bendale and Boult~\cite{bendale2015openworld}, addresses such constraints and includes three subtasks. \begin{enumerate*} \item \emph{Recognize} new samples either as \emph{known} or \emph{unknown}. \item \emph{Label} new samples either by approving the recognition or defining a new known class. \item \emph{Adapt} the current model by exploiting updated labels. \end{enumerate*} The recognition subtask constitutes an independent research area termed \gls{osr}~\cite{scheirer2012openset} and has received a lot of interest in applications like face recognition~\cite{gunther2017opensetface}, novelty and intrusion detection~\cite{bendale2016opensetnn, henrydoss2017ievm, prijatelj2021novelty}, and forensics~\cite{lorch2020jpeg, maier2020bnn, lorch2021gps}. Currently, \gls{evm} models as proposed by Rudd et al.~\cite{rudd2017evm} are state of the art in \gls{osr}. \Glspl{evm} predict unnormalized class-wise probabilities for query samples to be included in the respective known classes. Model fitting depends on class negatives, i.e., it adapts well to imbalanced data, which is a common problem in incremental learning~\cite{ditzler2010learn++unb, wu2019largeir}. However, fitting and prediction scale badly with dataset size, making their use on resource-limited platforms difficult. Model adaptability can be achieved by cyclic retraining. However, this model-agnostic approach is computationally inefficient, and all data needs to be organized in a single chunk. \textit{Incremental learning} aims at performing such adaptations effectively and efficiently by batch-wise or sample-wise incorporation of novel data. This entails several challenges: on the one hand, data undergoes concept drifts that shall be learned.
On the other hand, the stability-plasticity dilemma~\cite{carpenter1987stabilityplasticity} could either lead to maximum predictive power on previously learned classes (i.e., high stability) or on novel classes (i.e., high plasticity). A good tradeoff between these two extremes is desired for well-generalizing models. Although there are several incremental formulations of popular classifiers~\cite{bifet2009adaptivedt, cauwenberghs2001isvc} or deep learning architectures~\cite{rebuffi2017icarl, castro2018endir, wu2019largeir}, these approaches assume closed sets of known classes in their prediction phase. In principle, probabilistic models like the \gls{evm} can handle batch-wise data, but their actual behaviour in incremental learning under an open world regime is still widely unexplored. In this paper, we show that simple ad-hoc applications of existing \gls{evm} approaches in \gls{owr} lead to suboptimal stability-plasticity tradeoffs. The contribution of this paper can be summarized as follows: \begin{enumerate*} \item A partial model fitting algorithm that prevents costly Weibull estimations by neglecting unaffected space during an update. This reduces the incremental training time by a factor of $28$. \item A model reduction technique using a weighted maximum $K$-set cover, providing fixed-size model complexities, which is fundamental for memory-constrained systems. This approach is up to $4\times$ faster than existing methods and improves recognition rates by about \SI{12}{\percent}. \item Two novel open world protocols that can be adapted to vary the task complexity in terms of openness. \item The framework is evaluated on these protocols with varying difficulty and dimensional complexity for applications such as image classification and face recognition. \end{enumerate*} \section{Related Work}\label{sec:related-work} \subsubsection{Incremental Learning} Popular classifiers such as \glspl{svm}, decision trees, linear discriminant analysis, and ensemble techniques have been modified to allow efficient model adaptations~\cite{domingos2000hoeffdingtrees, cauwenberghs2001isvc, polikar2001learnpp, kim2007incrementallda, bifet2009adaptivedt}. Curriculum and self-paced learning are concepts to sequentially incorporate samples into a model in a meaningful order~\cite{bengio2009curriculum, kumar2010spl, lin2017activeselfpaced}. iCaRL~\cite{rebuffi2017icarl} and EEIL~\cite{castro2018endir} use distillation or bias correction~\cite{wu2019largeir} to counter catastrophic forgetting. Zhang et al.~\cite{zhang2021fewshotil} proposed a pseudo-incremental learning paradigm by decoupling the feature and classification learning stages. However, the adaptation of the underlying \glspl{dnn} on embedded hardware, as required in many open world applications~\cite{bendale2015openworld}, is far from efficient. Additionally, these incremental strategies are not designed for \gls{osr}. \subsubsection{Open Set Recognition} Early approaches~\cite{tax2008growing,bartlett2008classification,grandvalet2008svm,cevikalp2012efficient} define threshold-based unknown detection rules for closed-set classifier outputs.
More recent methods build on \gls{evt} and consider negative class samples to estimate rejection probabilities. Scheirer et al.~\cite{scheirer2014wsvm} developed the \gls{wsvm} that combines a one-class and a binary \gls{svm}, where decision scores are calibrated via Weibull distributions. Jain et al.~\cite{jain2014pisvm} proposed the \gls{pisvm} to calibrate the outputs of a \acrshort{rbf} \gls{svm} to unnormalized posterior probabilities. The related OpenMax~\cite{bendale2016opensetnn} calibration is used for class activations of \glspl{dnn} to model the probability of samples being unknown. Unfortunately, such re-calibrations do not support incremental learning off-the-shelf. GANs also allow sharpening open set models with adversarial samples~\cite{ge2017genopenmax, neal2018counterfactual, kong2021opengan, yue2021counterfactualzf}. Recent novelty detection approaches exploit the uncertainty expressiveness of classifiers to perform novelty or unknown detection, such as Bayesian neural networks~\cite{blundell2015bnn}, Bayesian logistic regression~\cite{lorch2020jpeg}, and Gaussian processes~\cite{lorch2021gps}. While these methods commonly require multiple computationally demanding Monte Carlo draws to calculate the predictive uncertainty, Sun et al.~\cite{sun2021react} propose a non-incremental post hoc approach to handle model overconfidence. \subsubsection{Open World Recognition} \Gls{nn}-based classifiers are open world capable, as they typically have no actual training step. The \gls{osnn}~\cite{junior2017osnn} defines the open space via a threshold on the ratio of similarity scores of the two most similar classes. Bendale and Boult~\cite{bendale2015openworld} derived the \gls{nno} algorithm from the \gls{ncm} classifier~\cite{mensink2013ncm, ristin2014incm}. \gls{nno} rejects samples that are not in the range of any class center, where the range is determined by a learned Mahalanobis distance. However, these approaches are purely distance-based and do not take distributional information into account. Joseph et al.~\cite{joseph2021ore} proposed an open world object detection method that includes fine-tuning of a \gls{dnn}, which is typically too costly for embedded hardware. To overcome the limitations of \glspl{nn}, Rudd et al.~\cite{rudd2017evm} introduced the \gls{evm}, which defines sample-wise inclusion probabilities depending on each sample's neighborhood of other classes. Since this approach is based on a \gls{nn}-like data structure, they propose a model reduction technique that keeps only the most relevant data points, similar to the support vectors of \glspl{svm}, to reduce the memory footprint. The \gls{evm} has achieved state-of-the-art results in intrusion detection~\cite{henrydoss2017ievm} and open set face recognition~\cite{gunther2017opensetface}. The C-EVM~\cite{henrydoss2020cevm} performs a clustering prior to the actual \gls{evm} fitting to reduce the dataset size; the resulting cluster centroids are then used to fit the \gls{evm}. However, the clustering does not ensure a reduced model size, and, especially for small batches, it can cause computational overhead.
In contrast, our proposed method adequately detects unaffected space in incremental updates and prevents redundant parameter estimations. Additionally, we provide a computationally more efficient model reduction using a weighted maximum $K$-set cover, which reduces the model size to a fixed, user-set value. \section{Background: Extreme Value Theory}\label{sec:background} The \gls{evm} estimates per-sample probabilities of inclusion. Let $\bm{x}_i$ be a feature vector of class $y_i$, referred to as an anchor sample. Given $(\bm{x}_i, y_i)$, we select the $\tau$ nearest negative neighbors $\bm{x}_j$, $j = 1, \ldots, \tau$, from different classes $y_j \neq y_i$ according to a distance $d(\bm{x}_i, \bm{x}_j)$, where $\tau$ denotes the tail size. The inclusion probability of a sample $\bm{x}$ for class $y_i$ is given by the cumulative Weibull distribution: \begin{equation} \Psi_i(\bm{x}) = \Psi(\bm{x}; \theta_i) = \exp{\left(- \left( \frac{d(\bm{x}_i, \bm{x})}{\lambda_i} \right)^{\kappa_i} \right)} \enspace \text{,} \end{equation} where $\theta_i = \{\kappa_i, \lambda_i\}$ denotes the Weibull parameters, $\kappa_i$ is the \emph{shape}, and $\lambda_i$ is the \emph{scale} associated with $\bm{x}_i$. Given labeled training data $\mathcal{N} = \left\{ (\bm{x}_1, y_1), \ldots, (\bm{x}_N, y_N) \right\}$, each feature vector $\bm{x}_i$ with class label $y_i$ becomes an anchor. Fitting the underlying EVM thus amounts to estimating the corresponding $\theta$ for each anchor sample. A query sample $\bm{x}$ is assigned to the class $y_i$ with the maximum probability $\max_{i \in N} \Psi_i(\bm{x})$. This probability must reach a threshold $\delta$ to distinguish knowns from unknowns according to: \begin{equation} y = \begin{cases} y_i & \text{if } \max_{i \in N} \Psi_i(\bm{x}) \geq \delta \enspace \text{,} \\ \text{``unknown''} & \text{otherwise} \enspace \text{.} \end{cases} \end{equation} A baseline approach keeps all $\theta_i$, which is expensive in terms of prediction time complexity and memory footprint. Rudd et al.~\cite{rudd2017evm} proposed a model reduction such that only the informative $\theta_i$, the \emph{\glspl{ev}}, are kept, since samples within the same class might be redundant. This can be expressed as a set cover problem~\cite{karp1972setcover}: find a minimum number of samples that \emph{cover} all other samples. Redundancies are determined by the inclusion probabilities $\Psi_i(\bm{x}_j)$ within the $N_c$ samples of a class $c$ ($y_i = y_j \, \forall i,j \in \{1,\ldots,N_c\}$). A sample $\bm{x}_j$ is discarded if it is covered by $\theta_i$, i.e., $\Psi_i(\bm{x}_j) \geq \zeta$, where $\zeta$ denotes the coverage threshold.
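The per-anchor machinery above can be made concrete with a short sketch; this is an illustration with our own naming and a simplified maximum likelihood Weibull fit via {\sc SciPy}, not the exact fitting procedure of~\cite{rudd2017evm}:
\begin{verbatim}
# Minimal sketch (our naming) of per-anchor fitting and open-set
# prediction; the actual EVM uses an EVT-motivated fit on the
# margins to the nearest negative samples.
import numpy as np
from scipy.stats import weibull_min

def fit_anchor(x_i, negatives, tau):
    """Fit Weibull shape/scale to the tau nearest negative distances."""
    d = np.sort(np.linalg.norm(negatives - x_i, axis=1))[:tau]
    kappa, _, lam = weibull_min.fit(d, floc=0.0)  # shape, loc, scale
    return kappa, lam

def psi(x, x_i, kappa, lam):
    """Cumulative-Weibull inclusion probability of x for anchor x_i."""
    return np.exp(-(np.linalg.norm(x - x_i) / lam) ** kappa)

def predict(x, anchors, labels, params, delta=0.5):
    """Label of the most probable anchor, or 'unknown' below delta."""
    probs = [psi(x, a, k, l) for a, (k, l) in zip(anchors, params)]
    best = int(np.argmax(probs))
    return labels[best] if probs[best] >= delta else "unknown"
\end{verbatim}
With probabilities of this form, checking the coverage condition $\Psi_i(\bm{x}_j) \geq \zeta$ amounts to a single evaluation of \texttt{psi} per pair.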
This coverage criterion can be formulated as the minimization problem: \begin{align} \text{minimize} \enspace \sum_{i=1}^{N_c} I(\theta_i) \label{eq:rudd-optimize} \enspace \text{subject to} \enspace I(\theta_i) \Psi_i(\bm{x}_j) \geq \zeta \enspace \text{,} \end{align} where the indicator function $I(\theta_i)$ is given by: \begin{equation}\label{eq:rudd-indicator-function} I(\theta_i) = \begin{cases} 1 & \text{if any } \Psi_i(\bm{x}_j) \geq \zeta \quad \forall j \in N_c \, \enspace \text{,} \\ 0 & \text{otherwise}\enspace \text{.} \end{cases} \end{equation} Rudd et al.~\cite{rudd2017evm} determine approximate solutions in $\mathcal{O}(N_c^2)$ using greedy iterations, where in each iteration the samples that cover the most other samples are selected. This approach does not constrain the number of \glspl{ev}, which might be necessary for memory-limited systems. To this end, a bisection to determine a suitable $\zeta$ \emph{per class} can be performed. \section{Incremental Extreme Value Learning}\label{sec:incremental-learning} During online learning, new data points arrive and may interfere with the Weibull distribution estimates of the current \glspl{ev}. \subsubsection{Incremental Learning Framework}\label{ssec:incremental-framework} \gls{evm} learning involves two subtasks: \begin{enumerate*} \item \emph{Model fitting} to adapt the model to new data, and \item \emph{model reduction} that bounds the model's computational complexity and required resources. \end{enumerate*} In \gls{owr}, both steps need to handle training data arriving batch-wise over consecutive epochs. We perform incremental learning over epochs using newly arriving training batches $\mathcal{N}^t$, where $t$ denotes the epoch index. For an incremental formulation, let $\Theta_E^t = \{\theta_1^t, \ldots, \theta_E^t\}$ be a model of $E$ \glspl{ev}, determined either at the previous epoch or learned from scratch at the first epoch. The fit function incorporates the new batch $\mathcal{N}^t$ into the current model $\Theta_E^t$ to obtain a new intermediate model $\Theta^{t+1}$. The reduction squashes $\Theta^{t+1}$ according to a given budget by selecting the most informative \glspl{ev}, considering both previous and new samples. This yields the consolidated model $\Theta_{E}^{t+1} \subseteq \Theta^{t+1}$. Our framework efficiently alternates between the fit and reduction functions in each epoch. \subsubsection{Partial Model Fitting} \label{ssec:partial-model-fit} \begin{figure}[tb] \centering \subfloat[No update required.]{\input{figures/incremental-updates1}\label{sfig:incremental-a}} \qquad \subfloat[Update required.]{\input{figures/incremental-updates2}\label{sfig:incremental-b}} \caption{Incremental update illustration with $\tau=4$. The Weibull distribution of the \acrfull{ev}~(\protect\tikzextremevec) is estimated on the $\tau$ nearest samples~(\protect\tikznormalsample). The blueish hypersphere with radius~$d_\tau$ is derived from the farthest sample. The new sample~(\protect\tikznewsample) in \cref{sfig:incremental-a} lies outside the sphere and can be ignored. Once a new sample lies within the sphere, cf. \cref{sfig:incremental-b}, an update is required.} \label{fig:incremental}% \end{figure}% For model fitting, we process the samples in newly arriving batches $\mathcal{N}^t$ independently to incorporate them into the current model $\Theta_E^t$.
A new sample $\bm{x}^{t+1}$ might fall into the neighborhood of any \gls{ev}'s feature vector $\bm{x}_e^t$, which would invalidate the corresponding Weibull parameters in $\theta_e^t$, where $\theta_e^t \in \Theta_E^t$. A naive approach is to re-estimate a new Weibull distribution for each \gls{ev}, including the nearest negative neighbor search and tail construction. We argue that this is highly inefficient, since it is most likely that the new sample will not influence all the \glspl{ev}. Thus, most estimates will result in the same Weibull parameters as before. We extend the \gls{evm} model by an automatically derivable, i.e., non-user-set, value, namely the \emph{maximum tail distance}~$d_\tau$, which corresponds to the maximum distance within a tail, such that $\theta_e^t = \{\kappa_e^t, \lambda_e^t, d_{\tau,e}^t\}$. This parameter operates as a threshold and controls the model update. It can be described by a hypersphere centered at an \gls{ev} with radius $d_{\tau}$, as depicted in \cref{fig:incremental}. Anytime a sample falls into this hypersphere, the hypersphere must shrink, i.e., the tail must be updated. To perform partial fits, we need to compute the distances between $\bm{x}^{t+1}$ and all $\bm{x}_e^t$ and estimate the Weibull parameters for $\bm{x}^{t+1}$. Using these distances, we define the update rule for the \gls{ev}: \begin{equation} \theta_e^{t+1} = \begin{cases} \text{update}(\theta_e^{t}) & \text{if } d(\bm{x}_e^t, \bm{x}^{t+1}) < d_{\tau,e}^t \enspace \text{,} \\ \theta_e^{t} & \text{otherwise} \enspace \text{,} \end{cases} \end{equation} where $\text{update}(\cdot)$ denotes the tail update, the re-estimation of the Weibull parameters, and the storage of the new maximum tail distance. This allows computationally efficient partial fits and leads to exactly the same result as cyclic retraining, as long as no model reduction is carried out. \input{tables/mnist-update-ratio}% In \cref{tab:mnist-update-ratio}, we exemplify the gain of this approach. We incrementally fit an \gls{evm} on a subset of MNIST and store all samples as \glspl{ev}. The update ratio denotes the fraction of \glspl{ev} that require an update in subsequent epochs. It follows that the smaller the batches and tail sizes, the fewer updates are necessary. The benefit can become very substantial at small batch and tail sizes, with an update ratio of only \SI{0.56}{\percent}.
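A compact sketch of this gate illustrates why unaffected \glspl{ev} cost only a single distance computation each; the data layout and the simplified Weibull re-fit are our own, and the tail update assumes the new sample is a negative for every \gls{ev} it affects:
\begin{verbatim}
# Minimal sketch (our data layout) of the partial-fit gate: the stored
# maximum tail distance d_tau turns the update decision into one cheap
# distance comparison per extreme vector.
import numpy as np
from scipy.stats import weibull_min

def partial_fit(model, x_new, tau):
    """model: list of dicts with keys 'x' (anchor), 'kappa', 'lam',
    'tail' (sorted tau smallest negative distances), 'd_tau' (=tail[-1]).
    """
    for ev in model:
        d = float(np.linalg.norm(ev["x"] - x_new))
        if d < ev["d_tau"]:                      # hypersphere violated
            tail = np.sort(np.append(ev["tail"], d))[:tau]
            kappa, _, lam = weibull_min.fit(tail, floc=0.0)
            ev.update(kappa=kappa, lam=lam, tail=tail, d_tau=tail[-1])
        # else: the Weibull estimate is provably unchanged -- skip re-fit
\end{verbatim}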
Solid dots correspond to the \acrfullpl{ev} and colored areas belong to the related class, where the inclusion probability is visualized via the opacity. In~(a), no reduction is performed, i.\,e., the \glspl{ev} match the training data. The set cover (SC) reduction and our weighted set cover (wSC) are shown in~(b) and~(c), respectively. In~(d), the C-EVM is shown and (e)~presents the C-EVM with our wSC.}% \label{fig:overview-reductions2}% \end{figure*}% \input{algorithms/gmc-greedy-short}% In our incremental learning framework, the aim of a class-wise model reduction $g$ is to find a subset $\Theta_{E_c}^t \subseteq \Theta_c^t$ that is budgeted w.\,r.\,t.\ the number of resulting \glspl{ev}. \paragraph{Problem Statement} For the sake of simplicity, let us drop the batch count $t$ and the class index $c$, unless necessary. We denote our model reduction by a function $g \colon \Theta \to \Theta_{E}$, where $\Theta_{E}$ underlies the constraint $|\Theta_{E}| \leq K\leq |\Theta| = N$ and $K$ denotes the budget of \glspl{ev} that can be kept for a certain class with $N$ samples. The intuition behind the design of $g$ is three-fold: \begin{enumerate*} \item We aim at selecting \glspl{ev} that best cover others according to pair-wise inclusion probabilities. \item While pair-wise inclusion probabilities are not symmetric in general, i.\,e., $\Psi_i(\bm{x}_j) \neq \Psi_j(\bm{x}_i)$, high bilateral coverage is common and would introduce a bias towards selecting \glspl{ev} very close to class centroids; hence, selecting both $\Psi_i(\bm{x}_j)$ and $\Psi_j(\bm{x}_i)$ shall be penalized. \item At most $K$ \glspl{ev} shall be selected. \end{enumerate*} We propose to formulate $g$ as a weighted maximum $K$-set cover~\cite{cohen2008gmc}. Let us define a collection of sets $\mathcal{S} = \{ \mathcal{S}_1, \ldots, \mathcal{S}_N \}$, where $\mathcal{S}_i = \{ (w_{kl}, w_{lk}) \, | \, 1 \leq k \leq i < l \leq N \}$ models a single~\gls{ev}. A pair $(w_{kl}, w_{lk}) \in \mathcal{S}_i$ contains two weights given by the inclusion probabilities $w_{kl} = \Psi_k(\bm{x}_l)$ and $w_{lk} = \Psi_l(\bm{x}_k)$. We determine $g$ according to the integer linear program: \begin{align} \text{maximize} \enspace & \sum_{i=1}^{N} \sum_{j=i+1}^{N} \beta_{ij} \Psi_i(\bm{x}_j) + \beta_{ji} \Psi_j(\bm{x}_i) \label{eq:mst-optimization-3} \\* \text{subject to} \enspace & \sum_{i=1}^{N} \gamma_i \leq K \enspace \text{,} \label{eq:mst-constraint-1-3} \\* & \beta_{ij} + \beta_{ji} \leq 1 \enspace \text{,} \label{eq:mst-constraint-2-3} \end{align} where $\beta_{ij} \in \{0, 1\}$ selects covered elements ($\beta_{ij} = 1 \Leftrightarrow (w_{ij}, w_{ji})~\text{is covered by}~\mathcal{S}_i$) and $\gamma_i \in \{0, 1\}$ selects kept \glspl{ev} ($\gamma_i = 1 \Leftrightarrow \mathcal{S}_i~\text{is kept}$). The objective in \cref{eq:mst-optimization-3} is optimized w.\,r.\,t.\ $\beta$ and $\gamma$ to maximize the total coverage. The constraint in~\cref{eq:mst-constraint-1-3} limits the number of \glspl{ev} to the budget~$K$ and \cref{eq:mst-constraint-2-3} penalizes the selection of bilateral coverage.
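Before detailing the incremental algorithm, we give a minimal Python sketch of the greedy selection used to approximate this program; the exact form of the bilateral regularization reflects our reading of the algorithm, and all identifiers are illustrative:
\begin{verbatim}
import numpy as np

def weighted_k_set_cover(psi, K):
    """Greedy weighted maximum K-set cover (sketch).

    psi[i, j] = Psi_i(x_j): probability that candidate EV i
    includes sample j (diagonal set to zero). Returns K indices.
    """
    p = psi.sum(axis=1)          # coverage each candidate offers
    selected = []
    for _ in range(min(K, psi.shape[0])):
        i = int(np.argmax(p))    # EV covering most remaining mass
        selected.append(i)
        p = p - psi[:, i]        # bilateral regularization: others
                                 # lose credit for covering the EV
        p[selected] = -np.inf    # never re-select kept EVs
    return selected

# Toy usage with a random, zero-diagonal inclusion matrix.
rng = np.random.default_rng(0)
psi = rng.random((50, 50))
np.fill_diagonal(psi, 0.0)
print(weighted_k_set_cover(psi, K=10))
\end{verbatim}
Note that the coverage sums are computed only once and merely corrected after each selection, which is what keeps the greedy pass quadratic.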
\paragraph{Incremental Algorithm} We solve \crefrange{eq:mst-optimization-3}{eq:mst-constraint-2-3} by greedy iterations, as depicted in Algorithm~\ref{alg:gmc}. Our algorithm facilitates incremental learning by reusing intermediate results from the model reduction of the previous epoch, where $\Theta$ denotes the intermediate model of a class from the partial fit function and $K$ is the \gls{ev} budget. Line $3$ limits the number of iterations to the desired budget $K$. In each iteration, we first compute for each sample the sum of the inclusion probabilities from all other samples toward it (line $4$). The element with the highest sum is selected as \gls{ev} (lines $5$--$6$). In the end, the reduced model $\Theta_E$ is returned. Note that the summations in line $4$ do not need to be recomputed in every iteration. We provide additional implementation details for Algorithm~\ref{alg:gmc} in the supplementary~material. \paragraph{Relationship to Previous Works~\cite{rudd2017evm, henrydoss2020cevm}} \label{sssec:relationship} Our weighted maximum $K$-set cover formulation in \crefrange{eq:mst-optimization-3}{eq:mst-constraint-2-3} generalizes the conventional set cover model reduction of Rudd~\emph{et al.}~\cite{rudd2017evm}. To formulate \cite{rudd2017evm} in our framework, we need to substitute $\Psi_i(\bm{x}_j)$ and $\Psi_j(\bm{x}_i)$ in \cref{eq:mst-optimization-3} by $I(\theta_i)$ and $I(\theta_j)$, i.\,e., the indicator function of \cref{eq:rudd-indicator-function}. Thus, all samples with coverage probabilities $\geq \zeta$ are weighted uniformly. The C-EVM~\cite{henrydoss2020cevm} uses class-wise DBSCAN clustering~\cite{ester1996dbscan} and generates centroids from these clusters. This preconditioning reduces the training set size before the actual EVM is fitted to the centroids. However, it does not enforce a specific number of \glspl{ev}. This is sub-optimal in memory-limited applications, e.\,g., on edge devices, where fixed model sizes are preferred. In \cref{fig:overview-reductions2}, we compare different reduction techniques on example data, where $K$-SC denotes the $K$-set cover of Rudd's method~\cite{rudd2017evm} and $K$-wSC our weighted $K$-set cover. It can be observed that $K$-SC leads to scattered decision boundaries and is sensitive to outliers. Our stand-alone \gls{ievm} is robust against outliers and preserves the open space, cf.\ \cref{sfig:reduce-ours2}. The C-EVM generates new centroids but does not guarantee a certain number of \glspl{ev}. Therefore, we extend it with our $K$-wSC and the bilateral coverage regularization. This selects \glspl{ev} that accurately describe the underlying distributions of the known classes. We argue that the iEVM and the C-iEVM represent different levels of the stability-plasticity trade-off. While the iEVM strictly bounds the decision boundaries to dense class centers and leaves more open space, it is stable to concept drift.
In contrast, the C-iEVM enables more plasticity, as outliers have a high impact on the generated centroids. The hard thresholding of Rudd~\emph{et al.}~\cite{rudd2017evm} also comes at the cost of embedding their set cover into a bisection search to determine a coverage threshold $\zeta$ providing the desired number of \glspl{ev}. Given a bisection termination tolerance of $\epsilon$, the overall model reduction has a time complexity of $\mathcal{O}(\log(\epsilon^{-1})N^2)$ for a single class comprising $N$ samples. In contrast, our model reduction method avoids thresholding and considers the given budget on the number of \glspl{ev} in a single pass with time complexity $\mathcal{O}(N^2)$. This is an important factor for implementations on resource-limited devices. \section{Open World Evaluation Protocols}\label{sec:protocols} We introduce our two open-world evaluation protocols. The first protocol describes the general real-world online learning environment, where new classes are learned and old classes are updated by new samples. The second protocol is a specialization of the first one, where subsequent epochs contain only new classes. \subsubsection{Protocol~I} This protocol reflects the realization of a newly deployed \gls{owr} application. While others start with a large initial training phase~\cite{bendale2015openworld}, we argue that this is unrealistic in real-world scenarios, as the exact environmental conditions, e.\,g., sensors and lighting, are unknown in advance. We start with a minimum of $2$ classes and incrementally learn new classes, while incorporating new samples of previous classes. This introduces two types of concept drift, termed \emph{direct} and \emph{implicit} concept drift. Direct concept drift applies to a single changing class, e.\,g., the aging of a person. Implicit concept drift describes the mutual impact of neighboring classes competing for transitional feature space. Here, the occurrence of a new class can have a high impact on previously learned classes, as both may share parts of the feature space, e.\,g., leopards and jaguars. Implicit concept drift occurs whenever a changing class influences the learned concepts of other classes. \renewcommand{\thefigure}{4} \begin{figure*}[t] \centering \vspace{1ex} \tikzsetnextfilename{classinc-lfw-macro} \includegraphics[width=.99\linewidth]{figures/classinc-lfw-75-macro.tikz} \vspace{-1ex}% \caption{Averaged results over $3$ runs of Protocol~II on LFW. Set cover and our weighted maximum $K$-set cover reduction to $K$ \acrfullpl{ev} are denoted as $K$-SC and $K$-wSC, respectively. Our reduction achieves comparable results \emph{while} reducing the model complexity by a factor of $4$.}% \label{fig:lfw-classinc-macro}% \end{figure*}% Our protocol allows controlling its complexity via an initial \emph{openness}~\cite{scheirer2012openset}. According to this openness, classes are divided into two disjoint sets of knowns~$\mathcal{C}_\text{K}$ and unknowns~$\mathcal{C}_\text{U}$, with $|\cdot|$ denoting the cardinality.
The first epoch contains $2$ classes of $\mathcal{C}_\text{K}$. The following epochs comprise a single new class of $\mathcal{C}_\text{K}$ as well as samples of classes seen in previous epochs. Hence, all classes in $\mathcal{C}_\text{K}$ are known at epoch $|\mathcal{C}_\text{K}| - 1$. Each learning epoch is followed by an evaluation on a fixed test set. Note that, although the test set is fixed, the number of unknowns decreases over the epochs. Thus, the openness decreases from epoch $1$ to epoch $|\mathcal{C}_\text{K}| - 1$. This reduces the complexity of unknown detection while increasing the difficulty of the classification of knowns. To further investigate the models' incremental adaptability at a steady openness, we continue the epoch-wise training after epoch $|\mathcal{C}_\text{K}| - 1$ with batches of $\mathcal{C}_\text{K}$. \subsubsection{Protocol~II} This protocol specializes the first one for applications with few samples per class. Due to the limited amount of training samples, we derive a purely class-incremental evaluation, where each epoch contains a certain number of new classes. No previously learned classes are directly updated by new samples in subsequent epochs; however, they are updated implicitly by newly occurring classes, leading to the previously mentioned implicit concept drift. We split the classes w.\,r.\,t.\ a predefined openness into knowns and unknowns. The unknowns are put in the test set together with a subset of samples for each of the known classes. The known classes are split into batches, where each batch contains all remaining samples of a certain number of classes. \subsubsection{Performance Measures} The \gls{dir} at certain \glspl{far} serves as the evaluation metric, as is common in open-set face recognition~\cite{gunther2017opensetface}. The \gls{far} determines the fraction of misclassified unknowns. The threshold achieving a certain \gls{far} can be derived from the evaluated dataset. The \gls{dir} determines the fraction of knowns that are both detected as knowns \emph{and} correctly classified. A high \gls{dir} at a low \gls{far} is favorable. \section{Experiments and Results}\label{sec:experiments} We evaluate our \gls{ievm} in different \gls{owr} applications. The \gls{evm}, \gls{osnn}, and \gls{tnn} serve as baselines. We also extend the C-EVM by our incremental framework, where clustering is applied prior to model fitting. The method notations are adopted from Section~\ref{sssec:relationship}. Model reductions are performed at every epoch. \subsubsection{Image Classification} The open-world performance of our approach is evaluated with Protocol~I on \cifar{100}~\cite{krizhevsky09cifar}. This dataset comprises \num{50000} training and \num{10000} test samples of \num{100} classes. The randomized split into knowns and unknowns is \SI{50}{\percent}, which results in an openness range from \SI{80.2}{\percent} for the first batch to \SI{18.4}{\percent} for batch $49$ and the following ones. We evaluate $100$ epochs using a batch size of $24$ and benchmark all models on the whole test set after each epoch. We repeat the protocol $3$ times using different random orders in the creation and processing of batches.
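The performance measures of the previous section can be computed with the following minimal Python sketch; the score convention (larger values mean ``more likely known'') and all identifiers are our own assumptions:
\begin{verbatim}
import numpy as np

def dir_at_far(scores_k, preds, labels, scores_u, far=0.01):
    """DIR at a fixed FAR. `scores_u` holds confidence scores of
    unknown test samples; `scores_k`, `preds`, `labels` those of
    the knowns. FAR = fraction of unknowns above the threshold."""
    thr = np.quantile(scores_u, 1.0 - far)
    accepted = scores_k > thr            # detected as known
    return float(np.mean(accepted & (preds == labels)))

# Toy usage with synthetic scores.
rng = np.random.default_rng(0)
s_k = rng.normal(2.0, 1.0, 1000)
s_u = rng.normal(0.0, 1.0, 1000)
y = rng.integers(0, 10, 1000)
y_hat = np.where(rng.random(1000) < 0.9, y, (y + 1) % 10)
print(dir_at_far(s_k, y_hat, y, s_u, far=0.01))
\end{verbatim}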
\paragraph{Implementation Details} For feature extraction, we use EfficientNet-B6~\cite{tan2019efficientnet}, pre-trained on ImageNet~\cite{deng2009imagenet} and fine-tuned on a \cifar{100} training split via the categorical cross-entropy loss and a bottleneck layer of size $1024$. All \glspl{evm} use the same parameters: $\tau = 75$ and $\alpha = 0.5$. For the clustering in the C-EVM and C-iEVM, we adopt the parameters reported in~\cite{henrydoss2020cevm}. Methods that employ a model reduction reduce the number of \glspl{ev} to $K = 10$. We report additional results with alternative parameters in the supplementary material. \paragraph{Results} Averaged results of $3$ repetitions of Protocol~I are shown in \cref{fig:cifar100-openworld-group}. We depict the \gls{dir} over the number of samples at different \glspl{far}. All \glspl{evm} perform similarly for the first \num{250} samples and achieve an initial \gls{dir} of about \SI{95}{\percent} at a \gls{far} of \SI{10}{\percent}. In later epochs, our \gls{ievm} and C-iEVM clearly outperform the competing methods at high and medium \glspl{far} (\SI{10}{\percent} and \SI{1}{\percent}), while at a very small \gls{far} (\SI{0.1}{\percent}) all methods perform comparably. Moreover, our methods begin to recover after the openness remains constant. In the case that the training samples within a class are widely spread, the original set cover model reduction struggles to find the most important \glspl{ev}. This leads to a constant decrease of the \gls{dir} even after the openness complexity stays constant. Similarly, DBSCAN in the C-EVM fails to generate meaningful centroids, resulting in almost identical outputs as the baseline \gls{evm}. We noticed that DBSCAN achieves only average reductions of about \SI{3}{\percent} and the model contains \num{2294}~\glspl{ev} after the last epoch. Our weighted $K$-set cover easily selects the most important \glspl{ev} and achieves the best results in the C-iEVM and iEVM while storing only \num{500}~\glspl{ev} ($10$ per class). The number of \glspl{ev} influences not only the memory footprint but also the inference time. The reduced models take about \SI{2.4}{s} to evaluate the test set, while the others require about \SI{14.7}{s}, a factor of $6$. Furthermore, our model reduction is, averaged over all epochs, faster than the conventional one by a factor of $4.2$. \subsubsection{Face Recognition} To evaluate our method in open-world face recognition, we apply Protocol~II to the \gls{lfw}~\cite{huang2007lfw1, huang2014lfw2} dataset. We adopt the training and the $O3$~test split of~\cite{gunther2017opensetface}, where the training set consists of \num{2900} samples from \num{1680} unbalanced classes with either $1$ or $3$ images. We divide this split into $10$ batches with $168$ classes each. After each epoch, the test set is evaluated. Since the test set is highly unbalanced, with \numrange{1}{527} samples per class, we report the \emph{macro}-averaged \gls{dir} at certain \glspl{far}. This prevents the suppression of misclassified underrepresented classes and is therefore a better representation of the global performance on this dataset. The protocol is repeated $3$ times.
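The macro averaging used here can be sketched as a per-class variant of the earlier DIR snippet; the identifiers are again illustrative:
\begin{verbatim}
import numpy as np

def macro_dir_at_far(scores_k, preds, labels, scores_u, far=0.01):
    """Macro-averaged DIR: the DIR is computed per known class and
    then averaged, so under-represented classes are not drowned
    out by the well-populated ones."""
    thr = np.quantile(scores_u, 1.0 - far)
    ok = (scores_k > thr) & (preds == labels)
    return float(np.mean([ok[labels == c].mean()
                          for c in np.unique(labels)]))
\end{verbatim}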
\paragraph{Implementation Details} For feature extraction, we use a ResNet50, pre-trained on MS-Celeb-1M~\cite{guo2016msceleb} and fine-tuned on VGGFace2~\cite{cao2018vggface2}, with an embedding size of \num{128}. We adopt the \gls{evm} parameters $\tau=75$ and $\alpha=0.5$ from~\cite{gunther2017opensetface}. Additionally, our methods with model reduction perform the contraction to a single \gls{ev} per class, i.\,e., $K = 1$. \paragraph{Results} We present the averaged \gls{dir} at several \glspl{far} in \cref{fig:lfw-classinc-macro}. Surprisingly, the \gls{osnn} achieves better recognition scores in this protocol than in the previous one. The C-EVM and the \gls{osnn} perform comparably, while the \gls{osnn} loses precision at the lowest \gls{far} (\SI{0.1}{\percent}). Our C-iEVM and iEVM achieve comparable results \emph{while} reducing the model complexity by a factor of $4$. The computational efficiency of our incremental framework is presented in \cref{fig:lfw-runtime}. Here, partial fitting reduces the average training time by a factor of $28$. In particular, the performance gains are substantial at late epochs, where the EVM requires \SI{27}{s} to learn the final classes, while the iEVM takes \SI{0.7}{s}. Our model reduction is, averaged over all epochs, faster than the conventional set cover approach by a factor of $3.7$. \renewcommand{\thefigure}{5} \begin{figure}[t] \centering \tikzsetnextfilename{lfw-runtime}% \includegraphics[width=.99\linewidth]{figures/lfw-runtime.tikz}% \vspace{-1ex}% \caption{Averaged runtime of the training step (left) and model reduction (right) from the evaluation of Protocol~II and LFW. Our partial fit reduces the average training time by a factor of $28$. Our model reduction, averaged over all epochs, is faster than the conventional set cover by a factor of $3.7$.} \label{fig:lfw-runtime}% \end{figure}% \subsubsection{Additional Experiments} The supplementary material contains additional details about the proposed reduction and the evaluation on an additional dataset~\cite{fiel2017icdar2017} using Protocol~II. \section{Conclusion}\label{sec:conclusion} We introduced an incremental learning framework for the \gls{evm}. Our partial model fitting neglects unaffected space during an update and prevents costly Weibull estimates. The proposed weighted maximum $K$-set cover model reduction guarantees a fixed-size model complexity with less computational effort than the conventional set cover approach. Our reduction leads to dense class centers, filtering out outliers. The proposed modifications outperform the original EVM and the C-EVM on novel open-world protocols in terms of efficacy and efficiency. In future work, we will investigate the method on larger datasets to better understand the advantages of our model reduction and put more effort into applications with harsh constraints on low \acrlongpl{far}. \subsection{Algorithm Details} Algorithm~\ref{alg:gmc2} provides additional details of the proposed weighted maximum $K$-set cover model reduction for the \gls{evm}. Recall that this is a class-wise reduction technique. Thus, the number of \glspl{ev} in a single class is denoted by $E$, and the number of samples within a batch of this class by $N$. The summations of the inclusion probabilities for each \gls{ev} are given in $\bm{p}$.
The \gls{evm} model $\Theta_E^t$ represents the \glspl{ev} of the previous epoch, $\Theta_N^{t+1}$ the estimated Weibull parameters of the current data batch, and $K$ determines the \gls{ev} budget. The reduction comprises four steps: \begin{enumerate} \item Update the inclusion probability sums of the old \glspl{ev} w.\,r.\,t.\ the new batch (lines \numrange{2}{4}). \item Sum up the inclusion probabilities of the new samples w.\,r.\,t.\ each other (lines \numrange{6}{9}). This step has a time complexity of $\mathcal{O}(N \cdot (E + N))$, which is $\mathcal{O}(N^2)$ for large batches (i.\,e., $N \gg E$) and $\mathcal{O}(NE)$ otherwise. \item Line $10$ performs the greedy search for the \glspl{ev}. Details for Algorithm~\ref{alg:greedy} follow in the next paragraph. \item Update $\bm{p}$ according to the new \glspl{ev} (lines \numrange{11}{15}). If the two conditions $N > E$ and $E > (N - E)$ hold, it is more efficient to skip line $11$, i.\,e., not to reset $\bm{p}$. Then we can use the modified $\bm{p}$ of Algorithm~\ref{alg:greedy} and incrementally subtract and remove non-\gls{ev} samples, similar to the regularization in Algorithm~\ref{alg:greedy}. This has a time complexity of $\mathcal{O}((N - E) \cdot E) \Rightarrow \mathcal{O}(NE)$, since we only need to update the elements of $\bm{p}$ that are part of $\Theta_E^{t+1}$. \end{enumerate} The greedy iteration is depicted in Algorithm~\ref{alg:greedy} and requires the summations $\bm{p}$, the combined model $\Theta$, and the budget $K$. The number of iterations is limited by $K$ (line~$3$). In line~$4$, we take the sample with the highest sum of inclusion probabilities and store it in the \gls{ev} model (line~$5$). Then follows the bilateral coverage regularization, which removes the probability of inclusion of the selected \gls{ev} from the other samples (lines \numrange{6}{8}). In lines \numrange{9}{10}, we remove the \gls{ev} from $\bm{p}$ and $\Theta$. In the end, we obtain the \gls{evm} model $\Theta_E$ containing only the \glspl{ev}. Note that for the special case mentioned in step~$4$ above, we also need to return the modified $\bm{p}$ and $\Theta$. The total asymptotic runtime of the proposed weighted maximum $K$-set cover algorithm is $\mathcal{O}(N^2)$. Unlike the set cover of Rudd~\emph{et al.}~\cite{rudd2017evm}, which has a complexity of $\mathcal{O}(\log(\epsilon^{-1}) N^2)$ with termination tolerance $\epsilon$, it does not depend on a bisection search. \input{algorithms/gmc-greedy-long}% \input{algorithms/gmc-greedy-iter}% \renewcommand{\thefigure}{6} \begin{figure*}[tb] \centering \vspace{1ex} \tikzsetnextfilename{protocol1-cifar-params} \includegraphics[width=.99\linewidth]{figures/supmat/protocol1-cifar-params} \vspace{-1ex}% \caption{Different parameterizations of our \acrfull{ievm}. Averaged results over $3$ runs of Protocol~I and \cifar{100}.
The vertical dashed line marks the batch at which the openness remains constant.} \label{fig:cifar100-ievm-params}% \end{figure*}% \renewcommand{\thefigure}{7} \begin{figure*}[tb] \centering \tikzsetnextfilename{protocol2-icdar-ievm-params} \includegraphics[width=.99\linewidth]{figures/supmat/protocol2-icdar-ievm-params} \vspace{-1ex}% \caption{Different tail size $\tau$ parameterizations of our \acrfull{ievm}. Averaged results over $3$ runs of Protocol~II and ICDAR\num{17}.} \label{fig:icdar-ievm-params}% \end{figure*}% \subsection{Additional Experiments} In this section, we present further experiments of the evaluation with Protocol~I and \cifar{100}. Furthermore, we evaluate the writer identification dataset ICDAR\num{17}~\cite{fiel2017icdar2017} with Protocol~II. \subsubsection{Protocol~I -- \cifar{100}} In the main text, we show the results of the \gls{ievm} on Protocol~I and \cifar{100} with the parameters $\tau = 75$ and a reduction to $K=10$. Here, we present further parameterizations in \cref{fig:cifar100-ievm-params}. As in the main text, the left, middle, and right plots show the \gls{dir} at \glspl{far} of \SI{10}{\percent}, \SI{1}{\percent}, and \SI{0.1}{\percent}. When comparing the accuracies for different values of $\tau$ at identical $K$, it turns out that the tail size $\tau$ has almost no influence on the models' accuracy. This is similar to what Günther~\emph{et al.}~\cite{gunther2017opensetface} reported on the \gls{lfw} dataset. A larger value of $K$ may lead to worse results, as can be seen in the case of the iEVM ($\tau = 75$, $50$-wSC). This may be counter-intuitive at first glance, considering that classification should perform better with more data. However, storing more data implies less plasticity and more stability, which can interfere with the incremental training adaptability. \subsubsection{Protocol~II -- ICDAR\num{17}} Another \gls{owr} task is writer identification. Here, we apply Protocol~II to the dataset ICDAR\num{17}~\cite{fiel2017icdar2017}. It contains handwritten pages from the $13^\text{th}$ to the $20^\text{th}$ century. Since the feature extraction is trained on the training set of ICDAR\num{17}, a subsequent classification training and evaluation on the same set would be biased. Therefore, we take only the test set into account, with $5$ pages for each of the $720$ writers. \SI{30}{\percent} of the classes are selected as unknowns and left in the test split. For each of the known classes, we leave $1$ sample in the test split, i.\,e., the training split has $4$ samples for each of the $504$ known classes. The knowns are split into $9$ batches with $56$ classes each and trained incrementally. This protocol implements an openness ranging from \SIrange{62}{9.3}{\percent}. The results are averaged over $3$ protocol repetitions. \paragraph{Implementation Details} The feature set consists of the \num{6400}-dimensional activation of the penultimate layer of a ResNet20, which was trained in a self-supervised fashion~\cite{Christlein17ICDAR}. The training uses SIFT descriptors that are calculated on patches of $32\times32$ pixels at SIFT keypoints. The SIFT descriptors are clustered using $k$-means. Then, the ResNet20 is trained using the cross-entropy loss, where the patches are used as input and the targets are the cluster center IDs of the patches.
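The described pretext task can be illustrated by the following hedged Python sketch; the tiny network, the cluster count, and the input file are placeholders and by no means the actual training setup of~\cite{Christlein17ICDAR}:
\begin{verbatim}
# Hedged sketch: SIFT patches -> k-means pseudo-labels -> CNN.
import cv2, numpy as np, torch, torch.nn as nn
from sklearn.cluster import KMeans

def sift_patches(img, size=32):
    """32x32 patches and SIFT descriptors at SIFT keypoints."""
    kps, descs = cv2.SIFT_create().detectAndCompute(img, None)
    h, patches, feats = size // 2, [], []
    for kp, d in zip(kps, descs):
        x, y = map(int, kp.pt)
        if h <= x < img.shape[1] - h and h <= y < img.shape[0] - h:
            patches.append(img[y - h:y + h, x - h:x + h])
            feats.append(d)
    return np.array(patches), np.array(feats)

img = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)  # placeholder
patches, descs = sift_patches(img)
k = 100                                   # assumed cluster count
labels = KMeans(n_clusters=k, n_init=4).fit_predict(descs)

# Toy stand-in for the ResNet20, trained with cross-entropy on
# (patch, cluster-id) pairs.
net = nn.Sequential(nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
                    nn.AdaptiveAvgPool2d(4), nn.Flatten(),
                    nn.Linear(16 * 4 * 4, k))
opt = torch.optim.SGD(net.parameters(), lr=0.1)
X = torch.tensor(patches, dtype=torch.float32).unsqueeze(1) / 255.0
y = torch.tensor(labels, dtype=torch.long)
for _ in range(10):                       # toy epoch count
    opt.zero_grad()
    loss = nn.functional.cross_entropy(net(X), y)
    loss.backward()
    opt.step()
\end{verbatim}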
\renewcommand{\thefigure}{8} \begin{figure*}[tb] \centering \vspace{1ex} \tikzsetnextfilename{protocol2-icdar2} \includegraphics[width=.99\linewidth]{figures/supmat/protocol2-icdar2} \vspace{-1ex}% \caption{Averaged results over $3$ runs of Protocol~II on ICDAR\num{17}. Set cover and our weighted maximum $K$-set cover reduction to $K$ \acrfullpl{ev} are denoted as $K$-SC and $K$-wSC, respectively.} \label{fig:icdar}% \end{figure*}% \paragraph{Hyperparameter Evaluation} The experiments on \cifar{100} and Protocol~I show, similar to the previous work of G{\"u}nther~\emph{et al.}~\cite{gunther2017opensetface}, that the tail size parameter $\tau$ has only a minor impact on the results. However, we noticed that this does not apply to Protocol~II and ICDAR\num{17}, as visualized in \cref{fig:icdar-ievm-params}. The experiments show that a small tail size ($\tau \in \{5, 10\}$) achieves a better \gls{dir} at a high \gls{far} of \SI{10}{\percent}. This difference diminishes over the class-wise increments at the medium and small \glspl{far} of \SI{1}{\percent} and \SI{0.5}{\percent}. Rudd~\emph{et al.}~\cite{rudd2017evm} state that a larger tail size leads to higher coverage. This implies that for ICDAR\num{17} a high coverage and little open space are less favorable, and a steep decision boundary is beneficial. \paragraph{Results} The comparison with the other baseline methods is shown in \cref{fig:icdar}. All \glspl{evm} use a tail size of $\tau = 5$. The C-iEVM without model reduction performs comparably to the \gls{osnn}, and both outperform the conventional \gls{evm}. The boundary case of a model reduction to a single \gls{ev} per class does not lead to an improvement in this evaluation. In contrast to this result, we note that the evaluation of Protocol~I on \cifar{100} performed much better with model reduction. However, the representation of a class via a single sample is challenging and heavily depends on the class distribution.
\section{Introduction} Milnor \cite{Mi} in $1968$ conjectured that any open $n$-manifold $M$ with $\mathrm{Ric}_M\ge 0$ has a finitely generated fundamental group. This conjecture remains open today. It was proven for manifolds with Euclidean volume growth by Anderson \cite{An} and Li \cite{Li} independently, and for manifolds with small diameter growth by Sormani \cite{Sor}. For background and relevant examples regarding the Milnor conjecture, see \cite{SS}. For $3$-manifolds, Schoen and Yau \cite{SY} developed minimal surface theory in dimension $3$ and proved that any $3$-manifold of positive Ricci curvature is diffeomorphic to $\mathbb{R}^3$. Recently, based on minimal surface theory, Liu \cite{Liu} proved that any $3$-manifold with $\mathrm{Ric}\ge 0$ either is diffeomorphic to $\mathbb{R}^3$ or its universal cover splits. In particular, this confirms the Milnor conjecture in dimension $3$. There is some interest in finding a proof of the Milnor conjecture in dimension $3$ that does not rely on minimal surface theory. Our main attempt is to accomplish this by using structure results for limit spaces of manifolds with Ricci curvature bounded below \cite{Co,CC1,CC2,CN}, equivariant Gromov-Hausdorff convergence \cite{FY} and the pole group theorem \cite{Sor}. \begin{thm1}\label{3} Let $M$ be an open $3$-manifold with $\mathrm{Ric}_M\ge 0$; then $\pi_1(M)$ is finitely generated. \end{thm1} For any open $3$-manifold $M$ with $\mathrm{Ric}_M\ge 0$ and any sequence $r_i\to\infty$, by Gromov's precompactness theorem \cite{Gro2}, we can pass to subsequences and consider tangent cones at infinity of $M$ and its Riemannian universal cover $\widetilde{M}$ coming from the sequence $r_i^{-1}\to 0$: \begin{center} $\begin{CD} (r^{-1}_i\widetilde{M},\tilde{p}) @>GH>> (C_\infty\widetilde{M},\tilde{o})\\ @VV\pi V @VV\pi V\\ (r^{-1}_iM,p) @>GH>> (C_\infty M,o). \end{CD}$ \end{center} We roughly illustrate our approach to prove Theorem \ref{3}. If $\pi_1(M,p)$ is not finitely generated, then we derive a contradiction by choosing some sequence $r_i\to\infty$ and eliminating all the possibilities for the dimensions of $C_\infty \widetilde{M}$ and $C_\infty M$ above in the Colding-Naber sense \cite{CN}, which are the integers $1,2$ or $3$. We also make use of reduction results by Wilking \cite{Wi} and Evans-Moser \cite{EM}. The first reduces any non-finitely generated fundamental group to an abelian one in any dimension, while the latter further reduces abelian non-finitely generated ones to subgroups of the additive group of rationals in dimension $3$. In particular, we can assume that $\pi_1(M)$ is torsion free if it is not finitely generated. One observation is that, if $\pi_1(M,p)$ is torsion free, then in the space $(C_\infty\widetilde{M},\tilde{o},G)$ above, the orbit $G\cdot\tilde{o}$ is not discrete (see Corollary \ref{non_dis_orb_cor}). This observation plays a key role in the proof. The author would like to thank Professor Xiaochun Rong and Professor Jeff Cheeger for suggestions during the preparation of this note. \section{Proof of Theorem \ref{3}} We start with the following reductions by Wilking and Evans-Moser. \begin{thm}\cite{Wi}\label{red_W} Let $M$ be an open manifold with $\mathrm{Ric}_M\ge 0$. If $\pi_1(M)$ is not finitely generated, then it contains a non-finitely generated abelian subgroup. \end{thm} \begin{thm}\cite{EM}\label{red_EM} Let $M$ be a $3$-manifold. If $\pi_1(M)$ is abelian and not finitely generated, then $\pi_1(M)$ is torsion free.
\end{thm} Evans-Moser \cite{EM} actually showed that $\pi_1(M)$ is a subgroup of the additive group of rationals. Being torsion free is sufficient for us to prove Theorem \ref{3}. Gromov \cite{Gro1} introduced the notion of short generators of $\pi_1(M,p)$. By path lifting, $\pi_1(M,p)$ acts on $\widetilde{M}$ isometrically. We say that $\{\gamma_1,...,\gamma_i,...\}$ is a set of short generators of $\pi_1(M,p)$ if \begin{center} $d(\gamma_1\tilde{p},\tilde{p})\le d(\gamma\tilde{p},\tilde{p})$ for all $\gamma\in\pi_1(M,p)$, \end{center} and for each $i$, \begin{center} $d(\gamma_i\tilde{p},\tilde{p})\le d(\gamma\tilde{p},\tilde{p})$ for all $\gamma\in\pi_1(M,p)-\langle\gamma_1,...,\gamma_{i-1}\rangle$, \end{center} where $\langle\gamma_1,...,\gamma_{i-1}\rangle$ is the subgroup generated by $\gamma_1,...,\gamma_{i-1}$. Let $M$ be an open $3$-manifold with $\mathrm{Ric}_M\ge 0$. We always denote $\pi_1(M,p)$ by $\Gamma$. Suppose that $\Gamma$ is not finitely generated; then, by Theorems \ref{red_W} and \ref{red_EM}, we can assume that $\Gamma$ is torsion free. Let $\{\gamma_1,...,\gamma_i,...\}$ be an infinite set of short generators at $p$. Since $\Gamma$ is a discrete group acting freely on $\widetilde{M}$, we have $r_i=d(\tilde{p},\gamma_i\tilde{p})\to \infty$. When considering a tangent cone at infinity of $\widetilde{M}$ coming from the sequence $r_i^{-1}\to 0$, we also take the $\Gamma$-action into account. Passing to subsequences if necessary, we assume that the following sequences converge in the equivariant Gromov-Hausdorff topology \cite{FY}: \begin{center} $\begin{CD} (r^{-1}_i\widetilde{M},\tilde{p},\Gamma) @>GH>> (\widetilde{Y},\tilde{y},G)\\ @VV\pi V @VV\pi V\\ (r^{-1}_iM,p) @>GH>> (Y=\widetilde{Y}/G,y). \end{CD}$ $(\star)$ \end{center} Colding-Naber \cite{CN} showed that the isometry group of any Ricci limit space is a Lie group. In particular, $G$ above, as a closed subgroup of $\mathrm{Isom}(\widetilde{Y})$, is a Lie group. We recall the dimension of Ricci limit spaces in the Colding-Naber sense \cite{CN}. A point $x$ in some Ricci limit space $X$ is $k$-regular if any tangent cone at $x$ is isometric to $\mathbb{R}^k$. Colding-Naber showed that there is a unique $k$ such that $\mathcal{R}_k$, the set of $k$-regular points, has full measure in $X$ with respect to any limit renormalized measure (see \cite{CC2,CN}). We regard such $k$ as the dimension of $X$ and denote it by $\dim(X)$. It is unknown whether in general the Hausdorff dimension of $X$ equals $\dim(X)$. For Ricci limit spaces coming from $3$-manifolds, the dimension in the Colding-Naber sense equals the Hausdorff dimension, which follows from Theorem 3.1 in \cite{CC2} and \cite{Hon1}. As indicated in the introduction, we prove Theorem \ref{3} by eliminating all possibilities for the dimensions of $Y$ and $\widetilde{Y}$ in ($\star$). There are three cases, and we rule out each of them, which finishes the proof of Theorem \ref{3}.\\ \noindent\textit{Case 1. $\dim(\widetilde{Y})=3$} (Lemma \ref{not_3});\\ \textit{Case 2. $\dim(Y)=\dim(\widetilde{Y})=2$} (Lemma \ref{not_2});\\ \textit{Case 3. $\dim(Y)=1$} (Lemma \ref{not_1}).\\ \begin{lem}\label{non_dis_orb} Let $(M_i,p_i)$ be a sequence of complete $n$-manifolds and $(\widetilde{M}_i,\tilde{p}_i)$ their universal covers. Suppose that the following sequence converges: $$(\widetilde{M}_i,\tilde{p}_i,\Gamma_i)\overset{GH}\longrightarrow(\widetilde{X},\tilde{p},G),$$ where $\Gamma_i=\pi_1(M_i,p_i)$ is torsion free for each $i$.
If the orbit $G\cdot\tilde{p}$ is discrete in $\widetilde{X}$, then there is $N$ such that $$\#\Gamma_i(1)\le N$$ for all $i$, where $\#\Gamma_i(1)$ denotes the number of elements in $$\Gamma_i(1)=\{\gamma\in \Gamma_i\ |\ d(\gamma\tilde{p}_i,\tilde{p}_i)\le 1\}.$$ \end{lem} \begin{proof} We claim that if a sequence $\gamma_i\in\Gamma_i$ satisfies $\gamma_i\overset{GH}\to g\in G$ with $g$ fixing $\tilde{p}$, then $g=e$, the identity element, and $\gamma_i=e$ for all $i$ sufficiently large. In fact, suppose that $\gamma_i\not= e$ for some subsequence. Since $\Gamma_i$ is torsion free, $\gamma_i$ has infinite order, so we always have $\mathrm{diam}(\langle\gamma_i\rangle\cdot \tilde{p}_i)=\infty$. Together with $d(\gamma_i\tilde{p}_i,\tilde{p}_i)\to 0$, we see that $G\cdot\tilde{p}$ cannot be discrete, which contradicts the assumption. Therefore, there exists $i_0$ large such that for all $g\in G(2)$ and any two sequences with $\gamma_i\overset{GH}\to g$ and $\gamma'_i\overset{GH}\to g$, $\gamma_i=\gamma'_i$ holds for all $i\ge i_0$. In particular, we conclude that $$\#\Gamma_i(1)\le \# G(2)<\infty$$ for all $i\ge i_0$. \end{proof} \begin{cor}\label{non_dis_orb_cor} Let $(M,p)$ be an open $n$-manifold with $\mathrm{Ric}_M\ge 0$ and $(\widetilde{M},\tilde{p})$ its universal cover. Suppose that $\Gamma=\pi_1(M,p)$ is torsion free. Then for any $s_i\to \infty$ and any convergent sequence $$(s_i^{-1}\widetilde{M},\tilde{p},\Gamma)\overset{GH}\longrightarrow(C_\infty\widetilde{M},\tilde{o},G),$$ the orbit $G\cdot\tilde{o}$ is not discrete. \end{cor} \begin{proof} The proof follows directly from Lemma \ref{non_dis_orb}. If $G\cdot\tilde{o}$ were discrete, then there would be $N$ such that $\#\Gamma(s_i)\le N$ for all $i$. On the other hand, $\#\Gamma(s_i)\to \infty$ because $\Gamma$ is torsion free and nontrivial, hence infinite. A contradiction. \end{proof} \begin{lem}\label{non_cnt_orb} Let $(M,p)$ be an open $n$-manifold with $\mathrm{Ric}_M\ge 0$ and $(\widetilde{M},\tilde{p})$ its universal cover. Suppose that $\Gamma=\pi_1(M,p)$ has infinitely many short generators $\{\gamma_1,...,\gamma_i,...\}$. Then in the following tangent cone at infinity of $\widetilde{M}$, $$(r_i^{-1}\widetilde{M},\tilde{p},\Gamma)\overset{GH}\longrightarrow(\widetilde{Y},\tilde{y},G),$$ the orbit $G\cdot\tilde{y}$ is not connected, where $r_i=d(\gamma_i\tilde{p},\tilde{p})\to\infty$. \end{lem} \begin{proof} On $r_i^{-1}\widetilde{M}$, $\gamma_i$ has displacement $1$ at $\tilde{p}$. By basic properties of short generators, $\gamma_i\tilde{p}$ has distance $1$ from the orbit $H_i\cdot\tilde{p}$, where $H_i=\langle\gamma_1,...,\gamma_{i-1}\rangle$. From the equivariant convergence $$(r^{-1}_i\widetilde{M},\tilde{p},H_i,\gamma_i)\overset{GH}\longrightarrow(\widetilde{Y},\tilde{y},H,g),$$ we conclude that $d(g\tilde{y},H\cdot\tilde{y})=1$. It is clear that $H$ contains $G_0$, the connected component of $G$ containing the identity. Thus $d(g\tilde{y},G_0\cdot\tilde{y})\ge 1$ and the orbit $G\cdot \tilde{y}$ is not connected. \end{proof} We recall the cone splitting principle, which follows from the splitting theorem for Ricci limit spaces \cite{CC1}. \begin{prop}\label{cone_split} Let $(X,p)$ be the limit of a sequence of complete $n$-manifolds $(M_i,p_i)$ with $\mathrm{Ric}_{M_i}\ge 0$. Suppose that $X=\mathbb{R}^k\times C(Z)$ is a Euclidean cone with vertex $p=(0,z)$. If there is an isometry $g\in \mathrm{Isom}(X)$ with $g(0,z)\not\in \mathbb{R}^k\times \{z\}$, then $X$ splits isometrically as $\mathbb{R}^{k+1}\times C(Z')$. \end{prop} \begin{lem}\label{not_3} Case 1 cannot happen.
\end{lem} \begin{proof} When $\dim(\widetilde{Y})=3$, $\widetilde{Y}$ is a non-collapsing limit space \cite{CC2}, that is, there is $v>0$ such that $$\mathrm{vol}(B_1(\tilde{p},r_i^{-1}\widetilde{M}))\ge v$$ for all $i$. By relative volume comparison, this implies that $\widetilde{M}$ has Euclidean volume growth: $$\lim\limits_{r\to\infty}\dfrac{\mathrm{vol}(B_r(\tilde{p}))}{r^n}\ge v.$$ By \cite{CC2}, $\widetilde{Y}$ is a Euclidean cone $\mathbb{R}^k\times C(Z)$ with vertex $\tilde{y}=(0,z)$, where $C(Z)$ does not contain any line and $z$ is the vertex of $C(Z)$. We rule out all the possibilities $k\in \{0,1,2,3\}$. If $k=3$, then $\widetilde{Y}=\mathbb{R}^3$, and thus $\widetilde{M}$ is isometric to $\mathbb{R}^3$ \cite{Co}; in particular, $\Gamma$, being a discrete subgroup of $\mathrm{Isom}(\mathbb{R}^3)$, is finitely generated, a contradiction. If $k=2$, then according to the co-dimension $2$ property of the singular set \cite{CC2}, actually $\widetilde{Y}=\mathbb{R}^3$, and we are reduced to the previous case. If $k=1$, then $\widetilde{Y}=\mathbb{R}\times C(Z)$. By Proposition \ref{cone_split}, the orbit $G\cdot \tilde{y}$ is contained in $\mathbb{R}\times\{z\}$. Applying Lemma \ref{non_cnt_orb}, we see that $G\cdot \tilde{y}$ is not connected. Note that a non-connected orbit in $\mathbb{R}$ is either a $\mathbb{Z}$-translation orbit or a $\mathbb{Z}_2$-reflection orbit. In particular, the orbit $G\cdot\tilde{y}$ must be discrete. This contradicts Corollary \ref{non_dis_orb_cor}. If $k=0$, then $\widetilde{Y}=C(Z)$ contains no lines. Again by Proposition \ref{cone_split}, the orbit $G\cdot\tilde{y}$ must be the single point $\tilde{y}$, which contradicts Lemma \ref{non_cnt_orb}. \end{proof} \begin{lem}\label{not_2} Let $(M,p)$ be an open $n$-manifold with $\mathrm{Ric}_M\ge 0$ and $(\widetilde{M},\tilde{p})$ its universal cover. Assume that $\Gamma=\pi_1(M,p)$ is torsion free. Then for any $s_i\to \infty$ and any convergent sequence \begin{center} $\begin{CD} (s^{-1}_i\widetilde{M},\tilde{p},\Gamma) @>GH>> (C_\infty\widetilde{M},\tilde{o},G)\\ @VV\pi V @VV\pi V\\ (s^{-1}_iM,p) @>GH>> (C_\infty M,o), \end{CD}$ \end{center} $\dim(C_\infty\widetilde{M})=\dim(C_\infty M)$ cannot happen. In particular, Case 2 cannot happen. \end{lem} \begin{proof}[Proof of Lemma \ref{not_2}] We claim that $G$ is a discrete group when $\dim({C_\infty\widetilde{M}})=\dim(C_\infty M)=k$. If the claim holds, then the desired contradiction follows from Corollary \ref{non_dis_orb_cor}. It remains to verify the claim. Suppose that $G_0$ is non-trivial; then we pick $g\not=e$ in $G_0$. Note that there is a $k$-regular point $\tilde{q}\in C_\infty \widetilde{M}$ such that $d(g\tilde{q},\tilde{q})>0$ and $\tilde{q}$ projects to a $k$-regular point $q\in C_\infty M$. In fact, let $\mathcal{R}_k(C_\infty M)$ be the set of $k$-regular points in $C_\infty M$. Since $\mathcal{R}_k(C_\infty M)$ is dense in $C_\infty M$, its pre-image $\pi^{-1}(\mathcal{R}_k(C_\infty M))$ is also dense in $C_\infty \widetilde{M}$. Let $\tilde{q}$ be a point in this pre-image such that $d(g\tilde{q},\tilde{q})>0$. Note that any tangent cone at $\tilde{q}$ splits off an $\mathbb{R}^k$-factor isometrically. By Proposition 3.78 in \cite{Hon2} (also see Corollary 1.10 in \cite{KL}), it follows that any tangent cone at $\tilde{q}$ is isometric to $\mathbb{R}^k$. In other words, $\tilde{q}$ is $k$-regular. Along a one-parameter subgroup of $G_0$ through $g$, we can choose a sequence of elements $g_j\in G_0$ with $d(g_j\tilde{q},\tilde{q})=1/j\to 0$. We consider a tangent cone at $\tilde{q}$ and at $q$, respectively, coming from the sequence $j\to\infty$.
Passing to subsequences if necessary, we obtain \begin{center} $\begin{CD} (jC_\infty\widetilde{M},\tilde{q},G,g_j) @>GH>> (C_{\tilde{q}} C_\infty\widetilde{M},\tilde{o}',H, h)\\ @VV\pi V @VV\pi V\\ (jC_\infty M,q) @>GH>> (C_qC_\infty M,o'), \end{CD}$ \end{center} with $C_{\tilde{q}} C_\infty\widetilde{M}/H=C_qC_\infty M$ and $d(h \tilde{o}',\tilde{o}')=1$. On the other hand, since both $q$ and $\tilde{q}$ are $k$-regular, $C_{\tilde{q}} C_\infty\widetilde{M}=C_qC_\infty M=\mathbb{R}^k$. This is a contradiction, since $\mathbb{R}^k/H$ cannot be isometric to $\mathbb{R}^k$ when some $h\in H$ displaces $\tilde{o}'$ by $1$. Hence the claim holds. \end{proof} To rule out the last case $\dim(Y)=1$, we recall Sormani's pole group theorem \cite{Sor}. We say that a length space $X$ has a pole at $x\in X$ if, for all $y\not=x$, there is a ray starting from $x$ and going through $y$. \begin{thm}\cite{Sor}\label{non_polar} Let $(M,p)$ be an open $n$-manifold with $\mathrm{Ric}_M\ge 0$ and $(\widetilde{M},\tilde{p})$ its universal cover. Suppose that $\Gamma=\pi_1(M,p)$ has infinitely many short generators $\{\gamma_1,...,\gamma_i,...\}$. Then in the following tangent cone at infinity of $M$, $$(r_i^{-1}M,p)\overset{GH}\longrightarrow(Y,y),$$ $Y$ cannot have a pole at $y$, where $r_i=d(\gamma_i\tilde{p},\tilde{p})\to\infty$. \end{thm} \begin{lem}\label{not_1} Case 3 cannot happen. \end{lem} \begin{proof} By \cite{Hon1} (also see \cite{Chen}), $Y$ is a topological manifold of dimension $1$. Since $Y$ is non-compact, $Y$ is either a line $(-\infty,\infty)$ or a half line $[0,\infty)$. By Theorem \ref{non_polar}, $Y$ cannot have a pole at $y$. Thus there is only one possibility left: $Y=[0,\infty)$ with $y$ not the endpoint $0\in [0,\infty)$. Put $d=d_Y(0,y)>0$. We rule out this case by a rescaling argument and Lemmas \ref{not_3} and \ref{not_2} above. (In general, it is possible for an open manifold to have a tangent cone at infinity equal to $[0,\infty)$ with the base point not corresponding to $0$; see Example \ref{tree}.) Let $\alpha(t)$ be a unit speed ray in $M$ starting from $p$ and converging to the unique ray from $y$ in $Y=[0,\infty)$ with respect to the sequence $(r_i^{-1}M,p)\overset{GH}\longrightarrow(Y,y)$. Let $x_i\in r_i^{-1}M$ be a sequence of points converging to $0\in Y$; then $r_i^{-1}d_M(p,x_i)\to d$. For each $i$, let $c_i(t)$ be a minimal geodesic from $x_i$ to $\alpha(dr_i)$, and let $q_i$ be a closest point to $p$ on $c_i$. We reparametrize $c_i$ so that $c_i(0)=q_i$. With respect to $(r_i^{-1}M,p)\overset{GH}\longrightarrow(Y,y)$, $c_i$ subconverges to the unique segment between $0$ and $2d\in [0,\infty)$. Clearly, $$r_i^{-1}d_M(x_i,\alpha(dr_i))\to 2d, \quad r_i^{-1}d_i\to 0,$$ where $d_i=d_M(p,c_i(0))$. If $d_i\to\infty$, then we rescale $M$ and $\widetilde{M}$ by $d_i^{-1}\to 0$. Passing to subsequences if necessary, we obtain \begin{center} $\begin{CD} (d^{-1}_i\widetilde{M},\tilde{p},\Gamma) @>GH>> (\widetilde{Y}',\tilde{y}',G')\\ @VV\pi V @VV\pi V\\ (d^{-1}_iM,p) @>GH>> (Y',y'). \end{CD}$ \end{center} If $\dim(Y')=1$, then we know that $Y'=(-\infty,\infty)$ or $[0,\infty)$. On the other hand, since $$d_i^{-1}d_M(c_i(0),x_i)\to\infty,\quad d_i^{-1}d_M(c_i(0),\alpha(dr_i))\to\infty, \quad d_i^{-1}d_M(c_i,p)=1,$$ $c_i$ subconverges to a line $c_\infty$ in $Y'$ with $d(c_\infty,y')=1$. Clearly this can happen neither in $(-\infty,\infty)$ nor in $[0,\infty)$. If $\dim(\widetilde{Y}')=3$, then $\widetilde{M}$ has Euclidean volume growth and thus $\dim(\widetilde{Y})=3$. This case is already covered by Lemma \ref{not_3}. The only situation left is $\dim(\widetilde{Y}')=\dim(Y')=2$.
By Lemma \ref{not_2}, this also leads to a contradiction. In conclusion, $d_i\to\infty$ cannot happen. If there is some $R>0$ such that $d_i\le R$ for all $i$, then on $M$, $c_i$ subconverges to a line $c$ with $c(0)\in B_{2R}(p)$. Consequently, $M$ splits off a line isometrically \cite{CG}, which contradicts $Y=[0,\infty)$. This completes the proof. \end{proof} \begin{exmp}\label{tree} We construct a surface $(S,p)$ isometrically embedded in $\mathbb{R}^3$ such that $S$ has a tangent cone at infinity equal to $[0,\infty)$, but $p$ does not correspond to $0$. We first construct a subset of the $xy$-plane by gluing intervals. Let $r_i\to\infty$ be a positive sequence with $r_{i+1}/r_i\to\infty$. Starting with an interval $I_1=[-r_1,r_2]$, we attach a second interval $I_2=[-r_3,r_4]$ perpendicularly to $I_1$ by identifying $r_2\in I_1$ with $0\in I_2$. Repeating this process, once $I_{k}$ is attached, we attach the next interval $I_{k+1}=[-r_{2k+1},r_{2k+2}]$ perpendicularly to $I_k$ by identifying $r_{2k}\in I_k$ with $0\in I_{k+1}$. In the end, we get a subset $T$ of the $xy$-plane consisting of segments. We can smooth the $\epsilon$-neighborhood of $T$ in $\mathbb{R}^3$ so that it has sectional curvature $\ge -C$ for some $\epsilon,C>0$. We call this surface $S$ and let $p\in S$ be a point closest to $0\in I_1$ as the base point. If we rescale $(S,p)$ by $r_{2k+1}^{-1}$, then $$(r_{2k+1}^{-1}S,p)\overset{GH}\longrightarrow ([-1,\infty),0)$$ because $r_{i+1}/r_i\to\infty$. In other words, $S$ has a tangent cone at infinity equal to a half line, but the base point does not correspond to the endpoint of this half line. \end{exmp}
\section{Model Description and PI Controller design} \label{sec:model} The SC shown in Fig.~\ref{fig:implementation} can be modeled as a discrete-time linear system discretized at 1~Hz, as shown in Fig.~\ref{fig:model}(a). Let $\tau_m(k)$ and $\tau_c(k)$ be the periods of the PPS signals at the input and at the output of the SC, respectively, at the $k$-th sampling second. Since both signals are affected by phase and frequency noises and by a relative frequency offset, the time synchronization error $e(k)$ between them is simply given by: \begin{equation} \label{eq:error1} e(k+1) = e(k) + \tau_m(k) - \tau_c(k) . \end{equation} Therefore, the equivalent model depicted in Fig.~\ref{fig:model}(a) can be easily reduced to the classic SC model shown in Fig.~\ref{fig:model}(b), where symbols $t_m$ and $t_c$ denote the reference time and the time measured by the SC, respectively. Based on this simplified model, the SC in the $z$-domain basically consists of two subsystems, i.e. the clock itself, whose transfer function is \begin{equation} C(z) = \frac{1}{z-1} , \label{eq:clock} \end{equation} and the PI controller, which can be obtained from the classic backward Euler integration method, i.e. \begin{equation} P(z) = K_P + K_I \frac{z}{z-1}, \label{eq:control} \end{equation} where $K_P$ and $K_I$ are the proportional and integral gains, respectively. Of course, SC stability depends on the position of the poles of the closed-loop transfer function \begin{equation} H(z)\!=\!\frac{P(z)C(z)}{1+P(z)C(z)} \!=\! \frac{(z-1)K_P + zK_I}{(z-1)^2 \!+\! (z-1)K_P + zK_I}. \label{eq:cltf} \end{equation} Moreover, the coefficients $K_P$ and $K_I$ in~\eqref{eq:control} should be tuned in order to meet given performance requirements, in terms of convergence time or output uncertainty. To this end,~\eqref{eq:clock}, \eqref{eq:control} and~\eqref{eq:cltf} can be expressed using difference equations. Nevertheless, the control design formulation is slightly complicated by the fact that i) the resolution of the SC is $1/f_{XO}$ (with $f_{XO} = 200$~MHz), and ii) the nominal frequency of the emulated VCO is $f_0 = 12.8$~kHz. Therefore, all time quantities (as well as the controller output) have to be expressed in ticks of an ideal 200~MHz oscillator rather than in seconds. Thus, if $n_t=f_{XO}/f_0$ denotes the number of ticks in one nominal period, \begin{figure}[t] \centering \begin{tabular}{c} \includegraphics[page=3, trim={3.3cm 5.5cm 3cm 5.5cm}, clip, width=\columnwidth]{figures/models} \\ (a) \\ \includegraphics[page=1, trim={6cm 6.5cm 6cm 6.7cm}, clip, width=0.85\columnwidth]{figures/models} \\ (b) \end{tabular} \caption{Equivalent models of the implemented SC before (a) and after (b) reduction.} \label{fig:model} \end{figure} then the dynamics of the frequency of the disciplined emulated VCO are given by \[ f_c(k+1) = f_0 (1 + \alpha) + b \eta(k) + b u(k) , \] where $b = 1/n_t$, $\alpha$ is the relative frequency offset of the SC local oscillator with respect to the input reference signal when the SC is in open loop, $\eta(k)$ represents the jitter of the SC accumulated during one second and, finally, $u(k)$ is the control action.
As for the error, it follows immediately from~\eqref{eq:error1} that \[ e_q(k+1) = e_q(k) + \tau_{m_q}(k) - \tau_{c_q}(k) , \] where $e_q(k)$ is the time error $e(k)$ expressed in ticks, \[ \tau_{m_q}(k)=n_tf_0 + \nu(k) \] is the period of the PPS reference input signal expressed in ticks, $\nu(k)$ is the jitter of the input PPS signal and \[ \tau_{c_q}(k)=n_tf_c(k) \] is the period of the PPS output of the SC, again expressed in ticks. The control action dynamics are instead given by \[ u(k+1) = u(k) + K_P(e_q(k+1) - e_q(k)) + K_I e_q(k+1) . \] If the vector $q(k) = [e_q(k), f_c(k), u(k)]^T$ (with $q(0) = [0, f_0, 0]^T$) denotes the aggregated state of the system in closed loop, the previous equations can be rewritten more compactly as \[ q(k+1) = A q(k) + G \tau_{m_q}(k) + C \eta(k) + F f_0, \] where \[ A = \begin{bmatrix} 1 & -n_t & 0 \\ b K_I & -(K_P + K_I) & b \\ K_I & -n_t (K_P + K_I) & 1 \end{bmatrix},\,\,\, G = \begin{bmatrix} 1 \\ b (K_P + K_I) \\ K_P + K_I \end{bmatrix} , \] \[ C = \begin{bmatrix} 0 \\ b \\ 0 \end{bmatrix}\,\,\, \mbox{and}\,\,\, F = \begin{bmatrix} 0 \\ (1 + \alpha) \\ 0 \end{bmatrix} . \] \begin{figure}[t] \centering \includegraphics[width=\columnwidth]{figures/surf_std} \caption{Standard deviation of the synchronization error, given by the square root of~\eqref{eq:Variance}, as a function of the gains $K_P$ and $K_I$ within the stability region of the SC.} \label{fig:Ki_Kp} \end{figure} Since $q(k+1)$ is a random vector, its mean value is given by \[ \E{q(k+1)} = A \E{q(k)} + G \E{\tau_{m_q}(k)} + Ff_0 , \] where $\E{\cdot}$ denotes the expectation operator. To compute the uncertainty of $q(k+1)$ generated by the joint effect of $\nu(k)$ and $\eta(k)$, to a first approximation we assume that $\nu(k)\sim\mathcal{N}(0, \sigma_\nu^2)$ and $\eta(k)\sim\mathcal{N}(0, \sigma_\eta^2)$ are white. As a result, the covariance matrix of $q(k+1)$ is given by \begin{equation} \label{eq:CovFirst} Q(k+1) = A Q(k) A^T + G G^T \sigma_\nu^2 + C C^T \sigma_\eta^2 . \end{equation} Of course, if $\nu(k)$ and $\eta(k)$ are not white or normally distributed,~\eqref{eq:CovFirst} holds just approximately. Notice that, by setting the initial condition $Q(0)$,~\eqref{eq:CovFirst} can be used to compute the uncertainty of the state vector in closed form. If the controller gains are set so as to make~\eqref{eq:cltf} stable,~\eqref{eq:CovFirst} reaches a steady-state equilibrium, i.e., there exists a sufficiently large value $\bar k$ such that $Q(k+1) = Q(k)$, $\forall k > \bar k$. Moreover, the steady-state variance of the time error with respect to the reference is given by the entry $(1,1)$ of $Q(k)$. As a consequence, by computing the equilibrium of~\eqref{eq:CovFirst}, it follows that the synchronization error variance is \begin{equation} \label{eq:Variance} \sigma_e^2 = \E{(e(k) - \E{e(k)})^2} = \frac{2 (\sigma_\nu^2 + \sigma_\eta^2)}{K_P (4 - K_I - 2 K_P)} . \end{equation} Since $K_P > 0$, $K_I \geq 0$ and $\sigma_e^2 > 0$, it follows that the SC is stable for $0 < K_P < 2$ and $K_I < 4 - 2 K_P$. Within this region,~\eqref{eq:Variance} is minimized when the denominator is maximized. Therefore, the values of $K_P$ and $K_I$ minimizing the variance of the synchronization error can be determined from Fig.~\ref{fig:Ki_Kp}, which shows the behavior of $\sigma_e$ as a function of $K_P$ and $K_I$ for values of $\sigma_\nu$ and $\sigma_\eta$ consistent with those of the system at hand and reported in Section~\ref{sec:results}.
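As a quick numerical cross-check of~\eqref{eq:Variance}, the steady-state covariance can also be obtained by solving the discrete Lyapunov equation associated with~\eqref{eq:CovFirst}; in the following Python sketch the noise standard deviations are placeholder values, not the measured ones of Section~\ref{sec:results}:
\begin{verbatim}
import numpy as np
from scipy.linalg import solve_discrete_lyapunov

f_xo, f0 = 200e6, 12.8e3
n_t, b = f_xo / f0, f0 / f_xo
Kp, Ki = 1.0, 0.1                 # example gains inside the region
sig_nu, sig_eta = 30.0, 20.0      # assumed jitter std devs (ticks)

A = np.array([[1.0,    -n_t,             0.0],
              [b * Ki, -(Kp + Ki),       b  ],
              [Ki,     -n_t * (Kp + Ki), 1.0]])
G = np.array([[1.0], [b * (Kp + Ki)], [Kp + Ki]])
C = np.array([[0.0], [b],             [0.0]])

assert np.max(np.abs(np.linalg.eigvals(A))) < 1.0  # stable loop
W = sig_nu**2 * (G @ G.T) + sig_eta**2 * (C @ C.T)
Q = solve_discrete_lyapunov(A, W)                  # Q = A Q A' + W

var_e = 2 * (sig_nu**2 + sig_eta**2) / (Kp * (4 - Ki - 2 * Kp))
print(Q[0, 0], var_e)                              # should coincide
\end{verbatim}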
However, it is worth emphasizing that the trend and the position of the minimum do not depend on the variances of $\nu$ and $\eta$. From this analysis it follows that if a prompt response is required, a dead-beat controller, with $K_P = K_I = 1$, works well. Otherwise, if the uncertainty has to be minimized, $K_P = 1$ and $K_I = \varepsilon > 0$ is the right choice, with $\varepsilon$ being a sufficiently small constant. However, the smaller $K_I$, the longer the convergence time becomes.
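This trade-off can be illustrated with a short Monte-Carlo simulation of the state recursion above; again, the jitter levels are illustrative assumptions rather than measured values:
\begin{verbatim}
import numpy as np

def simulate(Kp, Ki, steps=600, sig_nu=30.0, sig_eta=20.0,
             alpha=1e-6, seed=0):
    """Closed-loop SC simulation following the state equations."""
    f_xo, f0 = 200e6, 12.8e3
    n_t, b = f_xo / f0, f0 / f_xo
    e, fc, u = 0.0, f0, 0.0   # error (ticks), VCO freq, control
    rng = np.random.default_rng(seed)
    err = np.empty(steps)
    for k in range(steps):
        tau_m = n_t * f0 + rng.normal(0.0, sig_nu)  # PPS period
        e_new = e + tau_m - n_t * fc                # error update
        u += Kp * (e_new - e) + Ki * e_new          # PI action
        fc = f0 * (1 + alpha) + b * rng.normal(0.0, sig_eta) + b * u
        e = e_new
        err[k] = e
    return err

for Kp, Ki in [(1.0, 1.0), (1.0, 0.05)]:  # dead-beat vs. slow I
    e = simulate(Kp, Ki)
    print(Kp, Ki, e[300:].std())          # steady-state jitter
\end{verbatim}
With these settings, the dead-beat configuration settles almost immediately but exhibits a larger steady-state jitter than the configuration with a small integral gain, which converges more slowly.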
the resolution of the error measurements is given by $1/f_{XO}$, with $f_{XO} = 200$~MHz, while the resolution of the local clock is $1/f_{c}$, with $f_c = 12.8$~kHz. We therefore express the previous quantities in ticks accumulated by the respective clock, rather than in seconds. To this end, let us denote by $n_t$ the number of ticks accumulated by the reference for every period of the SC, i.e. \[ n_t = \frac{f_{XO}}{f_c} , \] which equals 15625 for the values above. The dynamics of the SC in every period $T_m$ can instead be represented by \[ \tau_c(k+1) = b \tau (1 + \alpha) + b \eta(k) + b u(k) , \] where $b = 1/n_t$, $\tau$ is the nominal period (in terms of number of ticks) accumulated by the SC in every period $T_m$, $\alpha$ is the relative frequency offset of $f_c$, $\eta(k)$ represents the jitter of the SC accumulated during $T_m$ and, finally, $u(k)$ is the control action. Notice that, except for $\tau_c(k)$, all the other quantities are expressed in ticks at the oscillator frequency $f_{XO}$. Moreover, all the quantities in ticks are denoted without the superscript $r$. For what concerns the error, it can be easily shown that \[ e(k+1) = e(k) + \tau_m(k) - n_t \tau_c(k) , \] while for the control action \[ u(k+1) = u(k) + K_P(e(k+1) - e(k)) + K_I e(k+1) . \] Denoting by $q(k) = [e(k), \tau_c(k), u(k)]^T$ the aggregated state of the system in closed loop, with initial conditions $q(0) = [0, \tau_c(0), 0]^T$, the previous equations can be rewritten more compactly as \[ q(k+1) = A q(k) + G \tau_m(k) + C \eta(k) + F \tau , \] where \[ A = \begin{bmatrix} 1 & -n_t & 0 \\ b K_I & -(K_P + K_I) & b \\ K_I & -n_t (K_P + K_I) & 1 \end{bmatrix},\,\,\, G = \begin{bmatrix} 1 \\ b (K_P + K_I) \\ K_P + K_I \end{bmatrix} , \] \[ C = \begin{bmatrix} 0 \\ b \\ 0 \end{bmatrix}\,\,\, \mbox{and}\,\,\, F = \begin{bmatrix} 0 \\ b (1 + \alpha) \\ 0 \end{bmatrix} , \] where the $(1,2)$ entry of $A$ is $-n_t$, consistently with the error equation above. Since $q(k+1)$ is a random vector, its mean value is given by \[ \E{q(k+1)} = A \E{q(k)} + G \E{\tau_m(k)} + F\tau , \] where $\E{\cdot}$ denotes the expectation operator. To compute the uncertainty of $q(k+1)$ generated by the joint effect of $\nu(k)$ and $\eta(k)$, to a first approximation we assume that $\nu(k)\sim\mathcal{N}(0, \sigma_\nu^2)$ and $\eta(k)\sim\mathcal{N}(0, \sigma_\eta^2)$. As a result, the covariance matrix of $q(k+1)$ is given by \begin{equation} \label{eq:CovFirst} Q(k+1) = A Q(k) A^T + G G^T \sigma_\nu^2 + C C^T \sigma_\eta^2 . \end{equation} Of course, if the Gaussianity and/or whiteness properties of the input quantities do not hold, \eqref{eq:CovFirst} holds only approximately. Notice that, by setting $Q(0)$,~\eqref{eq:CovFirst} can be used to compute the uncertainty of the state vector in closed form. If the parameters are chosen to make~\eqref{eq:cltf} stable,~\eqref{eq:CovFirst} can reach a steady-state equilibrium, i.e., there exists a sufficiently large value $\bar k$ such that $Q(k+1) = Q(k)$, $\forall k > \bar k$. Moreover, the variance of the SC jitter over 1 s is equivalent to the steady-state value of $\E{(e(k) - \E{e(k)})^2}$, which is the entry $(1,1)$ of $Q(k)$. As a consequence, by computing the equilibrium of~\eqref{eq:CovFirst} and considering only the first entry, it follows that \begin{equation} \label{eq:Variance} \E{(e(k) - \E{e(k)})^2} = \frac{2 (\sigma_\nu^2 + \sigma_\eta^2)}{K_P (4 - K_I - 2 K_P)} .
\end{equation} Combining this equation with the constraints $K_P > 0$, $K_I > 0$ (required for system stability) and $\E{(e(k) - \E{e(k)})^2} > 0$, it immediately follows that \[ 0 < K_P < 2\,\,\, \mbox{and}\,\,\, K_I < 4 - 2 K_P . \] Within this region, $\E{(e(k) - \E{e(k)})^2}$ is minimized once the denominator is maximized. Suitable values for these parameters are visible in Fig.~\ref{fig:Ki_Kp}. \begin{figure}[t] \centering \includegraphics[width=\columnwidth]{../figures/Ki_Kp} \caption{Possible choices of $K_P$ and $K_I$ that stabilize the system, with the related value of the denominator of~\eqref{eq:Variance}.} \label{fig:Ki_Kp} \end{figure} From this analysis, it follows that if a prompt response is needed, a dead-beat controller with $K_P = K_I = 1$ is the appropriate choice. Otherwise, if the uncertainty has to be minimized, $K_P = 1$ and $K_I = \varepsilon > 0$ is the right choice, with $\varepsilon$ being a sufficiently small constant. For instance, if $\sigma_\nu=\sigma_\eta$, \eqref{eq:Variance} yields a steady-state standard deviation equal to $2\sigma_\nu$ for the dead-beat controller, which drops to $\sqrt{2}\,\sigma_\nu$ as $K_I\rightarrow 0$ with $K_P=1$. Notice, however, that the smaller $K_I$ is, the longer the convergence time. \section{Conclusion} \label{sec:conclusion} This paper describes the design criteria of a Servo Clock (SC) implemented on a single-board computer to discipline the data acquisition stage of a low-cost Phasor Measurement Unit (PMU). The SC has been developed in the context of the `OpenPMU' project. The SC implementation relies on a BeagleBone Black (BBB) embedded platform. In particular, the SC runs mainly in one of the Programmable Real-time Units (PRUs) of the BBB microprocessor, with no need for additional hardware. The PPS input reference signal could be provided by a common master clock shared among multiple PMUs within the same substation. The experimental results are quite consistent with the theoretical ones and highlight the correct operation of the SC based on a PI controller correcting possible frequency offsets between the local oscillator of the BBB and the input reference. The controller has been optimized in order to minimize the standard deviation of the short-term synchronization error. In the future, more sophisticated (e.g. adaptive) techniques will be adopted to further optimize the controller parameters on-line, e.g. to handle changes in environmental or processing-load conditions. \section{Servo Clock Architecture} \label{sec:implementation} Unlike typical PMU implementations, the acquisition stage of the `OpenPMU' platform described in~\cite{Zhao2017} is fairly simple and relies on a BeagleBone Black (BBB) board. This is a low-cost commercial embedded system, which has recently been used in a variety of projects, including I/O signal synchronization~\cite{Alanwar2017} and PMU algorithm prototyping~\cite{Tosato2018}. A distinctive feature of the BBB is its Sitara AM3358 microprocessor, which includes a 1-GHz ARM Cortex-A8 core running a Linux kernel and two 200-MHz co-processors, called Programmable Real-time Units (PRUs). The PRUs can be used to perform specific real-time tasks, since they can be programmed at a low level, i.e. without using any operating system. The PRUs are provided with a rich set of peripherals (including timers), in addition to direct access to General-Purpose Input-Output (GPIO) pins. On the other hand, the PRU computational capabilities are limited: no Floating-Point Unit is present, local memory is quite small (only 8~KB, plus 12~KB of shared DRAM) and asynchronous interrupt handling is not possible.
Despite such limitations, the basic idea of the solution proposed in this paper is to use one of the PRUs to fully implement an SC running in parallel with the main ARM core in order to {\em synchronize} and {\em syntonize} the data acquisition system described in~\cite{Zhao2017}. The architecture of the proposed SC is shown in Fig.~\ref{fig:implementation}. A purely software indirect frequency synthesizer is used to generate the 12.8-kHz signal clocking the Analog-to-Digital Converter (ADC) of the acquisition stage. Since a real 12.8-kHz Voltage-Controlled Oscillator (VCO) is not available on the BBB, it is emulated by means of one of the timers of the PRU, clocked at 200~MHz and configured to be reloaded automatically. While the nominal timeout to be loaded into the timer is 15625 ticks, its actual value changes as a function of the corrective action performed by the internal controller. The 12.8~kHz signal is used to increment both the system clock (which is implemented as a software counter properly initialized with a UTC timestamp as soon as it is available) and a second counter (labeled as {\em PPS generator} in Fig.~\ref{fig:implementation}) that generates a PPS signal. The difference in time (measured with a resolution of 5~ns) between the external (i.e. reference) PPS signal and the local one is integrated by a digital accumulator in order to compute the time error, which finally drives a Proportional-Integral (PI) controller adjusting the clock rate, as customary in SC design. The equivalence between the system considered and a classic SC is demonstrated in Section~\ref{sec:model}. \begin{figure}[t] \centering \includegraphics[page=7, trim={2cm 3.5cm 2cm 3.5cm}, clip, width=\columnwidth]{figures/models} \caption{Architecture of the SC implemented on the PRU of the BBB.} \label{fig:implementation} \end{figure} \section{Introduction} \label{sec:Intro} Time synchronization is a crucial part of every distributed measurement system. Synchrophasor measurement in transmission and distribution systems is a well-known application field in which time synchronization plays a key role, e.g. for power system state estimation~\cite{Ree10}, topology detection~\cite{Cavraro15} or loss-of-mains protection~\cite{Laverty15}. Phasor Measurement Units (PMUs) are complex instruments, usually requiring high-end hardware, that perform timestamped measurements of voltage and current amplitude, phase, frequency and rate of change of frequency (ROCOF), synchronized to Coordinated Universal Time (UTC). From a functional point of view, PMUs can be decomposed into three component parts: an acquisition subsystem, a synchronization module and a digital signal processing subsystem. Several scientific contributions about the impact of PMU synchronization uncertainty in power systems have already been proposed in the literature~\cite{Bazerque2016}. For instance, analytical methods for mitigating the effect of time synchronization on grid state estimation are presented in~\cite{Todescato2017, Yang2013}. Some insight from a real network can be found in the work by Della Giustina et al.~\cite{Giustina2014}, who analyze the timing requirements for power quality measurements.
The IEEE Standard C37.242-2013 on PMU synchronization, calibration, testing, and installation highlights that the main contributors to estimation uncertainty can be identified in (i) synchronization issues, (ii) noise and distortion in the input channel and acquisition circuitry, (iii) intrinsic accuracy limits of the adopted digital signal processing algorithm and, finally, (iv) possible communication problems~\cite{c37.242-2013}. In general, no information on the weight of these contributions on overall PMU accuracy is available. The IEEE Standard C37.118.1-2011 and its Amendment IEEE C37.118.1a-2014 express the overall synchrophasor measurement accuracy with a single parameter, namely the Total Vector Error (TVE), which depends on both amplitude and phase estimation uncertainties. While both documents prescribe various TVE boundaries in different testing conditions~\cite{c37.118-2011,c37.118-2014}, no specific limits for time synchronization uncertainty or jitter are explicitly reported. Under the overoptimistic assumption that the amplitude measurement uncertainty is negligible, time errors within $\pm31$ $\mu$s or $\pm26$ $\mu$s for 50 Hz or 60 Hz systems, respectively, are small enough to keep the phase estimation accuracy below 10 mrad (indeed, a time error $\Delta t$ corresponds to a phase error $2\pi f_0 \Delta t$ at the nominal frequency $f_0$) and, consequently, TVE~$\leq1$\%, which is the strictest limit reported in~\cite{c37.118-2011,c37.118-2014}. However, since amplitude and phase estimation uncertainties are usually both significant in practice, ``a time source that reliably provides time, frequency, and frequency stability at least 10 times better than the values above is highly recommended''~\cite{c37.118-2011}. Thus, as a rule of thumb, synchronization accuracy within $\pm1$ $\mu$s is currently considered to be adequate in most power system applications, as the corresponding maximum phase errors (on the order of $\pm0.4$ mrad) are usually much smaller than those due to other uncertainty contributions. However, the evolution of smart active distribution grids as well as the emerging need to measure phasor angle differences smaller than 1 mrad (e.g. over short lines) could demand more accurate synchrophasor measurements than those possible nowadays~\cite{Borghetti11, Wen15, Barchi2015}. As a result, tighter synchronization accuracy might be needed in the future. It is worth emphasizing that PMUs require not only {\em synchronization} (i.e. time offset compensation with respect to UTC) to properly timestamp measurement data, but also {\em syntonization} (i.e. clock rate adjustment) to enable coherent sampling of voltage or current waveforms in ideal conditions. Commercial PMUs generally include specific hardware modules for time synchronization, most notably GPS receivers or IRIG-B (Inter-Range Instrumentation Group time codes) decoders, which are supposed to be used to discipline the sampling clock as well, e.g. through some hardware Phase-Locked Loop (PLL) or other more sophisticated custom techniques. For instance, in~\cite{Yao2018} Yao et al. describe a way to compensate for the sampling time errors caused by the division remainder between the desired sampling rate and the oscillator frequency. An alternative approach to achieve both {\em synchronization} and {\em syntonization} is through Servo Clocks (SC), e.g. based on Proportional-Integral (PI) controllers~\cite{Exel2013, Eidson2006c}. The most common examples of SCs are those developed for Ordinary and Boundary Clocks of IEEE~1588 devices~\cite{Correll05, Machizawa2008, Ferencz2013}.
In general, there are just a few comprehensive analyses of SCs. One of them is provided by Chen et al.~\cite{Chen2017}, who propose an optimized SC for distributed motion control systems based on EtherCAT. However, the design and implementation of SCs for PMUs is a topic seldom covered in the scientific literature. Even the recently released IEEE Standard C37.238-2017, dealing with a profile of the IEEE~1588 Precision Time Protocol (PTP) for power system applications, does not report any indication about SC design~\cite{c37.238-2017}. This research work is part of the `OpenPMU' project\footnote{http://www.OpenPMU.org}, an international initiative whose purpose is to develop a fully open-source PMU for power system analysis and research~\cite{Laverty2013}. In particular, this paper deals with an SC for the `OpenPMU' platform described in~\cite{Zhao2017}. The SC has been designed and optimized to minimize the synchronization errors due to the local crystal oscillator (XO), and it also generates the signal used to sample the input waveform. The main advantage of the proposed solution is that the SC relies only on a Programmable Real-Time Unit (PRU) available in the embedded platform, with no need for specific synchronization hardware except for an external Pulse Per Second (PPS) reference signal, which could be provided by a common GPS receiver (or substation clock) and shared among multiple PMUs. The rest of the paper is structured as follows. First, in Section~\ref{sec:implementation}, the resources available to implement the SC for the `OpenPMU' platform are briefly described. Then, in Section~\ref{sec:model}, a mathematical model of the SC is defined and the related design criteria are explained. Finally, in Section~\ref{sec:results} the results of various experiments showing the SC performance are reported. Section~\ref{sec:conclusion} concludes the paper and outlines future work. \section{Experimental Results} \label{sec:results} Based on the previous analysis, the SC behavior was tested for different values of $K_P$ and $K_I$. The instrument used as a reference was a GPS-disciplined Meinberg~M600 master clock. The overall phase noise of the generated PPS signal (estimated on the basis of the power spectral density reported in the instrument's specifications) is about 30 ns. However, it is about one order of magnitude smaller over time intervals of a few hours, i.e. as long as the effects of flicker phase noise, white frequency noise, flicker frequency noise and random-walk frequency noise are negligible. In such conditions, the phase noise is mainly white, in accordance with the theoretical model described in Section~\ref{sec:model}. The period fluctuations of the PPS signal generated by the SC in open-loop conditions (namely, when no control action is applied) were measured by an Agilent DSO7032A with a 2-GHz sampling clock disciplined by the master clock. The systematic relative frequency offset of the emulated free-running DCO is about $-82$ ppm (i.e. the free-running clock drifts by about 82 $\mu$s per second). The standard deviation $\sigma_\eta$ of the corresponding phase noise in open-loop conditions instead ranges from about 25 ns over one hour (when the phase noise is still dominated by white contributions) to about 120 ns over two days (i.e. when the effect of the other low-frequency power-law noises becomes significant).
Given that, as explained in Section~\ref{sec:model}, the models adopted for the SC design rely on the inherent assumption that phase noise contributions are mainly white, $\sigma_\eta=25$ ns was also used to simulate the behavior of the SC in closed loop for different values of $K_P$ and $K_I$. The resulting standard deviation values $\sigma_e$ are basically the same as those obtained from the square root of~\eqref{eq:Variance} and shown in Fig.~\ref{fig:Ki_Kp}. The results of the theoretical analysis were also validated experimentally by estimating $\sigma_e$ in steady-state conditions over 1-hour intervals for various pairs of $K_P$ and $K_I$ values within the SC stability region (i.e. with $K_I\in\{0.05, 0.1, 0.5, 1\}$ and $K_P$ ranging from $0.1$ to $1.6$). The corresponding standard deviation values are shown in Fig.~\ref{fig:std_exp}. \begin{figure}[t] \centering \includegraphics[width=\columnwidth]{figures/experiments} \caption{Experimental values of the standard deviation of the synchronization errors for different settings of $K_P$ and $K_I$.} \label{fig:std_exp} \end{figure} The experimental curves clearly confirm that $\sigma_e$ exhibits a minimum value when $K_P=1$ and $K_I=\varepsilon>0$. However, for $K_I<0.05$ the additional jitter reduction becomes negligible. Observe that, even though the experimental and theoretical results are generally quite consistent, significant deviations can be observed when both $K_P$ and $K_I$ tend to zero. The ultimate reason for this mismatch is not clear, but it is certainly related to some second-order difference between the SC model and the SC implementation. Nonetheless, it is worth noticing that the experimental values are smaller than the theoretical ones, so the theoretical analysis can be regarded as conservative in this respect. The long-term stability of the SC with respect to the chosen reference PPS signal was determined by measuring the difference in time (over about two days) between the rising edges of the PPS signals at the input and at the output of the SC, respectively, for some of the $K_P$ and $K_I$ values considered in the design stage, i.e. \begin{enumerate} \item Using a dead-beat controller (i.e. $K_P = K_I = 1$), which is chosen for its fast response, although its jitter reduction is not the best; \item Using a purely proportional controller with unit gain (i.e. $K_P = 1$ and $K_I = 0$); \item And, finally, with $K_P = 1$ and $K_I = 0.05$, for the reasons explained above. \end{enumerate} The Allan deviation values of the waveforms generated by the SC are shown in Fig.~\ref{fig:allan} for different observation intervals. \begin{figure}[t] \centering \includegraphics[width=\columnwidth]{figures/allan} \caption{Allan deviation curves of PPS signals generated by the SC running on the BBB for different values of the PI controller parameters $K_P$ and $K_I$. For the sake of comparison, the Allan deviation of the PPS signal generated in open-loop conditions is also shown. } \label{fig:allan} \end{figure} The open-loop (i.e. free-running) case is also shown for the sake of comparison. It is worth noticing that the short-term stability of the PPS in open-loop conditions over 1 s is approximately $3.4\cdot10^{-9}$, but it tends to degrade over longer intervals, as is typical of low-quality crystal oscillators. Of course, in closed-loop conditions, the systematic relative frequency offset with respect to the input reference signal is well adjusted by the PI controller.
Therefore, if the input reference oscillator is particularly accurate, the systematic relative frequency offset of the SC can be reduced to less than 0.1 ppm. Observe that the short-term SC stability is clearly worse than in open-loop conditions, particularly when the dead-beat controller is used. However, in the long term, the stability with respect to the input reference drastically improves as a result of the control action. Some tests were also performed using a different input PPS signal, namely the PPS output of a u-blox NEO-6M receiver. Such a device produces a poorer-quality PPS signal; hence, the long-term performance is worse. Nevertheless, these tests confirmed the design choices discussed so far. On the whole, the configuration based on the criterion described in Section~\ref{sec:model} provides a very good trade-off between short-term and long-term stability. In order to complete the analysis, Fig.~\ref{fig:histograms}(a)-(c) shows the histograms of the PPS period fluctuations with respect to the Meinberg~M600 clock using (a) the dead-beat controller; (b) the quasi-optimal controller with $K_P = 1$ and $K_I = 0.05$ and (c) a purely proportional controller ($K_P = 1$ and $K_I = 0$). \begin{figure}[t] \centering \begin{tabular}{c} \includegraphics[width=0.97\columnwidth]{figures/hist_1} \\ (a) \\ \includegraphics[width=0.97\columnwidth]{figures/hist_2} \\ (b) \\ \includegraphics[width=0.97\columnwidth]{figures/hist_3} \\ (c) \\ \end{tabular} \caption{PPS period fluctuations of the signal generated by the SC with respect to the Meinberg~M600 using (a) a dead-beat controller ($K_P = 1$ and $K_I = 1$), (b) a quasi-optimal controller for time uncertainty minimization ($K_P = 1$ and $K_I = 0.05$) and (c) a proportional-only controller. } \label{fig:histograms} \end{figure} Observe that in the second case the jitter is almost halved (i.e. 25 ns vs. 50 ns). The jitter associated with the proportional-only controller is instead just slightly worse than the optimal one, whereas in theory it should be much larger. Such a difference is due to the mismatch between the theoretical model and the SC implementation explained above. In any case, the proportional-only controller is not able to correct possible sudden time offsets perfectly. \section{Platform Description} Synchronization is traditionally performed by means of PLLs locked onto a reference signal; hardware PLLs, in particular, achieve high performance. Nevertheless, the use of such components can be avoided by adopting a servo clock, which can achieve comparable performance entirely in software. Thanks to this implementation, synchronization can easily be provided in the many application fields that require it, at the mere cost of writing good code.
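As a concrete illustration, the following minimal Python sketch reproduces the structure of such a software servo clock, i.e. the error accumulator and the PI controller of Section~\ref{sec:model} acting on an emulated oscillator. The constants mirror those of the BBB implementation, while the jitter value and the loop structure are illustrative assumptions: this is a didactic rendition, not the actual PRU firmware.
\begin{verbatim}
import random

F_XO = 200_000_000      # PRU oscillator frequency [Hz]
ALPHA = -82e-6          # relative frequency offset of the free-running XO
KP, KI = 1.0, 0.05      # quasi-optimal gains (see Section 3)
SIGMA_ETA = 5.0         # local jitter per second, in ticks (placeholder)

u, e_prev = 0.0, 0.0    # control action and accumulated error [ticks]
for k in range(20):
    # Ticks accumulated by the local clock over one reference second:
    # nominal count, systematic offset, random jitter, plus correction.
    ticks = F_XO * (1 + ALPHA) + random.gauss(0, SIGMA_ETA) + u
    e = e_prev + (F_XO - ticks)        # accumulated time error [ticks]
    u += KP * (e - e_prev) + KI * e    # PI update of the control action
    e_prev = e
    print(f"k={k:2d}  error={e / F_XO * 1e9:+10.1f} ns")
\end{verbatim}
With $K_P=1$ the dominant closed-loop pole of~\eqref{eq:cltf} lies at $z=1-K_I$, so these gains absorb the systematic offset with a time constant of roughly $1/K_I$ seconds, whereas the dead-beat setting $K_P=K_I=1$ places both poles at the origin and cancels a time offset in two steps, at the price of a larger output jitter.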
\section{Introduction and statement of results} \setcounter{equation}{0} Let $S$ be a closed Riemann surface of genus $p>1$ with $n$ punctures removed. Assume that $3p-4+n>0$. Let Mod$(S)$ denote the mapping class group, which consists of isotopy classes of orientation preserving self-homeomorphisms of $S$. In view of the Nielsen--Thurston classification theorem \cite{Th}, elements of Mod$(S)$ are represented by periodic, reducible, or pseudo-Anosov maps. See Fathi--Laudenbach--Poenaru \cite{FLP} for the definitions and more information on reducible and pseudo-Anosov maps. The mapping class group Mod$(S)$ acts naturally on the Teichm\"{u}ller space $T(S)$ as a group of isometries with respect to the Teichm\"{u}ller metric $d_T$. Royden's theorem \cite{Ro}, whose generalization is due to Earle--Kra \cite{E-K}, asserts that, with a few exceptions, the group of automorphisms of $T(S)$ is the group Mod$(S)$. Following Bers \cite{Bers2}, elements $\alpha\in \mbox{Mod}(S)$ can be classified as elliptic, parabolic, hyperbolic, or pseudo-hyperbolic elements with the aid of the index $a(\alpha)=\inf \{d_T(y,\alpha(y)): y\in T(S)\}$. That is, $\alpha$ is elliptic if there is $y_0\in T(S)$ such that $a(\alpha)=d_T(y_0,\alpha(y_0))=0$; parabolic if $a(\alpha)=0$ but $d_T(y,\alpha(y))>0$ for all $y\in T(S)$; hyperbolic if there is $y_0\in T(S)$ such that $a(\alpha)=d_T(y_0,\alpha(y_0))>0$; and pseudo-hyperbolic if $a(\alpha)>0$ and $a(\alpha)<d_T(y,\alpha(y))$ for all $y\in T(S)$. Bers \cite{Bers2} proved that an element $\alpha\in \mbox{Mod}(S)$ is elliptic if and only if it is represented by a periodic map; $\alpha$ is parabolic or pseudo-hyperbolic if and only if it is represented by a reducible map; and $\alpha$ is hyperbolic if and only if it is represented by a pseudo-Anosov map. Among other things, it is well known that any hyperbolic element $\alpha$ preserves a unique bi-infinite geodesic $l$ in $T(S)$ (called a Teichm\"{u}ller geodesic in the literature), and hyperbolic elements are the only elements that keep some bi-infinite geodesics invariant. We remark here that the existence of $l$ was proved by Bers \cite{Bers2}, and the uniqueness of $l$ was proved in Bestvina--Feighn \cite{B-F} using topological methods. The mapping class group Mod$(S)$ acts on the {\it complex of curves} $\mathcal{C}(S)$ of $S$ as well, where $\mathcal{C}(S)$ is the simplicial complex whose vertex set $\mathcal{C}_0(S)$ is the collection of simple closed geodesics on $S$ and whose $k$-dimensional simplices $\mathcal{C}_k(S)$ are the collections of $(k+1)$-tuples $(v_0,v_1,\cdots, v_k)$ of disjoint simple closed geodesics on $S$ (see Harvey \cite{H}). It is well known that $\mathcal{C}(S)$ is connected and locally infinite. For simplicity, any path $\{(u, u_1), (u_1,u_2), \cdots, (u_s,v)\}$ joining two vertices $u,v\in \mathcal{C}_0(S)$ is denoted by $[u,u_1,\cdots, u_s,v]$. It is natural to define the path distance $d_{\mathcal{C}}(u,v)$ between any $u,v\in \mathcal{C}_0(S)$ to be the minimum number of sides in $\mathcal{C}_1(S)$ joining $u$ and $v$; a path achieving this minimum is called a geodesic segment joining $u$ and $v$. Masur--Minsky \cite{M-M} showed that $\mathcal{C}(S)$ has infinite diameter and is $\delta$-hyperbolic in the sense of Gromov \cite{Gro}. When considering the actions of elements of Mod$(S)$ on $\mathcal{C}(S)$, the situation is similar to, but different from, that on $T(S)$.
Ivanov \cite{Iva} showed that, with a few exceptions, the group of automorphisms of $\mathcal{C}(S)$ is the full group Mod$(S)$. It was shown in \cite{M-M} that elements of Mod$(S)$ can be classified as elliptic and hyperbolic elements (see also \cite{Gro} for the definition and terminology). In particular, Mod$(S)$ contains no parabolic elements, and hyperbolic elements are represented by pseudo-Anosov maps. In \cite{Bow}, Bowditch proved that there exists an integer $m$, whose precise value is unknown, such that for any hyperbolic mapping class $f$, the power $f^m$ preserves finitely many bi-infinite geodesics in $\mathcal{C}(S)$, where an infinite path $[\cdots, u_{-n}, \cdots, u_0,\cdots, u_n, \cdots]$ is called a bi-infinite geodesic if $u_{-n}$ and $u_n$ both tend to points in the Gromov boundary $\partial \mathcal{C}(S)$ of $\mathcal{C}(S)$ as $n\rightarrow +\infty$ and, for every $n$, the subpath $[u_{-n}, \cdots, u_0, \cdots, u_n]$ is a geodesic segment connecting $u_{-n}$ and $u_n$. It is quite obvious that a map which is neither periodic nor pseudo-Anosov does not preserve any bi-infinite geodesic. See Section 2 for more details. The question arises as to whether there exist some primitive pseudo-Anosov maps that preserve bi-infinite geodesics. Let $x$ be a puncture of $S$. Let $\mathscr{F}^*\subset \mbox{Mod}(S)$ be the subgroup consisting of mapping classes projecting to the trivial mapping class on $\tilde{S}=S\cup \{x\}$. Let $\mathscr{F}\subset \mathscr{F}^*$ be the subset consisting of primitive pseudo-Anosov elements isotopic to the identity on $\tilde{S}$. Then $\mathscr{F}\neq \emptyset$ and contains infinitely many elements (Kra \cite{Kr}). More precisely, each primitive and oriented filling closed geodesic $\tilde{c}$ on $\tilde{S}$ (that is, $\tilde{c}$ is not a power of any other closed geodesic and intersects every simple closed geodesic on $\tilde{S}$) is associated with a conjugacy class $H(\tilde{c})$ that consists of mapping classes conjugate in $\mbox{Mod}(S)$ to the point-pushing pseudo-Anosov mapping class along the geodesic $\tilde{c}$, and $\mathscr{F}$ is partitioned into a disjoint union of conjugacy classes $H(\tilde{c})$ over all primitive and oriented filling closed geodesics on $\tilde{S}$. Let $\mathscr{S}$ denote the set of primitive, oriented filling closed geodesics on $\tilde{S}$, and let $\mathscr{S}(2)$ be the subset of $\mathscr{S}$ consisting of filling closed geodesics that intersect every simple closed geodesic at least twice. It is easy to see that both $\mathscr{S}(2)$ and $\mathscr{S}\backslash \mathscr{S}(2)$ are non-empty. For every $\tilde{c}\in \mathscr{S}\backslash \mathscr{S}(2)$, we denote by $\mathscr{S}_{\tilde{c}}$ the (finite) set of simple closed geodesics intersecting $\tilde{c}$ only once. Our aim in this paper is to investigate the actions of elements of $\mathscr{F}^*$ on $\mathcal{C}_0(S)$ and to uncover elements in $\mathscr{F}^*$ that preserve some bi-infinite geodesics in $\mathcal{C}(S)$. In contrast to Theorem 1.3 of \cite{Bow}, we will prove the following result. \begin{thm}\label{T1} Let $S$ be of type $(p,1)$ with $p>1$. We have: \noindent $(1)$ Elements of $\mathscr{F}^*\backslash \mathscr{F}$ do not preserve any bi-infinite geodesic in $\mathcal{C}(S)$. \noindent $(2)$ Let $f\in \mathscr{F}$ be such that the corresponding filling geodesic $\tilde{c}\in \mathscr{S}\backslash \mathscr{S}(2)$. Then $f$ preserves at least one bi-infinite geodesic in $\mathcal{C}(S)$.
\noindent $(3)$ There is an injective map $\mathscr{I}$ of $\mathscr{S}_{\tilde{c}}$ into the set of $f$-invariant bi-infinite geodesics in $\mathcal{C}(S)$ such that $\mathscr{I}(\mathscr{S}_{\tilde{c}})$ consists of disjoint bi-infinite geodesics. \end{thm} \noindent {\em Remark. } It is not known whether $\mathscr{I}$ is a bijection, nor whether $f\in \mathscr{F}$ preserves a bi-infinite geodesic when the corresponding filling geodesic $\tilde{c}$ is in $\mathscr{S}(2)$. \smallskip The curve complex $\mathcal{C}(\tilde{S})$, along with the vertex set $\mathcal{C}_0(\tilde{S})$ and the path metric $d_{\mathcal{C}}$ on $\mathcal{C}(\tilde{S})$, can be defined similarly. For each $\tilde{u}\in \mathcal{C}_0(\tilde{S})$, let $F_{\tilde{u}}$ denote the set of vertices $u$ in $\mathcal{C}_0(S)$ such that $u$ is freely homotopic to $\tilde{u}$ as the puncture $x$ is filled in. Let $\mathbf{H}$ be the hyperbolic plane and let $\varrho:\mathbf{H}\rightarrow \tilde{S}$ be the universal covering map with covering group $G$. Then, with the help of the covering map $\varrho$, every $u\in F_{\tilde{u}}$ is associated with a configuration $(\tau_u,\Omega_u, \mathscr{U}_u)$, and every element $f\in H(\tilde{c})$ corresponds to an essential hyperbolic element $g$ of $G$. Let $\mbox{axis}(g)$ be the axis of $g$, that is, the unique $g$-invariant geodesic in $\mathbf{H}$. See Section 2 for more details. Let $f^m(u)$ denote the geodesic freely homotopic to the image curve of $u$ under $f^m$. Theorem \ref{T1} follows from the following result. \begin{thm}\label{T2} Let $\tilde{u}\in \mathcal{C}_0(\tilde{S})$ and $\tilde{c}\in \mathscr{S}$. Let $u\in F_{\tilde{u}}$ and $f\in H(\tilde{c})$ be such that $\Omega_u\cap \mbox{axis}(g)\neq \emptyset$. Then $\tilde{u}$ intersects $\tilde{c}$ only once if and only if $d_{\mathcal{C}}(u,f^m(u))=m$ for all $m$, in which case there is a unique geodesic segment connecting $u$ and $f^m(u)$. \end{thm} This paper is organized as follows. In Section 2, we collect some basic facts about mapping class groups acting on the curve complex, as well as some background information on Bers isomorphisms. In Section 3, we refine the argument in \cite{CZ11} to estimate the lower bound for the distance $d_{\mathcal{C}}(u,f^m(u))$ in terms of the intersection number between the corresponding geodesics. In Section 4, we relate a geodesic segment joining $u$ and $f^m(u)$ to a sequence of adjacent convex regions in $\mathbf{H}$. In Section 5, we prove the main results. \section{Mapping class group acting on the curve complex} \setcounter{equation}{0} \noindent {\bf \S 2.1. } In \cite{M-M}, Masur--Minsky proved that there is a constant $\epsilon$, depending only on the type $(p,n)$ (with $3p+n-4>0$) of the surface $S$, such that for any pseudo-Anosov map $f$, any vertex $u\in \mathcal{C}_0(S)$ and any integer $m$, $d_{\mathcal{C}}(u,f^m(u))\geq \epsilon |m|$. From this fact, together with the Nielsen--Thurston classification for mapping classes \cite{Th}, the following result is easily deduced: \begin{lem} \cite{M-M} \label{L2.1} Let $S$ be as above, and let $f\in \rm{Mod}(S)$. Then either $f^q$ for some $q$ has fixed points in $\mathcal{C}_0(S)$, or $f$ acts on $\mathcal{C}(S)$ as a hyperbolic translation which has two fixed points on $\partial \mathcal{C}(S)$. \end{lem} For the notion of hyperbolic translations, we refer to Gromov \cite{Gro}. Note that the two classes in Lemma \ref{L2.1} are exclusive.
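As an illustration (this is a standard observation, not taken verbatim from \cite{M-M}), the inequality $d_{\mathcal{C}}(u,f^m(u))\geq \epsilon |m|$ quoted above bounds the stable translation length of a pseudo-Anosov mapping class $f$ on $\mathcal{C}(S)$ away from zero, \[ \liminf_{m\rightarrow +\infty} \frac{d_{\mathcal{C}}(u,f^m(u))}{m}\geq \epsilon >0 , \] so that no power of $f$ can fix a vertex of $\mathcal{C}_0(S)$ and $f$ necessarily falls into the second alternative of Lemma \ref{L2.1}.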
As an easy corollary of Lemma \ref{L2.1}, we obtain \begin{lem}\label{L2.2} If $f\in \rm{Mod}(S)$ is reducible, then $f$ does not preserve any bi-infinite geodesic in $\mathcal{C}(S)$. \end{lem} \begin{proof} Suppose that a reducible mapping class $f$ in $\mbox{Mod}(S)$ keeps a bi-infinite geodesic $L=[\cdots, u_{-m}, \cdots, u_0, \cdots, u_m, \cdots]$ invariant, i.e., $f(L)=L$. Since $f^q$ (for all $q$) fixes only finitely many vertices in $\mathcal{C}_0(S)$, there is an integer $m>0$ such that $f^q$ for any $q$ has no fixed points on the union of the rays $[u_{m}, \cdots]\cup [\cdots, u_{-m}]$. By selecting a subsequence if needed, we may assume without loss of generality that (i) $f^j([u_m,\cdots])\subset [u_m,\cdots]$, (ii) $f^{-j}([\cdots,u_{-m}])\subset [\cdots,u_{-m}]$, and (iii) as $j\rightarrow +\infty$, both $d_{\mathcal{C}}(u_{m}, f^j(u_{m}))$ and $d_{\mathcal{C}}(u_{-m}, f^{-j}(u_{-m}))$ tend to infinity. Note that $f^{j}(u_{m})$ and $f^{-j}(u_{-m})$ belong to $L$ and that $d_{\mathcal{C}}(u_{-m}, u_{m})$ is finite. It follows that $$ d_{\mathcal{C}}(f^{j}(u_{m}), f^{-j}(u_{-m}))\rightarrow +\infty $$ as $j\rightarrow +\infty$. By taking a suitable power if necessary, we may also assume that $f(u)=u$ for some $u\in \mathcal{C}_0(S)$. Let $K=\mbox{max} \{ i(u,u_{m}), i(u,u_{-m})\}$. Then, since $f^j$ is a homeomorphism, $K\geq i(u,u_m)=i(f^j(u),f^j(u_{m}))=i(u,f^j(u_{m}))$. Similarly, we have $K\geq i(u,f^{-j}(u_{-m}))$. From Lemma 2.1 of \cite{M-M} (or \cite{Bow1}), we conclude that $d_{\mathcal{C}}(u,f^j(u_{m}))\leq K+1$ and $d_{\mathcal{C}}(u,f^{-j}(u_{-m}))\leq K+1$. It follows from the triangle inequality that $d_{\mathcal{C}}(f^{j}(u_{m}),f^{-j}(u_{-m}))\leq 2(K+1)$ for all $j$. This contradicts the fact that $d_{\mathcal{C}}(f^{j}(u_{m}), f^{-j}(u_{-m}))\rightarrow +\infty$. \end{proof} \noindent {\bf \S 2.2. } Let $Q(G)$ denote the group of quasiconformal automorphisms $w$ of the hyperbolic plane $\mathbf{H}$ that satisfy $wGw^{-1}=G$. Following Bers \cite{Bers1}, two such maps $w,w'\in Q(G)$ are said to be equivalent if $w|_{\mathbf{S}^1}=w'|_{\mathbf{S}^1}$, where $\mathbf{S}^1$ denotes the unit circle, which can be identified with the boundary of $\mathbf{H}$. Denote by $[w]$ the equivalence class of $w\in Q(G)$ and by $Q(G)/\!\sim$ the quotient group of $Q(G)$ by the above equivalence relation. The Bers isomorphism theorem (Theorem 9 of \cite{Bers1}) asserts that there is an isomorphism $\varphi^*$ of $Q(G)/\!\sim$ onto Mod$(S)$. For simplicity, let $[w]^*$ denote the mapping class $\varphi^*([w])$. It is clear that $G$ can be regarded as a normal subgroup of $Q(G)/\!\sim$. Every hyperbolic element $h\in G$ keeps a unique geodesic in $\mathbf{H}$ invariant. This geodesic is called the axis of $h$ and is denoted by axis$(h)$. A hyperbolic element $g\in G$ is called essential if $\varrho(\mbox{axis}(g))$ is a filling closed geodesic. Let $G'\subset G$ be the collection of all primitive essential hyperbolic elements. Then $\varphi^*(G')=\mathscr{F}$ and $\varphi^*(G)=\mathscr{F}^*$. For an element $h\in G$, we denote by $h^*$ the mapping class $\varphi^*(h)$. Let $\pi_1(\tilde{S},x)$ denote the fundamental group of $\tilde{S}$. Let $\mu:G\rightarrow \pi_1(\tilde{S},x)$ denote an isomorphism (which depends only on the choice of a point $\hat{x}\in \mathbf{H}$ with $\varrho(\hat{x})=x$).
By virtue of Theorem 4.1 and Theorem 4.2 in Birman \cite{Bir}, there is an exact sequence \begin{equation}\label{BIR} 0 \longrightarrow \pi_1(\tilde{S},x)\cong G\longrightarrow \mbox{Mod}(S)\longrightarrow \mbox{Mod}(\tilde{S})\longrightarrow 0, \end{equation} where $\mbox{Mod}(S)\rightarrow \mbox{Mod}(\tilde{S})$ is the natural puncture-forgetting projection. In (\ref{BIR}), an element $g\in G$ is identified with the pure mapping class in $\mbox{Mod}(S)$ that corresponds to the loop representing $\mu(g)$ in $\pi_1(\tilde{S},x)$. Let $u\in \mathcal{C}_0(S)$ be a non-preperipheral vertex; that is, $u$ is homotopic to a non-trivial geodesic on $\tilde{S}$ if $u$ is also viewed as a curve on $\tilde{S}$. Let $\tilde{u}\in \mathcal{C}_0(\tilde{S})$ be the corresponding vertex. Denote by $\mathscr{R}_{\tilde{u}}$ the collection of all components of $\mathbf{H}\backslash \{\varrho^{-1}(\tilde{u})\}$, where $$\{\varrho^{-1}(\tilde{u})\}=\{\mbox{all geodesics $\hat{u}$ in }\mathbf{H}\ \mbox{such that }\varrho(\hat{u})=\tilde{u}\}. $$ Two components $\Omega_1,\Omega_2\in \mathscr{R}_{\tilde{u}}$ are said to be adjacent if $\Omega_1$ and $\Omega_2$ share a common geodesic boundary $a$, that is, $\overline{\Omega}_1\cap \overline{\Omega}_2=a$. Note that $a\in \{\varrho^{-1}(\tilde{u})\}$. It was shown (Lemma 2.1 of \cite{CZ11}) that there is a bijection $\chi$ between $\mathscr{R}_{\tilde{u}}$ and $F_{\tilde{u}}$, and two regions $\Omega_1$ and $\Omega_2\in \mathscr{R}_{\tilde{u}}$ are adjacent if and only if $d_{\mathcal{C}}(\chi(\Omega_1),\chi(\Omega_2))=1$, in which case $\{\chi(\Omega_1), \chi(\Omega_2)\}$ forms the boundary of an $x$-punctured cylinder on $S$. That is to say, $\chi(\Omega_1)$ and $\chi(\Omega_2)$ are disjoint and homotopic to each other on $\tilde{S}$ when $\chi(\Omega_1)$ and $\chi(\Omega_2)$ are viewed as curves on $\tilde{S}$. It was shown in \cite{CZ11} that any fiber $F_{\tilde{u}}$, $\tilde{u}\in \mathcal{C}_0(\tilde{S})$, is path connected (the fact that $F_{\tilde{u}}$ is connected for a closed surface $\tilde{S}$ was proved in \cite{Sch}). Now each $u\in F_{\tilde{u}}$ is non-preperipheral, which allows us to define a {\it configuration} $(\tau_u,\Omega_u, \mathscr{U}_u)$ corresponding to $u$, where $\Omega_u=\chi^{-1}(u)$, $\tau_u$ is the lift of the Dehn twist $t_{\tilde{u}}$ such that $\tau_u|_{\Omega_u}=\mbox{id}$ and $[\tau_u]^*=t_u$, and $\mathscr{U}_u$ is a partially ordered set which is the collection of all half-planes in $\mathbf{H}$ defined by $\tau_u$. Maximal elements of $\mathscr{U}_u$ are mutually disjoint, and their union is the complement of $\Omega_u$ in $\mathbf{H}$. From the construction, we also know that $\tau_u$ keeps each maximal element of $\mathscr{U}_u$ invariant. See \cite{CZ2} for more details. \section{Distances and intersection numbers between vertices} \setcounter{equation}{0} Throughout the rest of the article we assume that $S$ is of type $(p,1)$ with $p>1$. This assumption guarantees that each vertex in $\mathcal{C}_0(S)$ is non-preperipheral. Fix $\tilde{u}_0\in \mathcal{C}_0(\tilde{S})$ and $\tilde{c}\in \mathscr{S}$. For simplicity, we also use the symbol $i(\tilde{c}, \tilde{u}_0)$ to denote the geometric intersection number between $\tilde{u}_0$ and $\tilde{c}$. We may assume that $\tilde{u}_0$ intersects $\tilde{c}$ at non-self-intersection points of $\tilde{c}$, by performing a small perturbation if needed. Let $u_0\in \mathcal{C}_0(S)$ be obtained from $\tilde{u}_0$ by removing the point $x$.
Let $(\tau_0,\Omega_0,\mathscr{U}_0)$ be the configuration that corresponds to $u_0$. Let $g\in G$ be an essential hyperbolic element such that $\varrho(\mbox{axis}(g))=\tilde{c}$ and $\mbox{axis}(g)\cap \Omega_{0}\neq \emptyset$. Write $f=g^*$. Then $f\in \mathscr{F}$ is an element of $H(\tilde{c})$. By abuse of language, in what follows, for each $u\in \mathcal{C}_0(S)$, we let $\tilde{u}$ be the corresponding vertex in $\mathcal{C}_0(\tilde{S})$ under the natural projection from $\mathcal{C}_0(S)$ onto $\mathcal{C}_0(\tilde{S})$ (which is well defined since $S$ contains only one puncture $x$), which means that $u$ and $\tilde{u}$ are homotopic to each other on $\tilde{S}$ as $x$ is filled in. The following lemma is a refinement of Theorem 1.2 of \cite{CZ10}. \begin{lem}\label{D} Suppose $i(\tilde{c},\tilde{u}_0)\geq 2$. Then for any integer $m>0$, we have $d_{\mathcal{C}}(u_0,f^m(u_0))\geq m+1$. \end{lem} \begin{proof} Since $g$ is an essential hyperbolic element of $G$, by Lemma 3.1 of \cite{CZ6}, $\mbox{axis}(g)$ is not contained in $\Omega_0$, which implies that there exist maximal elements $\Delta_0, \Delta_0^*\in \mathscr{U}_0$ such that $\mbox{axis}(g)$ intersects $\partial \Delta_0$ and $\partial \Delta_0^*$. Let $A$ and $B$ denote the attracting and repelling fixed points of $g$, respectively. Note that $\Delta_0$ and $\Delta_0^*$ are disjoint. Assume that $A\in \Delta_0\cap \mathbf{S}^1$ and $B\in \Delta_0^*\cap \mathbf{S}^1$. We know that $\Omega_0\subset \mathbf{H}\backslash \overline{\Delta_0\cup \Delta_0^*}$. Write $\overline{P_0Q_0}=\partial \Delta_0$. We refer to Figure 1, where $\Delta_0$ is the component of $\mathbf{H}\backslash \overline{P_0Q_0}$ containing $A$. Note that $\varrho(\mbox{axis}(g))=\tilde{c}$. The assumption that $i(\tilde{c},\tilde{u}_0)=N$, where $N\geq 2$, implies that $\overline{P_1Q_1}=g(\partial \Delta_0^*)$ is disjoint from $\overline{P_0Q_0}$ and ``lies below'' $\overline{P_0Q_0}$. Let $R_0$ be the region bounded by $\overline{P_0Q_0}$ and $\overline{P_1Q_1}$. Observe that the geodesic $\mbox{axis}(g)$ inherits a natural orientation that points from $B$ to $A$. Now consider a point $z\in \mbox{axis}(g)$ moving from $B$ to $A$ along $\mbox{axis}(g)$. After entering the region $R_0$, the point $z$ crosses $N-1$ disjoint geodesics in $\{\varrho^{-1}(\tilde{u}_0)\}$, and then it crosses $\overline{P_1Q_1}$ and leaves the region $R_0$. Of course, careful investigations of the $N-1$ geodesics and their relative positions are interesting but not needed in this paper. For $j=1,\cdots, m$, we set \begin{equation}\label{K1} \overline{P_{2j-1}Q_{2j-1}}=g^j(\partial \Delta_0^*) \ \ \mbox{and} \ \ \Delta_{2j-1}'=g^j(\Delta_0^*), \end{equation} and for $j=1,\cdots, m-1$, we set \begin{equation}\label{K2} \overline{P_{2j}Q_{2j}}=g^j(\overline{P_0Q_0}). \end{equation} Let $\Delta_{2j}'$ be the component of $\mathbf{H}\backslash \overline{P_{2j}Q_{2j}}$ containing the repelling fixed point $B$ of $g$. Then all $\overline{P_kQ_k}\in \{\varrho^{-1}(\tilde{u}_0)\}$; that is, $\varrho(\overline{P_kQ_k})=\tilde{u}_0$ for all $k=0,\cdots, 2m-1$.
\begin{center}
\emph{Fig.~1: the axis of $g$, oriented from the repelling fixed point $B$ to the attracting fixed point $A$, together with the level geodesics $\overline{P_kQ_k}$, $0\leq k\leq 2m-1$, the half-plane $\Delta_0^*$ and the half-planes $\Delta_k'$ bounded by them.}
\end{center}
It is evident that all the geodesics $\overline{P_kQ_k}$, $0\leq k\leq 2m-1$, are mutually disjoint and, for any $j=1,\cdots,m-1$, the geodesic $\overline{P_{2j}Q_{2j}}$ lies in between $\overline{P_{2j-1}Q_{2j-1}}$ and $\overline{P_{2j+1}Q_{2j+1}}$. The geodesics $\overline{P_kQ_k}$ with $1\leq k\leq 2m-1$ give rise to a partition of $\mathbf{H}$, and each of them is referred to as a level geodesic of level $k$ in the sequel. Let $(P_kP_{k+1})$ and $(Q_kQ_{k+1})$ denote the subarcs of $\mathbf{S}^1\backslash \{A,B\}$ connecting $P_k, P_{k+1}$ and $Q_k, Q_{k+1}$, respectively. By examining the action of $g$ on $\mathbf{S}^1$, for $j=1,\cdots, m-2$, we have \begin{equation}\label{K3} g(P_{2j-2}P_{2j})=(P_{2j}P_{2j+2}) \ \ \mbox{and}\ \ g(Q_{2j-2}Q_{2j})=(Q_{2j}Q_{2j+2}).
\end{equation} As usual, let $f^j(u_0)$ denote the geodesic homotopic to the image curve of $u_0$ under the map $f^j$ for all $j$. Set $u_m=f^m(u_0)$. Let $[u_0,u_1,\cdots, u_s,u_m]$ be a geodesic segment in $\mathcal{C}_1(S)$ joining $u_0$ and $u_m$. Then all $u_j$ are non-preperipheral and thus $\tilde{u}_j$ are all non-trivial simple closed geodesics on $\tilde{S}$. Let $(\tau_j, \Omega_j,\mathscr{U}_j)$ be the configurations corresponding to $u_j$. In what follows, the region $\Omega_j$ is said to be located above level $k$ for some $1\leq k\leq 2m-1$ if $\Omega_j\cap \Delta_k'\neq \emptyset$. Likewise, $\Omega_j$ is said to be located at level $k$ if $\Delta_k'$ is a maximal element of $\mathscr{U}_j$. See \cite{CZ10} for more detailed information. By construction, $\Omega_0\subset \mathbf{H}\backslash \overline{\Delta_0\cup \Delta_0^*}$ (in fact, $\mathbf{H}\backslash (\overline{\Omega_0}\cup \overline{\Delta_0\cup \Delta_0^*})$ is a disjoint union of infinitely many maximal elements of $\mathscr{U}_0$). By a similar argument of Theorem 1.2 of \cite{CZ10} (together with Lemma 2.1 of \cite{CZ7}, (\ref{K2}) and (\ref{K3})), we know that $\Omega_1$ is located above or at level zero. By an induction argument, one shows that for all $j=1,\cdots, s$ with $s\leq m-1$, $\Omega_j$ is located above or at level $2(j-1)$. In particular, we conclude that $\Omega_{m-1}$ is located above or at level $2(m-2)$. If $\Omega_{m-1}$ is located at level $2(m-2)=2m-4$, then there is a maximal element $\Delta_{m-1}\in \mathscr{U}_{m-1}$ that covers the attracting fixed point $A$ of $g$ such that $\partial \Delta_{m-1}$ lies above level $2(m-1)=2m-2$. Note that the point $P_{2m-2}$ lies in the arc $(P_{2m-3}P_{2m-1})$ and $Q_{2m-2}$ lies in the arc $(Q_{2m-3}Q_{2m-1})$. So the region bounded by the two geodesic $\overline{P_{2m-2}Q_{2m-2}}$ and $\overline{P_{2m-3}Q_{2m-3}}$ is not empty. By construction, $\Delta_{2m-1}'$ is the component of $\mathbf{H}\backslash \overline{P_{2m-1}Q_{2m-1}}$ containing the repelling fixed point $B$. Hence $\Delta_{m-1}\cap \Delta_{2m-1}'\neq \emptyset$ and $\Delta_{m-1}\cup \Delta_{2m-1}'=\mathbf{H}$. Note also that the configuration $$ (\tau_m, \Omega_m,\mathscr{U}_m):=(g^m\tau_0g^{-m}, g^m(\Omega_0), g^m(\mathscr{U}_0)) $$ corresponds to $f^m(u_0)\in \mathcal{C}_0(S)$. Since $\Delta_0^*\in \mathscr{U}_0$, $g^m(\Delta_0^*)=\Delta_{2m-1}'\in g^m(\mathscr{U}_0)=\mathscr{U}_m$, we conclude that $\Delta_{2m-1}'\in \mathscr{U}_m$. By Lemma 4 of \cite{CZ1}, $u_{m-1}$ intersects $u_m=f^m(u_0)$, which implies that $d_{\mathcal{C}}(u_{m-1}, u_m)\geq 2$. Thus $s\geq m$, which says $d_{\mathcal{C}}(u_0, u_m)\geq m+1$, as asserted. If $\Omega_{m-1}$ is located above level $2(m-2)=2m-4$, then by Lemma 3.1 of \cite{CZ10}, there is a maximal element $\Delta_{m-1}\in \mathscr{U}_{m-1}$, which covers the attracting fixed point $A$, such that either (i) $\partial \Delta_{m-1}\cap \overline{P_{2m-2}Q_{2m-2}}\neq \emptyset$, or (ii) $\partial \Delta_{m-1}\cap \overline{P_{2m-2}Q_{2m-2}}= \emptyset$ but $\Delta_{m-1}\cup \Delta_{2m-1}'=\mathbf{H}$. If (ii) occurs, by Lemma 4 of \cite{CZ1} again, $u_{m-1}$ intersects $u_m=f^m(u_0)$, which implies that $d_{\mathcal{C}}(u_{m-1}, u_m)\geq 2$. So $s\geq m$. Suppose (i) occurs. We observe that $\varrho(\partial \Delta_{m-1})=\tilde{u}_{m-1}$ and $\varrho(\overline{P_{2m-2}Q_{2m-2}})=\tilde{u}_0$. Then $\tilde{u}_{m-1}$ intersects $\tilde{u}_{m}$. But $\tilde{u}_m=\tilde{u}_0$. This in turn implies that $u_{m-1}$ intersects $u_m$, and so $s\geq m$. 
\end{proof}

\unitlength 1mm
\linethickness{0.4pt}
\ifx\plotpoint\undefined\newsavebox{\plotpoint}\fi
\begin{picture}(92.5,81.75)(0,0)
% The machine-generated arc segments tracing the boundary circle have been condensed here;
% the geodesics and labels of Fig. 2 are retained below.
\qbezier(46.25,76.5)(59.5,61.75)(73.75,75)
\qbezier(30.5,62.75)(59.25,45.875)(88,61.5)
\qbezier(27.25,37.75)(59.625,45.625)(89.5,35)
\qbezier(34.5,24.25)(59.25,43.75)(82,23.25)
\qbezier(42.5,18)(60.375,39.25)(73.75,17.5)
\qbezier(49.5,14.75)(59.125,33.625)(68.25,15)
\put(28.25,64.25){\makebox(0,0)[cc]{$P_1$}}
\put(90.5,63){\makebox(0,0)[cc]{$Q_1$}}
\put(24.25,36.75){\makebox(0,0)[cc]{$P_2$}}
\put(92.5,33.5){\makebox(0,0)[cc]{$Q_2$}}
\put(32.25,22.5){\makebox(0,0)[cc]{$P_3$}}
\put(84,21){\makebox(0,0)[cc]{$Q_3$}}
\put(48.5,12){\makebox(0,0)[cc]{$P_m$}}
\put(69.5,12){\makebox(0,0)[cc]{$Q_m$}}
\put(40.5,15.5){\makebox(0,0)[cc]{$P_{m-1}$}}
\put(75.75,13.25){\makebox(0,0)[cc]{$Q_{m-1}$}}
\put(59.25,9.75){\makebox(0,0)[cc]{$A$}}
\put(59.5,81.75){\makebox(0,0)[cc]{$B$}}
\put(52.25,75.25){\makebox(0,0)[cc]{$\Delta_0^*$}}
\put(34.5,63.25){\makebox(0,0)[cc]{$\Delta_1'$}}
\put(29.25,41){\makebox(0,0)[cc]{$\Delta_2'$}}
\put(35,28.25){\makebox(0,0)[cc]{$\Delta_3'$}}
\put(48.5,18.25){\makebox(0,0)[cc]{$\Delta_m'$}}
\put(41,22.5){\makebox(0,0)[cc]{$\Delta_{m-1}'$}}
\put(62.25,47.75){\makebox(0,0)[cc]{$g$}}
\put(59.25,1.5){\makebox(0,0)[cc]{Fig. 2}}
\put(59.75,78.75){\vector(0,-1){65.75}}
\end{picture}

\section{Geodesic paths in the curve complex}
\setcounter{equation}{0}
In this section, we study geodesic segments connecting $u_0$ and $u_m$, where we recall that $u_m=f^m(u_0)$ is the geodesic homotopic to the image curve of $u_0$ under the map $f^m$. For the purposes of the discussion, in what follows we only need a ``coarser'' partition of $\mathbf{H}$, which is described below. See also \cite{CZ10} for more details.

Let $\Delta_0, \Delta_0^*$ and $g$ be as in Section 3. For $j=1,\cdots, m$, write $\overline{P_jQ_j}=g^j(\partial \Delta_0^*)$. These geodesics $\overline{P_jQ_j}$ are referred to as level geodesics with level $j$. As usual, put $\Delta_j'=g^j(\Delta_0^*)$. See Figure 2.

Let $[u_0,u_1,\cdots, u_s,u_m]$ be a path connecting $u_0$ and $u_m$. Here we emphasize that the path is not assumed to be a geodesic segment. Then all $u_j$ are non-preperipheral. Once again, let $(\tau_j,\Omega_j, \mathscr{U}_j)$, $j=0,\cdots, s,m$, be the configurations corresponding to $u_j$.

\begin{lem}\label{L3.1}
With the above notation, if $\Omega_j$ is located above level $j$ for some $j$ with $1\leq j\leq s$, then $s\geq m$.
\end{lem}
\begin{proof}
If $\Omega_j$ is located above level $j$ for some $j\leq m-2$, then by Lemma 3.1 of \cite{CZ10}, there exists a maximal element $\Delta_j\in \mathscr{U}_j$ such that $\Delta_j$ covers the attracting fixed point $A$ of $g$ and either $\partial \Delta_j$ lies above $\overline{P_{j+1}Q_{j+1}}$ or $\partial \Delta_j$ crosses $\overline{P_{j+1}Q_{j+1}}$ (see Figures 3 and 4).
\bigskip
\unitlength 1mm
\linethickness{0.4pt}
\ifx\plotpoint\undefined\newsavebox{\plotpoint}\fi
\begin{picture}(114.637,68.75)(0,0)
% The machine-generated arc segments tracing the two boundary circles have been condensed here;
% the geodesics and labels of Figs. 3 and 4 are retained below: in Fig. 3 the geodesic $\partial \Delta_j$
% (with endpoints $X_j$, $Y_j$) lies above $\overline{P_{j+1}Q_{j+1}}$, while in Fig. 4 it crosses it.
\qbezier(5.25,25.5)(25.75,40.125)(44.25,24.25)
\qbezier(69,25.5)(89.5,40.125)(108,24.25)
\put(2.5,23.75){\makebox(0,0)[cc]{$P_{j+1}$}}
\put(66.25,23.75){\makebox(0,0)[cc]{$P_{j+1}$}}
\put(48.25,21.25){\makebox(0,0)[cc]{$Q_{j+1}$}}
\qbezier(1.5,49.5)(25.625,39)(49.25,48.5)
\put(47.25,44.25){\makebox(0,0)[cc]{$\Delta_j$}}
\put(-.75,51){\makebox(0,0)[cc]{$Y_j$}}
\put(52,49.5){\makebox(0,0)[cc]{$X_j$}}
\put(25.75,66){\vector(0,-1){50.5}}
\put(89.5,66){\vector(0,-1){50.5}}
\put(28,56.25){\makebox(0,0)[cc]{$g$}}
\put(91.75,56.25){\makebox(0,0)[cc]{$g$}}
\put(25.5,12.5){\makebox(0,0)[cc]{$A$}}
\put(89.25,12.5){\makebox(0,0)[cc]{$A$}}
\put(25.75,68.75){\makebox(0,0)[cc]{$B$}}
\put(89.5,68.75){\makebox(0,0)[cc]{$B$}}
\qbezier(77.5,18.75)(89.375,40.625)(110.75,54)
\put(113.75,55){\makebox(0,0)[cc]{$X_j$}}
\put(75.75,15.75){\makebox(0,0)[cc]{$Y_j$}}
\put(111.75,20.5){\makebox(0,0)[cc]{$Q_{j+1}$}}
\put(109,48.75){\makebox(0,0)[cc]{$\Delta_j$}}
\put(25.25,3.75){\makebox(0,0)[cc]{Fig. 3}}
\put(89.5,4){\makebox(0,0)[cc]{Fig. 4}}
\put(8,31.25){\makebox(0,0)[cc]{$\Delta_{j+1}'$}}
\put(70.75,31.25){\makebox(0,0)[cc]{$\Delta_{j+1}'$}}
\end{picture}

There are two cases to consider.

Case 1. $\Omega_{j+1}$ is located at level $j+1$. There is a maximal element $\Delta_{j+1}''\in \mathscr{U}_{j+1}$ such that $\Delta_{j+1}''=\Delta_{j+1}'$. If Figure 3 occurs, then $\Delta_{j+1}''\cap \Delta_{j}\neq \emptyset$, $\partial \Delta_{j+1}''\cap \partial \Delta_j=\emptyset$ and $\Delta_{j+1}''\cup \Delta_{j}=\mathbf{H}$. From Lemma 4 of \cite{CZ1}, we deduce that $d_{\mathcal{C}}(u_{j+1}, u_j)\geq 2$. This is a contradiction. If Figure 4 occurs, then $\partial \Delta_j$ intersects $\partial \Delta_{j+1}''$, which implies that $\tilde{u}_{j+1}$ intersects $\tilde{u}_{j}$. Thus $u_{j+1}$ intersects $u_j$. This again contradicts the fact that $d_{\mathcal{C}}(u_{j+1}, u_j)=1$.

Case 2. $\Omega_{j+1}$ is located below level $j+1$. This means that there is a maximal element $\Delta_{j+1}''\in \mathscr{U}_{j+1}$ that contains $\Delta_{j+1}'$. If Figure 3 occurs, then by the same argument as in Case 1, we deduce that $d_{\mathcal{C}}(u_{j+1}, u_j)\geq 2$.
If Figure 4 occurs, then either $\partial \Delta_{j+1}''$ crosses $\partial \Delta_j$, or we have $\Delta_{j+1}''\cap \Delta_{j}\neq \emptyset$, $\partial \Delta_{j+1}''\cap \partial \Delta_j=\emptyset$ and $\Delta_{j+1}''\cup \Delta_{j}=\mathbf{H}$. In both cases, by the same argument as in Case 1, we deduce that $d_{\mathcal{C}}(u_{j+1}, u_j)\geq 2$. This again contradicts the fact that $d_{\mathcal{C}}(u_{j+1}, u_j)=1$.

We conclude that all $\Omega_{k}$ with $k>j$ lie above level $k$. In particular, $\Omega_{m-1}$ is located above level $m-1$. So there is a maximal element $\Delta_{m-1}\in \mathscr{U}_{m-1}$ such that either $\partial \Delta_{m-1}$ lies above level $m$ or $\partial \Delta_m'$ crosses $\partial \Delta_{m-1}$. In both cases, by the same argument as in Cases 1 and 2, we assert that $d_{\mathcal{C}}(u_{m-1}, u_m)\geq 2$. It follows that $s\geq m$.
\end{proof}

Let $\tilde{u}_0\in \mathcal{C}_0(\tilde{S})$ and $\tilde{c}\in \mathscr{S}\backslash \mathscr{S}(2)$ be such that $i(\tilde{u}_0, \tilde{c})=1$. Let $u_0, g$, and $(\tau_0,\Omega_0,\mathscr{U}_0)$ be as before. Then $g$ possesses the property that $\varrho(\mbox{axis}(g))\cap \Omega_0\neq \emptyset$. As an easy consequence of Lemma \ref{L3.1}, we obtain

\begin{lem}\label{L3.2}
With the above conditions, if $\Omega_j$ is located above level $j$ for some $j$ with $1\leq j\leq s$, then the path $[u_0,\cdots, u_s,u_m]$, where $u_m=f^m(u_0)$ and $f=g^*$, is not a geodesic path.
\end{lem}
\begin{proof}
By Lemma \ref{L3.1}, we have $s\geq m$. But from the assumption, we know that $i(\tilde{c},\tilde{u}_0)=1$, which means that $d_{\mathcal{C}}(u_0, f(u_0))=1$, and so for all $j$ with $0\leq j\leq m-1$, $d_{\mathcal{C}}(f^j(u_0), f^{j+1}(u_0))=1$. It follows from the triangle inequality that $d_{\mathcal{C}}(u_0,f^m(u_0))\leq m$. So, by definition, $[u_0,\cdots, u_s,u_m]$ is not a geodesic path.
\end{proof}

\begin{lem}\label{L3.3}
With the same notation as in Lemma $\ref{L3.1}$, suppose that a path $[u_0,u_1,\cdots, u_s,u_m]$ is a geodesic path. Then $i(\tilde{c},\tilde{u}_0)=1$ if and only if all $\Omega_j$ are located at level $j$.
\end{lem}
\begin{proof}
By the same argument as in Lemma \ref{L3.2}, we obtain
\begin{equation}\label{LP}
d_{\mathcal{C}}(u_0,f^m(u_0))\leq m.
\end{equation}
If there is some $\Omega_{j_0}$ that is located above level $j_0$, then by Lemma \ref{L3.1}, all $\Omega_j$ with $j\geq j_0$ are located above level $j$. By the same argument as in Lemma \ref{L3.1}, we conclude that $d_{\mathcal{C}}(u_0,f^m(u_0))\geq m+1$. This contradicts (\ref{LP}).

Conversely, if all $\Omega_j$ are located at level $j$, then for $j=0,\cdots, m-1$, $\Omega_j$ is adjacent to $\Omega_{j+1}$. By Lemma 2.1 of \cite{CZ11}, $d_{\mathcal{C}}(u_j,u_{j+1})=1$. Hence $d_{\mathcal{C}}(u_0,f^m(u_0))=m$. By virtue of Lemma \ref{D}, we deduce that $i(\tilde{c},\tilde{u}_0)=1$.
\end{proof}

\section{Proof of results}
\setcounter{equation}{0}
\noindent {\em Proof of Theorem $1.2$}: Assume that $d_{\mathcal{C}}(u_0,f^m(u_0))=m$. If $i(\tilde{c},\tilde{u}_0)\geq 2$, then by Lemma \ref{D}, we have $d_{\mathcal{C}}(u_0,f^m(u_0))\geq m+1$. This is a contradiction. This shows that $i(\tilde{c},\tilde{u}_0)=1$.

Conversely, suppose $i(\tilde{c},\tilde{u}_0)=1$. Let $[u_0,u_1,\cdots, u_s,u_m]$ be a geodesic segment joining $u_0$ and $u_m$. Then all $u_1,\cdots, u_s$ are non-preperipheral geodesics, which means that $\tilde{u}_1,\cdots, \tilde{u}_s$ are all non-trivial. Let $(\tau_j,\Omega_j,\mathscr{U}_j)$ be the configurations corresponding to $u_j$.
By Lemma \ref{L3.3}, all $\Omega_j$, $j=1,\ldots, s$, are located at level $j$. This implies that $\Omega_j$ is adjacent to $\Omega_{j+1}$ for $j=1,\cdots, s-1$. If $s\leq m-2$, then by the same argument as in Lemma \ref{D}, $d_{\mathcal{C}}(u_s,u_m)\geq 2$. This is absurd. So $s\geq m-1$. On the other hand, if for all $j=1,2,\cdots, m-2$, $\Omega_j$ is adjacent to $\Omega_{j+1}$, then $\Omega_{m-1}$ is also adjacent to $\Omega_m$, which tells us that $d_{\mathcal{C}}(\chi(\Omega_{m-1}),\chi(\Omega_m))=1$, that is, $d_{\mathcal{C}}(u_{m-1},u_m)=1$. It follows that $s=m-1$. In this case,
$$
d_{\mathcal{C}}(u_0,f^m(u_0))=\sum_{j=0}^{m-1}d_{\mathcal{C}}(\chi(\Omega_j),\chi(\Omega_{j+1}))=m.
$$
Hence the geodesic segment connecting $u_0$ and $u_m$ is realized by the sequence $\Omega_0, \Omega_1,\cdots, \Omega_m$. Note that $\chi(\Omega_j)=\chi(g^{j}(\Omega_0))=f^j(u_0)$. We conclude that the geodesic segment connecting $u_0$ and $u_m$ is
$$
[u_0,f(u_0), f^2(u_0),\cdots, f^{m-1}(u_0), f^m(u_0)].
$$

If there is another geodesic segment $[u_0, v_1,\cdots, v_{m-1}, u_m]$ connecting $u_0$ and $u_m$, then there is some $j$ such that $v_j\neq f^j(u_0)$. Since the $v_j$, $j=1,\cdots, m-1$, are non-preperipheral, the $\tilde{v}_j$ are all non-trivial geodesics, which allows us to define configurations $(\tau_j', \Omega_j', \mathscr{U}_j')$ corresponding to $v_j$. The assumption that $v_j\neq f^j(u_0)$ then implies that $\Omega_j'$ is not located at level $j$. By the argument of Theorem 1.2 of \cite{CZ10}, $\Omega_j'$ lies above level $j$. By the same argument as in Lemma \ref{L3.1}, we conclude that $d_{\mathcal{C}}(u_0,f^m(u_0))\geq m+1$. This leads to a contradiction, proving that the geodesic segment connecting $u_0$ and $u_m$ is unique. \qed
\medskip

\noindent {\em Proof of Theorem $1.1$}: Assume that $\tilde{c}\in \mathscr{S}\backslash \mathscr{S}(2)$. Choose $\tilde{u}_0\in \mathcal{C}_0(\tilde{S})$ so that $i(\tilde{c},\tilde{u}_0)=1$. Let $u_0\in F_{\tilde{u}_0}$ be such that $\Omega_0\cap \mbox{axis}(g)\neq \emptyset$, where $g\in G$ satisfies $g^*=f$, and let $(\tau_0,\Omega_0,\mathscr{U}_0)$ be the configuration corresponding to $u_0$. By Theorem 1.2, for every $m\geq 1$, $[u_0, f(u_0), \cdots, f^m(u_0)]$ and $[u_0, f^{-1}(u_0), \cdots, f^{-m}(u_0)]$ are the unique geodesic segments connecting $u_0, u_m$ and $u_0, u_{-m}$, respectively.

We claim that $L_m=[f^{-m}(u_0), \cdots, f^{-1}(u_0), u_0, f(u_0), \cdots, f^m(u_0)]$ is a geodesic segment connecting $f^{-m}(u_0)$ and $f^{m}(u_0)$. Otherwise, the triangle inequality would yield $d_{\mathcal{C}}(f^{-m}(u_0),f^m(u_0))< 2m$. Moreover, if $L_m$ were not a geodesic segment, then since $f^m$ acts on $\mathcal{C}(S)$ as an isometry with respect to the path metric $d_{\mathcal{C}}$, $f^m(L_m)=[u_0,\cdots, f^{2m}(u_0)]$ would not be a geodesic segment either, which contradicts Theorem 1.2. We conclude that $L_m$ is a geodesic path connecting $u_{-m}$ and $u_m$ for all $m>0$.

To see that $L_m$ is the only geodesic segment joining $u_{-m}$ and $u_m$, suppose there were two different geodesic segments $L_m$ and $L_m'$ joining $u_{-m}$ and $u_m$. Then, since $f^m$ is an isometry, $f^m(L_m)$ and $f^m(L_m')$ would be two different geodesic segments connecting $u_0$ and $u_{2m}$, contradicting the uniqueness part of Theorem 1.2.
It is now clear that both $f^{-m}(u_0)$ and $f^{m}(u_0)$ tend to the boundary $\partial \mathcal{C}(S)$ as $m\rightarrow +\infty$, and
$$
\mathscr{L}_{u_0}=[\cdots, f^{-m}(u_0), \cdots, f^{-1}(u_0), u_0, f(u_0), \cdots, f^m(u_0), \cdots ]
$$
is an invariant bi-infinite geodesic under the action of $f^j$ for any $j$. We then define the map $\mathscr{I}$ by sending $\tilde{u}_0$ to $\mathscr{L}_{u_0}$.

Let $u_0'\in F_{\tilde{u}_0}$ be such that $u_0\neq u_0'$ and $\mbox{axis}(g)\cap \Omega_0'\neq \emptyset$. We have $\tilde{u}_0=\tilde{u}_0'$, and hence $\Omega_0'\in \mathscr{R}_{\tilde{u}_0}$. Since, by assumption, $\mbox{axis}(g)\cap \Omega_0'\neq \emptyset$, there is $j\in \mathbf{Z}$ such that $\Omega_0'=g^j(\Omega_0)$. This shows that $\mathscr{L}_{u_0}=\mathscr{L}_{u_0'}$. Thus the map $\mathscr{I}$ is well defined.

Assume that $\tilde{u}_0, \tilde{v}_0\in \mathcal{C}_0(\tilde{S})$ are such that $\tilde{u}_0\neq \tilde{v}_0$ and $i(\tilde{c},\tilde{u}_0)=i(\tilde{c},\tilde{v}_0)=1$. The vertices $u_0, v_0\in \mathcal{C}_0(S)$ are chosen so that they satisfy

(i) $u_0\in F_{\tilde{u}_0}$, $v_0\in F_{\tilde{v}_0}$, and \\
\indent (ii) $\Omega_{u_0}\cap \mbox{axis}(g)\neq \emptyset$ and $\Omega_{v_0}\cap \mbox{axis}(g)\neq \emptyset$.

\noindent By the argument above,
$$
\mathscr{L}_{v_0}=[\cdots, f^{-m}(v_0), \cdots, f^{-1}(v_0), v_0, f(v_0), \cdots, f^m(v_0), \cdots ]
$$
is also an invariant bi-infinite geodesic under the action of $f^j$ for any $j$. To show that $\mathscr{I}$ is injective, i.e., $\mathscr{L}_{u_0}\neq \mathscr{L}_{v_0}$, we only need to show that $v_0$ is not a vertex in $\mathscr{L}_{u_0}$. Suppose that $v_0=f^i(u_0)$ for some $i\in \mathbf{Z}$. Since $f\in \mathscr{F}$, $f$ is isotopic to the identity on $\tilde{S}$ once $x$ is filled in. It follows that $v_0$ is freely homotopic to $u_0$ when $u_0$ and $v_0$ are both viewed as curves on $\tilde{S}$; that is, $\tilde{u}_0=\tilde{v}_0$. This contradicts the assumption that $\tilde{u}_0\neq \tilde{v}_0$. The argument above also shows that $\mathscr{L}_{u_0}$ and $\mathscr{L}_{v_0}$ are disjoint bi-infinite geodesics in $\mathcal{C}(S)$.

Since $\mathscr{F}^*$ is isomorphic to the fundamental group $\pi_1(\tilde{S}, x)$, it does not contain any elliptic elements. Thus (1) in Theorem 1.1 is a special case of Lemma 2.1. \qed
\section{Introduction}
This paper gives a proof-of-concept practical application of the recently developed statistical integrating decision support system (IDSS) paradigm. An IDSS is developed for policymakers concerned with deciding between candidate policies designed to ameliorate household food insecurity, within the UK context of rising food charity use.

\subsection{Food Security}
Food security exists when all people, at all times, have physical and economic access to sufficient, safe and nutritious food to meet their dietary needs and food preferences for an active and healthy life \citep{FAO1996}. Missing meals and changing diet are common responses to food insecurity, which may persist over extended periods, leading to adverse health effects, especially in children \citep{Seligman2010}. Food insecurity can result in an increased risk of death or illness from stunting, wasting, weakened responses to infection, diabetes, cardiovascular diseases, some cancers, food-borne disease and mental ill health, via insufficient quantity or poor nutritional quality of food, contaminated foods, or social exclusion \citep{Friel2015}. Rising food insecurity has been strongly associated not just with malnutrition, but with sustained deterioration of mental health, inability to manage chronic disease, and worse child health \citep{Loopstra2015a, LoopstraThesis2014}. Food insecurity is associated with hypertension and hyperlipidemia, which are cardiovascular risk factors. It is also associated with poor glycaemic control in those with diabetes, whose additional medical expenses exacerbate their food insecurity \citep{NHANESLee2019}. Food insecurity has been found to affect school children's academic performance, weight gain, and social skills \citep{Faught2017}. Whilst obesity is more prevalent among food-insecure women, controlling for BMI did not attenuate the association between food insecurity and chronic disease \citep{Pan2012}.

\subsection{The UK picture}
The recent increase in food insecurity in the UK is well known through the much-publicised rise in the uptake of humanitarian aid, principally through food banks, and the corresponding increase in their number \citep{Loopstra2015}. As a nation, the UK is wealthy and one of the world's most food secure; in 2017 it ranked 3rd of 113 countries, just after Ireland and the USA \citep{GlobalFoodSecurityIndex}, but by December 2019 it had declined to 17th place. In 2013, a letter published in the BMJ \citep{Taylor-Robinson2013} on the rise of food poverty in the UK alerted readers to the fact that the number of malnutrition-related admissions to hospital had doubled since 2008/9. When food parcel distribution by the Trussell Trust exceeded one million in 2014/15, this was interpreted by some as evidence that the UK government was not fulfilling its legal duty under the International Covenant on Economic, Social and Cultural Rights \citep{ICESCR} to take appropriate steps to realise the right of everyone to be free of hunger. In the year ending March 2019, more than 1.6 million parcels were distributed, and in the six months to September 2019, the number of parcels had risen by 23\% on the previous year \citep{Trussell2019}. Persistent and widespread low pay, the proliferation of zero-hours contracts and rising living costs, especially food prices, have been suggested as contributory factors in the increase in food insecurity, and the health consequences of inadequate diets have also been raised by health professionals \citep{CSI13}.
Relative to other advanced western economies, Britain had higher general inflation, higher food, fuel and housing price inflation, and lower growth in wages in the years immediately following the 2008 global financial crisis. The UK also has a history of very large numbers of very low paid employees; many of those accessing food banks are in work \citep{APPG2014}.

For many years, the exact scale of the problem in the UK was unknown. This was because there was no systematic, national assessment of the numbers of households experiencing food insecurity, only small-scale studies \citep{Pilgrim2012, Tingay2003}. However, from 2016, the Food Standards Agency included the Adult Food Security Module of the USDA Household Food Security Survey (HFSS) \citep{HFSS_adult} in the bi-annual Food and You Survey. The HFSS contains 10 items for households without children and 18 items for households with children (ages 0--17) to assess their experiences over the last 12 months. The HFSS classifies a household as food insecure when the respondent reports three or more food-insecure conditions, and as having very low food security if at least one member experienced reduced food intake or if insufficient resources for food disrupted eating patterns. The latest UK survey, Wave 5 (2018) \citep{FoodandYou5}, found that 80\% of respondents lived in households with high food security, 10\% in households classified as marginally food secure, and 10\% in households with low or very low food security. There is more food insecurity amongst families with children: those who lived with children under the age of 16 were less likely than those with no children to have high levels of food security (70\% compared with 84\%). Employment and income are key determinants of food security. Nearly a quarter (23\%) of unemployed people lived in households with very low food security, compared to 4\% of those in work. In the lowest income group, 59\% of households had high food security, increasing with income to 93\% in the highest income households. Among households in the lowest income groups, 13\% had very low food security (compared with less than 1\% of those in the highest income households).

\subsection{Comparison with the USA and Canada}
Like the UK, the USA and Canada are wealthy nations with significant household food insecurity. In contrast to the UK, the USA and Canada have undertaken regular monitoring of household food security over many years through the HFSS module within regular household surveys \citep{Canada2016}. This means that research on determinants and rates of food insecurity over time is more advanced and detailed in the USA and Canada than in the UK.

The USA and Canada are similar to the UK in their profiles of poverty and types of government, which allows us to draw on their research where UK data and evidence are sparse. In 2017--18, the UK absolute poverty rate was 19.0\%, ranging from 26.5\% among children to 13.5\% among pensioners \citep{UKPoverty2019}. In the USA, the official poverty rate in 2018 was 11.8\%; for children under age 18 it was 16.2\%, for people aged 18 to 64, 10.7\%, and for people aged 65+, 9.7\% \citep{USAPoverty2019}. In Canada, the official poverty rate is 9.5\% overall and 9.0\% for children; 3.9\% of seniors were living in poverty in 2017 \citep{CanadaPoverty2017}, although the Market Basket Measure has been criticised for omitting housing and childcare costs.
The Canadian Low Income Measure (50\% of median income, adjusted for family size) stood at 12.9\% in 2017 on an after-tax basis. In 2018, 11.1\% of households in the USA were food insecure and 4.3\% had very low food security. In Canada the corresponding figure was 12.3\% in 2011, the latest available, with 2.5\% of households having very low food security \citep{LoopstraThesis2014, Tarasuk2010}.

\begin{table}
\caption{Poverty measures across three countries. The UK absolute poverty rate measures the fraction of the population with household income below 60\% of median income in 2010--11, updated by the Consumer Prices Index. The USA Census Bureau uses a set of dollar-value thresholds that vary by family size and composition to determine poverty. Canada uses the Market Basket Measure, the concept of an individual or family not having enough income to afford the cost of a basket of goods and services.\label{table:Pov}}
\centering
\begin{tabular}{|l c c c|}
\hline
 & UK & USA & Canada \\
\hline
Overall & 19.0\% & 11.8\% & 9.5\% \\
Child poverty & 26.5\% & 16.2\% & 9.0\% \\
Working adults with no children & 16.4\% & -- & -- \\
Adults 18--64 & -- & 10.7\% & -- \\
Pensioners & 13.5\% & 9.7\% & 3.9\% \\
Food security low (very low) & 10.0\% & 11.1\% (4.3\%) & 12.3\% (2.5\%)\\
\hline
\end{tabular}
\end{table}

\subsection{Need for decision support}
There is a need to gather what information does exist for the UK in order to ascertain the principal drivers of household food security, to support policy-makers in designing policy to tackle food insecurity, and to evaluate other policies which may have an impact on it.

In ever-larger dynamic systems, such as food security, it is increasingly difficult for decision makers to account effectively for all the variables within the system that may influence the outcomes of interest under the enactment of various candidate policies. In particular, government policies on welfare, farming, the environment, employment, health, and so on all have an impact on food security at various levels. Each of the influencing variables is likely itself to be a dynamic sub-system with its own domain expertise, often supported by sophisticated probabilistic models. Within the food system, examples include medium- to long-range weather forecasting, which influences food supply and may involve large numerical models, and economic models, such as autoregressive or moving-average models, which estimate the behaviour of global markets and prices under various plausible scenarios. The emerging crisis in the UK is not merely a matter for charity, but of great concern to policymakers, who are legally and morally obligated to act but may lack recent experience in dealing with needs of this kind and scale, and so require decision support. This paper proposes an integrating decision support system (IDSS) \citep{SmithBarons2015, Barons2017} for household food security in the UK. The IDSS is a computer-based tool which integrates the uncertainties of different parts of a complex system and addresses the decision problem as a whole.

\subsection{Practical considerations}
In \cite{Barons2017}, we detail the iterative manner of the development of an IDSS with its decision-makers and expert panels. Before the elicitation starts, it is always necessary to do some preparatory work. With the help of various domain experts, the analyst will need to trawl any relevant literature and check which hypotheses found there might still be current.
We repeatedly review the qualitative structure of the IDSS in light of the more profound understanding of the process acquired through more recent elicitation. This modification and improvement continues until the decision centre is content that the structure is requisite \citep{Phillips1984}. Since the process of model elicitation is an iterative one, it is often wise to begin with some simple utility measures, proceed with an initial structural model elicitation, and then revisit the initial list of attributes of the utility; detailed exploration of the science, economics or sociology can prompt the decision centre to become fully aware of the suitability of certain types of utility attribute measures. By focusing the centre and its expert panels on those issues that really impact on final outcomes we can vastly reduce the scope of a potentially enormous model; only those features that might be critical in helping to discriminate between the potential effectiveness of one candidate policy against another are required. If there is strong disagreement about whether or not a dependency exists in the system, then we assume initially that a dependency does exist, except where the consensus is that its effect is weak. Further iterations of the model building process usually clarify the understanding, and if not, a sensitivity analysis can usually distinguish a meaningful inclusion from others. The decision centre also needs to decide what time step is the most natural one to use for the purposes of the specific IDSS. This choice depends on the speed of the process, how relevant data is routinely collected on some of the components, and some technical acyclicity assumptions that are typically known only to the decision analysts. There may be conflict between the granularity of the informing economic models of the process, sample survey regularity, and the needs of the system. The granularity needed is driven by the granularity of the attributes of the utility. In addition, decision analysts need to match precisely the outputs of a donating panel with the requirements of a receiving panel. When these do not naturally align, some translation, possibly a bespoke model, may be needed between them. When expert panels design their own systems, sometimes the internal structure of one component can share variables with the internal structure of another. So, for example, flooding could disrupt both the production of food and its distribution, and yet these might be forecast using different components. In such cases, the coherence of the system will be lost; the most efficient way to ensure ongoing coherence is to separate out the shared variables and ask the panels concerned to take as inputs probability distributions from an expert panel on the shared variable, here flood risk. One element of these IDSSs is the way they can appropriately handle the uncertainties associated with the various modules. This is vital to reliable decision making. For example, if the inputs from one module are very speculative, and so have a high variance, then policies that work well over a wide range of such inputs will, under the sorts of risk-averse decisions we have here, tend to be preferred to ones whose efficacy is very sensitive to such inputs. That is why we need conditional inputs to communicate such uncertainties. \section{Integrating decision support systems\label{DDSS}} Integrating decision support systems were introduced in \cite{SmithBarons2015} and \cite{Smith2016} and are briefly reviewed in Section \ref{DDSS2}.
The IDSS aids decision makers in the understanding of a problem by providing a clear evaluation and comparison of the possible options available. It combines expert judgement with data for each subsystem, resulting in a full inferential procedure able to represent complex systems. However, decision support systems often require sophisticated architectures and algorithms to calculate the outputs needed by the decision-makers to inform policy selection when the system is composed of many multi-faceted stochastic processes. There is currently no generic framework or software which is capable of faithfully expressing underlying processes for the scale of problems under consideration here, nor sufficiently focused to make calculations quickly enough for practical use in a dynamic, changing environment. In this application, the framework knitting together the different component subsystems in the IDSS is the dynamic Bayesian network \citep{West97}. In particular, the model can be seen as a multiregression dynamic model (MDM) \citep{Queen1993}. Here this framework is extended to allow variances to vary stochastically over time. This approach is suitable because regression models are well understood, but we need to allow for the fact that within this application regression coefficients can drift in time. The dynamic model also allows for separability of the different components of the series. A simulation algorithm is developed which enables decision making to be fast and dynamic over time, even for a large system with many dependent variables and time points with nonlinear characteristics. Using the MDM, we can model shocks to the system within the given framework by introducing change points. This sort of property is exploited in brain imaging \citep{Costa2019}. Within each of the expert panels lies a complex sub-network of variables. We use a BN/DBN for all the modules, since these are very well developed methods used in many analogous applications, with supporting software easily available. In Section \ref{DDSS2}, the integrating decision support system methodology is briefly reviewed. Section \ref{IDSSFood} details the model and variables used for utility computation in the context of food security in the UK. Then Section \ref{Out} presents the outputs and policy evaluation for the food security system. We end the paper with a short discussion of our findings and the planned next steps in this research programme. \subsection{Technical underpinning \label{DDSS2}} In this section, we briefly review these recent methodological developments to support inference for decision support as they apply here. Full details and proofs are provided in \citep{Smith2016}. Consider a vector of random variables relevant to the system $\mathbf{Y}=(Y_1,\ldots, Y_n)$. Typically, there are expert panels with expertise in particular aspects of the multivariate problem. The most appropriate expert panels for each sub-system are identified; each sub-panel will defer to the others, adopting their models, reasoning and evaluations as the most appropriate domain experts. Each expert panel, $G_i$, is responsible for a subvector $\mathbf{Y}_{B_i}$ of $\mathbf{Y}$, with $B_1,\ldots,B_m$ a partition of $1,\ldots,n$. The multivariate problem is then decomposed into sub-models. The joint model thus accommodates the diversity of information coming from the different component models and deals robustly with the intrinsic uncertainty in these sub-models.
Decisions $d\in \mathcal{D}$ will be taken by a decision maker (DM), where $\mathcal{D}$ represents the set of all policy options that it plans to consider. In the context of large problems like this, the decision-maker is often a centre composed of several individuals. These individuals are henceforth assumed to want to work together constructively and collaboratively, supported by a probabilistic decision tool that can provide a benchmark evaluation of each $d\in \mathcal{D}$ given the underlying processes that drive the dynamics of the unfolding scenario. However, to use the Bayesian paradigm, we assume that this centre will strive to act as a single rational person would when that person is the owner of the beliefs expressed in the system, so that the need for coherence is satisfied. The DM receives information from each panel and reaches a conclusion that depends on a reward function $R(\mathbf{Y},d)$, $\mathbf{Y}\in R_Y$, $d\in {\mathcal{D}}$. For this level of coherence, we must be able to configure the panels and their relationships so that certain assumptions are satisfied. Below we briefly outline what these assumptions need to be. More generic descriptions can be found in \citep{Smith2016}. We introduce some notation: for each $i=1, \ldots,m$, let the subvector $\mathbf{Y}_{B_i}$ delivered by $G_i$ depend on a function ${\mathcal{L}}_i(\mathbf{Y}_{B_i})$. Each panel $G_i$ provides a model for $\mathbf{Y}_{B_i}\mid {\mathcal{L}}_i(\mathbf{Y}_{B_i}),\boldsymbol{\theta}_{B_i},d$, and prior information about $\boldsymbol{\theta}_{B_i}$. Each panel $G_i$ will deliver summaries denoted by $S_i^y({\mathcal{L}}(Y),d)$, which are expectations of functions of $Y$ conditional on the values of ${\mathcal{L}}(Y)$ for each decision $d\in {\mathcal{D}}$. Let $U(R(\mathbf{Y},d))$ be the utility function for decision $d\in \mathcal{D}$. Our main goal is to compute the expected utilities $\{\bar U(d):\; d\in {\mathcal{D}}\}$ of the decision maker. To be formally valid, any IDSS must respect a set of common knowledge assumptions shared by all panels, comprising the union of the utility, policy and structural consensus, described as follows. \begin{enumerate} \item {\bf Structural consensus:} The structural consensus requires that all the experts agree, in a transparent and understandable manner, the qualitative structure of the problem in terms of how different features relate to one another and how the future might unfold within the system. Formally, these can be couched in terms of sets of irrelevance statements. We propose such a structure in Figure \ref{UKDBN}. There needs to be an agreed narrative of what might happen within each component of the system, based on best evidence. Also, for each component, there needs to be a quantitative evaluation of how the critical variables might be affected by the developing environment when appropriate mitigating policies are applied. Where there are agreed sets of irrelevance statements, and the semigraphoid axioms are assumed to hold \citep{Smith2010}, these can be used to populate the common knowledge framework belonging to a decision centre. \item {\bf Utility consensus:} requires all to agree \emph{a priori} on the class of utility functions supported by the IDSS and the types of preferential independence across its various attributes it will need to entertain (such as value independence, mutually utility independent attributes \citep{Keeney1993} and more sophisticated versions, see \cite{Leonelli2015}).
Sections \ref{UF1} and \ref{UF2} give details of the multiattribute utility, its measurement and rationale. \item {\bf Policy consensus:} must be sufficiently rich to contain the set of policies that might be adopted and an appropriate utility structure against which the efficacy of these different policies might be scrutinised. \item {\bf Adequate:} An adequate IDSS is one which is able to unambiguously calculate an expected utility score for each policy that might be adopted on the basis of the panels' inputs. Whether or not a system is adequate should be immediate from the formulae of a given probabilistic composition (see \cite{Smith2016} for an illustrative example). \item {\bf Sound:} A sound IDSS is one which is both adequate and allows the decision-maker, by adopting the structural consensus, to admit coherently all the underlying beliefs about a domain overseen by a panel as her own, and so accept the summary statistics donated by the panels to the IDSS. \item {\bf Distributive:} For such a system to be formal and functional, each component panel must be able to reason autonomously about those parts of the system it oversees, and the centre must be able legitimately to adopt the delivered judgements as its own. The semigraphoid axioms provide the means to satisfy this requirement, and panel autonomy liberates each panel of domain experts to produce their quantitative domain knowledge in the way most appropriate for their domain and using their own choice of probability models. They can update their beliefs through any models they might be using and continually refine their inputs to the system without disrupting the agreed overarching structure and its quantitative narrative. \item {\bf Separately informed:} An essential condition for panel autonomy is that panels are separately informed. This requirement can be subdivided within a Bayesian framework into two conditions, prior panel independence and separable likelihood, using the usual properties of conditional independence. The first of these is a straightforward generalisation of the global independence assumption within Bayesian inference \citep{Cowell1999}. The second, the assumption that the collection of data sets gives a likelihood that separates over the subvectors of panel parameters, is far from automatic and is almost always violated when there are unobserved confounders or missing data. In such circumstances, one approach is to devise appropriate approximations. \item {\bf Admissibility protocols:} Another approach is to impose an admissibility protocol on the information used to make inferences within the system, analogous to the quality-of-evidence rules within the Cochrane Database of Systematic Reviews. When data is derived from well-designed experiments, randomisation and conditioning often lead to a likelihood which is a function only of its own parameters, and so trivially separates. When there is a consensus that a quantitative causal structure is a \emph{causal} Bayesian network, dynamic Bayesian network, chain event graph or multiprocess model, and the IDSS is sound (delegable, separately informed and adequate), then the IDSS remains sound under a likelihood composed of ancestral sampling experiments and observational sampling \citep{Smith1997}.
\item {\bf Transparent:} In such a distributive framework, any query made by another panellist or an external auditor can be referred to the expert panel donating the summaries in question, which can provide a detailed explanation of its statistical models, data, expert judgements and other factors informing how its evaluations have been arrived at and why the judgements expressed are appropriate. \end{enumerate} For a distributive IDSS, the question then becomes precisely which information each of the panels needs to donate about their areas of expertise for the expected utility scores to be calculated. Provided that the utility function is in an appropriate polynomial form, each panel need deliver only a short vector of conditional moments rather than entire distributions, because this type of overarching framework embeds collections of conditional independences allowing the use of tower rule recurrences \citep{Leonelli2015}. This allows fast calculation and propagation algorithms to be embedded within the customised IDSS for timely decision-making. In such a system, individual panels can easily and quickly perform prior to posterior analyses to update the information they donate when relevant new information comes to light, and this can be propagated to update the expected utility scores; this quality is especially useful within decision support for an emergency, but in any circumstances represents a huge efficiency gain over having to rebuild and re-parameterise a large model. There are a number of frameworks which satisfy the requirements of the IDSS properties, including staged trees, Bayesian networks, chain graphs, multiregression dynamic models and uncoupled dynamic BNs. The paradigm outlined here will be illustrated throughout the remainder of the paper through a proof-of-concept application to an IDSS for government policy for household food security in the UK, using a Bayesian network as the overarching framework. \subsection{BN and Dynamical BN\label{subBN}} Bayesian networks (BNs) and their dynamic analogues are particularly suited to the role of decision support, as they represent the state of the world as a set of variables and model the probabilistic dependencies between the variables. They are able to build in the knowledge of domain experts, provide a narrative for the system and can be transparently and coherently revised as the domain changes. A Bayesian network is formally defined as a directed acyclic graph (DAG) together with a set of conditional independence statements of the form `$A$ is independent of $B$ given $C$', written $A \perp B\mid C$. They are a simple and convenient way of representing a factorisation of the joint probability density function of a vector of random variables $\mathbf{Y}=(Y_1, Y_2, \ldots, Y_n)$. Each node has a conditional probability distribution, which in the case of discrete variables will be a conditional probability table (CPT). In this model, ${\mathcal{L}}_i(\mathbf{Y}_{B_i})=\mathbf{Y}_{\Pi_i}$, with $\Pi_i$ the indices of the parents of $Y_i$. The joint density of $\mathbf{Y}$ may be written as $$f(\mathbf{y}\mid d)=\prod_{i\in [n]}f_i(y_{B_i}\mid y_{\Pi_{B_i}},d).$$ Assume that $U(R(\mathbf{Y},d))=\sum_{i\in [m]} k_i \; U_i(R_i(\mathbf{Y}_{B_i},d))$.
Thus the expected utility is given by $\bar U(d)=\sum_{i\in [m]} k_i \; \bar U_i(d\mid y_{\Pi_i})$, with $$\bar U_i(d\mid y_{\Pi_i})=\int_{\Theta_{B_i}}\int_{R_{y_{B_i}}} U_i(R_i(y_{B_i},d))\; f_i(y_{B_i}\mid y_{\Pi_i},\theta_{B_i},d)\; \pi_i(\theta_{B_i}\mid d)\, dy_{B_i}\,d\theta_{B_i}.$$ Dynamic Bayesian networks (DBNs) are able to accommodate systems which change over time \citep{Dean1990}. DBNs are a series of BNs created for different units of time, each BN called a time slice. The time slices are connected through temporal links to form the full model. DBNs can be unfolded in time to accommodate the probabilistic dependencies of the variables within and between time steps. It is usually assumed that the configuration of the BN does not change over time, i.e.\ the dependencies between variables are static. Consider the general setting such that \begin{equation} \mathbf{Y}_{it} \perp \mathbf{Y}_{Q_i}^t\mid \mathbf{Y}_{\Pi_i}^t,\mathbf{Y}_i^{t-1},\; i=1,\ldots, n, \end{equation} where $\{\mathbf{Y}_t:\; t=1,\ldots,T\}$ is a multivariate time series composing a DAG whose vertices are univariate processes, $\Pi_i$ is the index set of the parents of $Y_{it}$, and $\mathbf{Y}_i^t=(Y_{i1},\ldots,Y_{it})'$ is the historical data. Thus, the model assumes that each variable at time $t$ depends on its own past series, the past series of its parents and the value of its parents at time $t$. This results in the joint density function \begin{equation} f(\mathbf{y})=\prod_{t=1}^{T}\prod_{i=1}^{n}f_{i,t}(y_{it}\mid y_{\Pi_i}^t,y_i^{t-1}). \end{equation} The observation and system equations are defined as \begin{eqnarray}\nonumber Y_{it} & = & F_{it} \theta_{it} + \epsilon_{it},\\ \nonumber \theta_{it} & = & G_{it} \theta_{i,t-1} + \omega_{it}, \end{eqnarray} with $\epsilon_{it}\sim N[0,V_{it}]$ and $\omega_{it}\sim N[0,W_{it}]$. The errors are assumed to be independent of each other and through time, and $F_{it}$, $G_{it}$ are assumed to be known. Given the initial information, $\theta_{i0}\mid {\mathcal{I}}_0\sim N[m_{i0},C_{i0}]$. The parameters $\theta_{it}$, $i=1,\ldots,n$, may be updated independently given the observations at time $t$. Conditional forecasts may also be obtained independently. These results are proved in \cite{Queen1993} assuming Gaussian distributions for the error terms. The predictive density is given by \begin{eqnarray}\nonumber f(\mathbf{y}_t\mid \mathbf{y}^{t-1})& = & \int_{\Theta} f(\mathbf{y}_t\mid \mathbf{y}^{t-1},\theta_t)\; \pi(\theta_t\mid \mathbf{y}^{t-1})\; d\theta_t\\ \nonumber & = & \prod_{i=1}^{n}\int_{\Theta_i} g_{it}(y_{it}\mid y_{\Pi_i}^t,y_i^{t-1},\theta_{it})\pi_i(\theta_{it}\mid y_{\Pi_i}^{t-1},y_i^{t-1})d\theta_{it}. \end{eqnarray} Let $\textbf{D}_{t}=(\mathbf{y}_t,\textbf{D}_{t-1})$ be the information available at time $t$. Inference about ${\boldsymbol{\theta}}_t$ is based on the forward filtering equations, which yield the posterior moments at time $t$.
\begin{itemize} \item[--] Posterior distribution at time $t-1$: $ {\boldsymbol{\theta}}_{i,t-1}\mid \textbf{D}_{t-1} \sim N[\textbf{m}_{i,t-1},C_{i,t-1}]; $ \item[--] Prior distribution at time $t$: $ {\boldsymbol{\theta}}_{it}|\textbf{D}_{t-1} \sim N[\textbf{a}_{it},R_{it}],$ \noindent with $\textbf{a}_{it} = G_{it}\textbf{m}_{i,t-1}$ and $R_{it}=G_{it}C_{i,t-1}G'_{it}+W_{it};$ \item[--] One-step-ahead prediction: $ \textbf{y}_{it}\mid \textbf{y}_{\Pi_i,t}, \textbf{D}_{t-1} \sim N[\textbf{f}_{it},Q_{it}], $ \noindent with $\textbf{f}_{it} = \textbf{F}'_{it}\textbf{a}_{it}$ and $Q_{it}=\textbf{F}'_{it}R_{it}\textbf{F}_{it}+V_{it}$; \item[--] Posterior distribution at time $t$: ${\boldsymbol{\theta}}_{it}\mid \textbf{D}_{t} \sim N[\textbf{m}_{it},C_{it}],$ \noindent with $\textbf{m}_{it} = \textbf{a}_{it}+\textbf{A}_{it}\textbf{e}_{it}$ and $C_{it}=R_{it}-\textbf{A}_{it}Q_{it}\textbf{A}'_{it}$, where $\textbf{e}_{it}=\textbf{y}_{it}-\textbf{f}_{it}$ and $\textbf{A}_{it}=R_{it}\textbf{F}_{it}Q_{it}^{-1}$. \end{itemize} If data is observed from time $1$ to $T$, then backward smoothing may be used to obtain the posterior moments of $\theta_{it}\mid D_T$, $t=1,\ldots,T$. Thus, $$\theta_{it}|\theta_{i,t+1},\textbf{D}_T \sim N(\textbf{h}_{it},H_{it}),$$ with $\textbf{h}_{it} = \textbf{m}_{it}+C_{it} G'_{i,t+1}R_{i,t+1}^{-1}(\theta_{i,t+1}-\textbf{a}_{i,t+1})$, $H_{it} = C_{it} - C_{it}G'_{i,t+1}R_{i,t+1}^{-1}G_{i,t+1}C_{it}$, and initial values $\textbf{h}_{iT} = \textbf{m}_{iT}$ and $H_{iT} = C_{iT}$. \\ The variance evolution follows \cite{West97}, which defines $V_{it}=V/\phi_{it}$ and $\phi_{i,t-1}\mid D_{t-1}\sim Gamma(n_{i,t-1}/2,d_{i,t-1}/2)$. The gamma evolution model is given by $$\phi_{it}\mid D_{t-1}\sim Gamma(\delta_i n_{i,t-1}/2,\delta_i d_{i,t-1}/2),$$ with $\delta_i \in (0,1)$ the discount factors. The posterior distribution at time $t$ is obtained analytically as $\phi_{it}\mid D_t \sim Gamma(n_{it}/2,d_{it}/2)$ with $n_{it}=\delta_i n_{i,t-1}+1$ and $d_{it}=\delta_i d_{i,t-1}+S_{i,t-1}e_{it}'Q_{it}^{-1}e_{it}$, where $S_{i,t-1}= d_{i,t-1}/ n_{i,t-1}$. This conjugacy results in closed-form recurrence updating equations for this variance model. \subsection{Expected utility computation and scenario evaluation} Suppose that $\theta_{1:T}$ has been simulated using the forward filtering and backward sampling algorithm described in subsection \ref{subBN}. The posterior predictive distribution for a replicated observation $\tilde y$ is given by \begin{eqnarray}\nonumber f(\mathbf{\tilde y}_{t}\mid \mathbf{y}^{t})& = & \int_{\Theta} f(\mathbf{\tilde y}_t\mid \mathbf{y}^{t},\theta_t)\; \pi(\theta_t\mid \mathbf{y}^{t})\; d\theta_t\\ \nonumber & = & \prod_{i=1}^{n}\int_{\Theta_i} g_{it}(\tilde y_{it}\mid \tilde y_{\Pi_i}^t,\tilde y_i^{t-1},\theta_{it})\pi_i(\theta_{it}\mid y_{\Pi_i}^{t},y_i^{t})d\theta_{it}. \end{eqnarray} The predictive distribution of a new observation $\tilde y_{it}$ may be obtained by simulating from $g_{it}(\cdot\mid \tilde y_{\Pi_i}^t,\tilde y_i^{t-1},\theta_{it})$. If $U(\mathbf{\tilde{y}}_{t},d)$ is a linear function of $\mathbf{\tilde y}_t$, the expected utilities may be computed analytically using the chain rule of conditional probabilities. If $U(\mathbf{\tilde{y}}_{t},d)$ is a nonlinear function of $\mathbf{\tilde{y}}_{t}$, then expected values are computed by Monte Carlo integration \citep{Rob04}. Note that a certain ordering in computing the expectations needs to be followed, starting from the variables such that $\mathcal{L}_i(\mathbf{Y}_{it})=\emptyset$, then their descendants, and so on.
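To make the recurrences above concrete, the following minimal sketch (in Python with numpy; all names are illustrative, and the model is a univariate special case rather than the full multivariate system used in this paper) implements the forward filtering equations and a Monte Carlo estimate of an expected utility over one-step-ahead predictive draws.
\begin{verbatim}
import numpy as np

def forward_filter(y, F, G, V, W, m0, C0):
    """Forward filtering for a univariate DLM (the recurrences above).
    y: observations y_1..y_T; F, G: known scalars; V, W: observation
    and evolution variances; m0, C0: initial prior moments."""
    T = len(y)
    m, C = np.empty(T), np.empty(T)
    f, Q = np.empty(T), np.empty(T)
    m_prev, C_prev = m0, C0
    for t in range(T):
        a = G * m_prev            # prior mean      a_t = G m_{t-1}
        R = G * C_prev * G + W    # prior variance  R_t = G C_{t-1} G' + W
        f[t] = F * a              # forecast mean   f_t = F' a_t
        Q[t] = F * R * F + V      # forecast var    Q_t = F' R_t F + V
        A = R * F / Q[t]          # adaptive coefficient A_t
        e = y[t] - f[t]           # one-step forecast error e_t
        m[t] = a + A * e          # posterior mean  m_t
        C[t] = R - A * Q[t] * A   # posterior var   C_t
        m_prev, C_prev = m[t], C[t]
    return m, C, f, Q

def expected_utility(f_next, Q_next, utility, n_draws=10000, seed=0):
    """Monte Carlo expected utility over the predictive N[f_next, Q_next].
    `utility` is assumed to accept an array of draws."""
    rng = np.random.default_rng(seed)
    draws = rng.normal(f_next, np.sqrt(Q_next), size=n_draws)
    return float(np.mean(utility(draws)))
\end{verbatim}
For a linear utility the Monte Carlo step is unnecessary, since the expectation is available in closed form from $f$ and $Q$; the sampling version is shown because the utilities used later are nonlinear.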
In addition, the types of overarching descriptions suitable for these applications must be rich enough to explore both the effects of shocks to the system and the application of policies. These can be conveniently modelled through chains of causal relationships, where causal means that there is an implicit partial order on the objects in the system and we assume that the joint distributions of variables not downstream of a controlled variable remain unaffected by that control. The downstream variables respond to a controlled variable in the same way as if the controlled variable had simply taken that value. This is the assumption underlying designed experiments. \section{IDSS: UK Food security\label{IDSSFood}} \subsection{Utility function elicitation} \label{UF1} In every decision support scenario, it is essential to clarify the goals of the decision-maker (DM). Support for household food security in the UK is provided through local government, typically city or county councils, via their financial inclusion and child poverty policies. The goal of a city or county council in the UK is to fulfil its statutory obligations to the satisfaction of central government. Whenever possible, councils wish to go beyond mere compliance and continually improve the lives of the citizens within their geographic region, with a special focus on improving the circumstances of the most disadvantaged. In order to construct an IDSS for food security, the next step is to define the utility function and develop a suitable mathematical form for it. One requirement of the attributes of a utility function is that they must be measurable; it must be possible to say whether an event has happened or a threshold has been reached. One candidate measure of household food security would be data from food bank charities. However, studies have shown that food bank use is not a good measure of food poverty \citep{Tarasuk2009,USDA2016}. In the absence of a direct measure of household food security in the UK, the decision-maker needs a good proxy in order to construct a suitable utility function. Council officers identified education, health and social unrest as suitable attributes of a utility. In constructing a utility function based on these attributes, it appeared appropriate to assume value independence \citep{Keeney1993}. Let $Z_1=$ measures of education, $Z_2=$ measures of health, $Z_3=$ measures of social unrest, and $Z_4=$ cost of the ameliorating policies to be enacted. The forms of the marginal utility functions then needed to be specified. The marginal utility for each of social unrest, health and education was assumed exponential, whilst the utility on cost was assumed linear. It was therefore decided that one family of appropriate utility functions might take the form: \begin{equation} U(z)=a+bz_4+\sum_{i=1}^3 \big(1-\exp({-c_i z_i})\big), \label{Eq:WarwickshireUtility} \end{equation} where $z=(z_1, z_2, z_3, z_4)$ and whose parameters $(a, b, c_1, c_2, c_3)$ were then elicited. In what follows, observable variables are defined as proxies for the attributes required to compute the utility function in (\ref{Eq:WarwickshireUtility}).
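For illustration only, once its parameters have been elicited the utility (\ref{Eq:WarwickshireUtility}) is straightforward to evaluate; a minimal Python sketch follows, in which the parameter values shown are placeholders rather than elicited quantities.
\begin{verbatim}
import numpy as np

def utility(z, a=0.0, b=-1.0, c=(1.0, 1.0, 1.0)):
    """Multiattribute utility: linear in the cost attribute z4,
    exponential in education (z1), health (z2) and social unrest (z3).
    z = (z1, z2, z3, z4); (a, b, c1, c2, c3) are elicited parameters."""
    z1, z2, z3, z4 = z
    return a + b * z4 + sum(1.0 - np.exp(-ci * zi)
                            for ci, zi in zip(c, (z1, z2, z3)))
\end{verbatim}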
\subsection{Measuring the attributes in the utility function} \label{UF2} The utility function depends on the proxy variables for health and education, which are defined as follows.\\ \textbf{Health:} Suppose the expert panellists define a proxy as a function of the number of admissions to hospital with a diagnosis of malnutrition (primary or secondary) and the number of deaths with malnutrition listed on the death certificate as either a primary or secondary cause. Admissions data are available in the Hospital Episode Statistics (HES) from the UK government's Health and Social Care Information Service, which routinely links UK Office for National Statistics (ONS) mortality data to HES data. In the UK, the number of deaths caused primarily by malnutrition is very low, and rates are not significantly different over time. Moreover, malnutrition is usually accompanied by other diagnoses such as diseases of the digestive system, cancers, dementia and Alzheimer's disease; the increase in deaths with malnutrition as a contributory factor might therefore be due to the ageing of the population and not to food insecurity. Regarding admissions with malnutrition, even the primary-diagnosis numbers have increased over time, from 391 in 2007-08 to 780 in 2017-18. Thus, in this work we considered the primary and secondary admission cases as a proxy for the health variable: the variable Health is defined as the count of finished admission episodes with a primary or secondary diagnosis of malnutrition, coded in ICD-10. An ICD-10 code of malnutrition on the episode indicates that the patient was diagnosed with, and was therefore being treated for, malnutrition during the episode of care. \\ \textbf{Education:} The proxy for education could be defined as a function of educational attainment, such as the proportion of pupils achieving expected grades at key stages 1, 2 and 4. Even though educational attainment is published annually at local and national levels by the UK government's Department for Education, the scoring system has changed in recent years, so temporal comparisons are not adequate \citep{DeptEduc14}. As a proxy for education and its relation to food security, we therefore considered the proportion of pupils at the end of key stage 4 who were classified as disadvantaged: the variable Education is measured as the percentage of pupils at key stage 4 who were classified by the Department for Education as disadvantaged, including pupils known to be eligible for free school meals (FSM) in any spring, autumn, summer, alternative provision or pupil referral unit census from year 6 to year 11, pupils looked after for at least one day, and pupils adopted from care. Before 2015 this classification considered those who had been eligible for free school meals at any point in the previous 6 years and children who are `looked after'; in 2015 the definition was widened to also include those children who have been adopted from care. Pupils classified as disadvantaged have a lower average educational attainment record than other pupils, and there is a direct correlation between level of qualification and unemployment in later life; poor educational attainment is also strongly correlated with teenage pregnancy, offending behaviour, and alcohol and drug misuse. Comparisons between educational attainment for disadvantaged and other pupils indicate a difference of 4.07 (2010/2011) and 3.66 (2016/2017) in the attainment gap index for key stage 4 for state-funded schools in England.
The gap index is a score measuring the difference between the disadvantaged and non-disadvantaged groups at key stages 2 and 4 \citep{DeptEduc14}. The index is based on the mean rank of all the disadvantaged and non-disadvantaged pupils, divided by the number of pupils in each cohort. This decimal mean rank difference is scaled to 10 and ranges from 0 to 10, where a higher value means a higher attainment of non-disadvantaged compared to disadvantaged pupils. The index aims to be resilient to changes in the grading systems and in the assessments and curricula, and may be used for temporal comparisons.\\ \textbf{Social Unrest:} Inadequate food security can cause food riots \citep{Lagi2012a}. In the UK, a riot is defined by section 1(1) of the Public Order Act 1986: where 12 or more persons who are present together use or threaten unlawful violence for a common purpose, and the conduct of them (taken together) is such as would cause a person of reasonable firmness present at the scene to fear for his personal safety, each of the persons using unlawful violence for the common purpose is guilty of riot. Riot data is collected by the police. Whilst the likelihood of a food riot in the UK is currently small, the costs of post-riot repairs, both to the physical environment and to community relations, can be considerable. \\ \textbf{Costs:} Costs of candidate intervention policies are routinely calculated and form part of the decision-making process. Indeed, as a response to falling budgets, decision makers might revise the criteria for assistance of various kinds, for instance by making the eligible cohort smaller. Interventions which are effective but budget-neutral or cost-saving are obviously preferred; however, when the benefit of an intervention may not be seen within the same financial year, this forms part of the decision-makers' discussion after the policies have been scored. This is the approach we take here, scoring the policies and leaving the costs for the final discussions of the decision makers. \subsection{Structure of the IDSS} Having found a parsimonious form of utility function, we are able to begin to build the architecture of the supporting structural model. The paradigm we used for this is described in detail in \citep{Smith2010}. The method involves first eliciting those variables which directly influence the attributes of the utility function, then the variables which affect those variables, and so on until a suitable level of detail has been obtained. This was effected using an iterative process, drawing on the food poverty literature and checking with domain experts, refining and repeating. In particular, the general framework was confirmed by work produced independently in \cite{LoopstraThesis2014}. The variables and their dependencies for the UK food system are shown in Figure \ref{UKDBN}. There is a range of models which can be used for the overarching model of an IDSS, as listed in \cite{Smith2016}; for the purpose of the IDSS for food security we selected a dynamic Bayesian network (DBN), as summarised in subsection \ref{subBN}. The structure was assumed to be fixed over time. Figure \ref{UKDBN} illustrates the 16-node DBN obtained through the literature and confirmed by the experts. The node food security represents the two variables, health and education, considered in the utility function. \begin{figure}[htb] \centerline{\includegraphics[width=0.9\textwidth]{FoodNet.pdf}} \caption{\footnotesize IDSS proposed for UK food security decision support.
} \label{UKDBN} \end{figure} \newpage \clearpage \subsection{Expert panels} Having identified the factors influencing household food security in the UK, the next step is to identify the most relevant experts to provide information on these. The panels constituted for such an IDSS will often be chosen to mirror panels that are already constituted for similar purposes; e.g.\ in the UK, the Office for Budget Responsibility, HM Treasury and the Confederation of British Industry all produce economic forecasts on the UK economy. Looking at where the relevant information is held gives some very natural panels. The 16-node DBN illustrated in Figure \ref{UKDBN} becomes a 9-panel IDSS (Figure \ref{FoodDBNTikz}). Panel G2 reports on the cost of food given inputs from panel G5 on food supply, incorporating imports and exports, domestic food production and supply chain disruption. Panel G5, in turn, relies on information from G8, the Met Office, on weather and climate patterns to calculate its expectations of food supply, since both domestic and world production and supply chain disruption are weather related. Household income, G1, impacts directly on the utility. Panel G1 relies on information provided by G3 and G4 to make its predictions under different policy scenarios. G4 advises on the cost of living, including energy, housing and other essentials. G3 assesses income taking into account employment, tax and social security, taking inputs from G7 and G9. G7 advises on demography, including single parents, immigrants, disability and those with no recourse to public funds. G9 advises on matters of the economy and informs the oil price panel, G6, and the cost of living panel, G4, as well as G3. \begin{figure}[htb] \centerline{\includegraphics[angle=270,width=0.9\textwidth]{ExpertNet.pdf}} \caption{The expert panels required for this IDSS. Each node represents an expert panel which, using its models and data, provides summaries of expected values and relevant moments under each policy decision being considered.} \label{FoodDBNTikz} \end{figure} \subsection{Dynamical Bayesian Network IDSS for food security} Here we assume plausible models for the expert panels and utility, based on publicly available data. The attributes measured to compose the food network were obtained from the Office for National Statistics, which publishes official statistics for the UK. The time series for all nodes are measured yearly and the temporal window considered runs from 2008 to 2018. Each variable is detailed in Appendix A. \vspace{0.3cm} For the purposes of this proof of concept, social unrest was omitted since there was no available data. The health and education indicators are the attributes in the utility function and are directly affected by household income (HIncome, panel $G_1$) and food costs (CFood, panel $G_2$). The variables are modelled on the log scale, as both are percentages or rates. \begin{eqnarray}\nonumber \log(Health_t) & = & \delta_{01,t}+\delta_{11,t} HIncome_t+\delta_{21,t} CFood_t +\epsilon_{ht},\\ \nonumber \log(Education_t) & = & \delta_{02,t}+\delta_{12,t} HIncome_t+\delta_{22,t} CFood_t +\epsilon_{et}. \end{eqnarray} Panel $G_1$ advises on household income, aiming to reflect the amount of money that households have available after accounting for living expenses (panel $G_4$), taxes, and access to credit and benefits (panel $G_3$). \begin{eqnarray*} HIncome_t & = & \theta_{01,t}+\theta_{11,t} Lending_t +\theta_{21,t} Tax_t+\theta_{31,t} Benefits_t+\theta_{41,t} CLiving_t+\epsilon_{1t}.
\end{eqnarray*} The cost of food variable (panel $G_2$) depends on the cost of energy (panel $G_6$) and on food supply, imports and exports, and food production (panel $G_5$). \begin{eqnarray}\nonumber CFood_t & = & \theta_{02,t}+\theta_{12,t} FProduction_t +\theta_{22,t} FImports_t +\theta_{32,t} CEnergy_t+ \epsilon_{2t}. \end{eqnarray} Panel $G_3$ reports on variables affecting income, such as lending, tax and unemployment. Unemployment depends on the economic context (panel $G_9$), represented by GDP, and on part-time workers (panel $G_7$). \begin{eqnarray}\nonumber Lending_t & = & \theta_{03,t}+\theta_{13,t} Unemployment_t +\epsilon_{3t},\\ \nonumber Tax_t & = & \theta_{03,t}^*+\theta_{13,t}^* Unemployment_t +\epsilon_{3t}^*,\\ \nonumber Benefits_t & = & \theta_{03,t}^{**}+\theta_{13,t}^{**} Unemployment_t +\epsilon_{3t}^{**},\\ \nonumber Unemployment_t & = & \theta_{03,t}^{***}+\theta_{13,t}^{***} \mbox{\it Part-time}_t + \theta_{23,t}^{***}GDP_t +\epsilon_{3t}^{***}. \end{eqnarray} Panel $G_4$ reports on the cost of living, which depends on the cost of food (panel $G_2$) and on the cost of housing, including energy. The cost of housing depends on the cost of energy (panel $G_6$). \begin{eqnarray}\nonumber CLiving_t & = & \theta_{04,t}+\theta_{14,t} CFood_t+\theta_{24,t} CHousing_t + \epsilon_{4t},\\ \nonumber CHousing_t & = & \theta_{04,t}^*+ \theta_{14,t}^* CEnergy_t+ \epsilon_{4t}^*. \end{eqnarray} Panel $G_5$ (Food supply) reports on food production and imports, which depend on the economic context (panel $G_9$): \begin{eqnarray}\nonumber FProduction_t & = & \theta_{05,t}+\theta_{15,t} GDP_t +\theta_{25,t} FImports_t + \epsilon_{5t},\\ \nonumber FImports_t & = & \theta_{05,t}^{*}+\theta_{15,t}^* GDP_t + \epsilon_{5t}^{*}. \end{eqnarray} Panel $G_6$ reports on oil and energy costs given inputs from panel $G_9$ about the economic context. \begin{eqnarray}\nonumber COil_t & = & \theta_{06,t}+\theta_{16,t} GDP_t + \epsilon_{6t},\\ \nonumber CEnergy_t & = & \theta_{06,t}^{*}+\theta_{16,t}^{*} COil_t + \epsilon_{6t}^{*}. \end{eqnarray} Panels $G_7$ (Demography), $G_8$ (Weather) and $G_9$ (Economy) report on demography, weather and the economic context, respectively, with model equations given by \begin{eqnarray*} \log(PartTime_t) & = & \theta_{07,t}+ \epsilon_{7t},\\ Frost_t & = & \theta_{08,t}+ \epsilon_{8t},\\ GDP_t & = & \theta_{09,t}+ \epsilon_{9t}. \end{eqnarray*} Using these models as the panels' models, we now examine what happens to the utility under a number of scenarios. \section{Model outputs and scenario evaluation\label{Out}} Figure \ref{Fig:fitz} presents the fit and the effects of household income and food costs on health and education, obtained by recursive updating of the posterior moments based on the forward filtering and backward smoothing algorithm presented in subsection \ref{subBN}. Notice the negative effect of household income and the positive effect of food costs on the rate of malnutrition and the percentage of disadvantaged pupils. Figure \ref{Fig:fity} presents the fit for all the variables in the food security network. After fitting the dynamical model, different policies were compared using the IDSS approach described in Section \ref{DDSS}. Policy 1 is `do nothing', i.e.\ all variables are kept at their observed values. Policy 2 accounts for an increase of $25\%$ in food costs, such as under a no-deal Brexit \citep{Barons2020}. Policy 3 represents a decrease of $25\%$ in food costs, such as through government subsidies. Figure \ref{Fig:u1} presents the posterior utility function for the 3 policies.
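Schematically, each policy is scored by ancestral sampling through the network, applying the policy's shock to a node before its children are drawn, so that downstream variables respond to the intervened value. The sketch below is purely illustrative: \texttt{simulate\_node}, \texttt{topo\_order} and \texttt{utility} are hypothetical placeholders standing in for the fitted panel models. Policy 2 then corresponds to scaling the node CFood by 1.25, and Policy 3 to scaling it by 0.75.
\begin{verbatim}
def score_policy(simulate_node, topo_order, utility, intervention, n=5000):
    """Monte Carlo expected utility of a policy via ancestral sampling.

    simulate_node(name, values): draws node `name` from its fitted
        dynamic regression model, given its parents' sampled values.
    topo_order: node names ordered so that parents precede children.
    intervention: dict of multiplicative shocks, e.g. CFood -> 1.25.
    """
    total = 0.0
    for _ in range(n):
        values = {}
        for name in topo_order:
            values[name] = simulate_node(name, values)
            if name in intervention:
                # apply the policy shock before children are simulated
                values[name] *= intervention[name]
        total += utility(values)
    return total / n
\end{verbatim}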
Small values of the utility are associated with smaller rates of malnutrition and a smaller percentage of disadvantaged pupils. The expected values of the utility for policies 1, 2 and 3 are 0.2400, 0.2808 and 0.2091, respectively. Policy 4 considers the situation in which food costs are reduced by $15\%$ and household income is increased by $15\%$, through economic or welfare interventions. In this scenario the expected utility is 0.2232. Policy 5 is an agricultural policy leading to a reduction of $25\%$ in the output of food production (related to prices), resulting in an expected utility of 0.2161. Note that this last scenario keeps the variables affecting food production fixed at their observed values and modifies the variables lower in the hierarchy, such as food costs. \begin{figure}[htb] \begin{center} \begin{tabular}{ccc} \includegraphics[width=3.8cm,height=3.2cm]{HealthFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{Health1.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{Health2.pdf} \\ \includegraphics[width=3.8cm,height=3.2cm]{EducationFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{Education1.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{Education2.pdf} \\ \end{tabular} \caption{Attributes composing the utility function, effects of household income and food costs, and MDM fit (mean and $95\%$ credible interval), 2008-2018.}\label{Fig:fitz} \end{center} \end{figure} \newpage \clearpage \begin{figure}[htb] \begin{center} \begin{tabular}{ccc} \includegraphics[width=3.8cm,height=3.2cm]{HIncomeFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{CFoodFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{LendingFit.pdf}\\ \includegraphics[width=3.8cm,height=3.2cm]{TaxFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{BenefitsFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{UnimploymentFit.pdf} \\ \includegraphics[width=3.8cm,height=3.2cm]{CLivingFit.pdf}& \includegraphics[width=3.8cm,height=3.2cm]{CHousingFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{CEnergyFit.pdf} \\ \includegraphics[width=3.8cm,height=3.2cm]{FProductionFit.pdf}& \includegraphics[width=3.8cm,height=3.2cm]{ImportsFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{COilFit.pdf} \\ \includegraphics[width=3.8cm,height=3.2cm]{DisableFit.pdf}& \includegraphics[width=3.8cm,height=3.2cm]{FrostFit.pdf} & \includegraphics[width=3.8cm,height=3.2cm]{GdpFit.pdf} \end{tabular} \caption{Variables composing the food network and dynamical regression model fit (mean and $95\%$ credible interval), 2008-2018.}\label{Fig:fity} \end{center} \end{figure} \newpage \clearpage \begin{figure}[htb] \begin{center} \begin{tabular}{cc} \includegraphics[width=5.8cm,height=5.2cm]{UtilityScenario1.pdf} & \includegraphics[width=5.8cm,height=5.2cm]{UtilityScenario2.pdf} \\ \includegraphics[width=5.8cm,height=5.2cm]{UtilityScenario3.pdf} & \\ \end{tabular} \caption{Utility function posterior distribution.}\label{Fig:u1} \end{center} \end{figure} \section{Discussion and further developments} We have shown a proof-of-concept IDSS for policymakers concerned with ameliorating household food insecurity in the UK. We have identified the main drivers of food security, drawing partly on research from the USA and Canada, where food security has been measured for a number of years and understanding of the determinants of household food security is therefore more advanced than in the UK. We have identified plausible expert panels based on UK structures and have constructed models based on publicly available data.
We have demonstrated the output of the IDSS under a number of policies. We have assumed equal weighting between health and educational attainment as a proxy for food insecurity. To move from a proof of concept to a working IDSS, we would need to elicit the user preferences for display of the results, as discussed in \citep{Barons2017}. \newpage
\section{Introduction \label{sec_1}} Using local (hidden-variable) theory, John S. Bell introduced an inequality which is violated by quantum theory; later experiments showed that quantum theory is fundamentally non-local \cite{BEL_J_S_64}. As the non-locality feature of quantum theory is intensively used in quantum information, Bell-type inequalities have received more attention in recent years \cite{EKE_A_91}. Bell's original inequality was not amenable to empirical study in the laboratory. Subsequently, Clauser, Horne, Shimony and Holt introduced their famous inequality, called CHSH, which has been examined in laboratories ever since. Since no experiment is error-free, there has been an endeavour to obtain Bell-type inequalities that are violated as strongly as possible, so that the non-locality feature of quantum theory is easier to test experimentally \cite{CHSH_69}. Svetlichny (in 1987) and Mermin (in 1990) obtained inequalities for tripartite systems which implied stronger violations of local theories \cite{SVE_G_87,MER_N_90}. Also, in 1989, Greenberger, Horne, and Zeilinger obtained some inequalities for $N$-particle systems ($N>2$) \cite{GHZ_89}. In this article, we introduce a new Bell-type expression for tripartite systems with two measurements on each side and two outputs for each measurement. We then show that the violation factor (i.e.\ the ratio of the value of the Bell expression according to quantum theory to its value according to local theory) and the amount of violation (i.e.\ the difference between the value of the Bell expression according to quantum theory and its extremum value according to local theory) of this inequality exceed those of the available inequalities \cite{SVE_G_87,MER_N_90,BE_A_KL_D_93}, while its white noise tolerance agrees with the previous results. \section{Tripartite systems \label{sec_2}} We consider a tripartite system consisting of particles $\mathcal{A}$, $\mathcal{B}$ and $\mathcal{C}$. Two possible measurements $A$ and $A'$ are performed on particle $\mathcal{A}$ with outputs $a$ and $a'$ respectively, $B$ and $B'$ on particle $\mathcal{B}$ with outputs $b$ and $b'$ respectively, and finally $C$ and $C'$ on particle $\mathcal{C}$ with outcomes $c$ and $c'$ respectively, where $a,a',b,b',c,c' \in \{0,1\}$. Let $P_L(A,A',B,B',C,C'|a,a',b,b',c,c')$ denote the joint probability that measurements $A$, $A'$ on particle $\mathcal{A}$ result in $a$ and $a'$ respectively, measurements $B$, $B'$ on particle $\mathcal{B}$ result in $b$ and $b'$ respectively, and measurements $C$ and $C'$ on particle $\mathcal{C}$ result in $c$ and $c'$ respectively. It is obvious that: \begin{equation} \sum_{a,a'}\sum_{b,b'}\sum_{c,c'} P_L(A,A',B,B',C,C'|a,a',b,b',c,c') = 1 \label{eq_1} \end{equation} Also let $P(A,B,C|a,b,c)$ denote the joint probability that measurement $A$ on particle $\mathcal{A}$ results in $a$, measurement $B$ on particle $\mathcal{B}$ results in $b$, and measurement $C$ on particle $\mathcal{C}$ results in $c$. Clearly: \begin{equation} P(A,B,C|a,b,c)= \sum_{a'}\sum_{b'}\sum_{c'} P_L(A,A',B,B',C,C'|a,a',b,b',c,c') \label{eq_2} \end{equation} The normalization of the $P$'s implies: \begin{equation} \sum_{a}\sum_{b}\sum_{c} P(A,B,C|a,b,c)=1 \label{eq_3} \end{equation} As is well known, a Bell-type expression $\Bbb B$ is a linear combination of joint probabilities that is bounded by local theories, i.e.
\begin{equation} \Bbb{B}=\sum_{I,J,K,l,m,n}\gamma(I,J,K | l,m,n) P(I,J,K | l,m,n) \label{eq_4} \end{equation} where $I\in \{A,A'\},J\in \{B,B'\},K\in \{C,C'\}, l\in \{a,a'\},m\in \{b,b'\}$ and $n\in \{c,c'\}$. Using equation (\ref{eq_2}), the Bell expression in terms of the $P_L$'s becomes: \begin{equation} \Bbb{B}=\sum_{a,a'}\sum_{b,b'}\sum_{c,c'} [\alpha(a,a',b,b',c,c')- \beta(a,a',b,b',c,c')] P_L(A,A',B,B',C,C'|a,a',b,b',c,c'), \label{eq_5} \end{equation} where $\alpha$ and $\beta$ are the non-negative coefficients collecting, respectively, the positive and negative parts of the $\gamma$'s in (\ref{eq_4}). It is clear that \begin{equation} -e \le \Bbb{B} \le f \label{eq_6} \end{equation} where $f$ ($e$) is the greatest of the positive real numbers $\alpha$ ($\beta$) in equation (\ref{eq_5}). \section{A New Bell Expression \label{sec_3}} One of the well-known Bell-type expressions for tripartite systems is the Mermin inequality, which can be expressed as \cite{CKH_08}: \begin{equation} M=| E(A,B',C') + E(A',B,C') + E(A',B',C) - E(A,B,C) | \label{eq_7} \end{equation} where \begin{equation} E(A,B,C) = \langle A,B,C \rangle = \sum_{a}\sum_{b}\sum_{c} (-1)^z P(A,B,C|a,b,c) \label{eq_8} \end{equation} and $P(A,B,C|a,b,c)$ is the joint probability discussed above. In the above equation, $z$ is the number of zeros among the outputs in each particular setting \cite{SVE_G_87}. It is shown in \cite{MER_N_90} that for local theories the Mermin expression satisfies \begin{equation} 0 \le M \le 2 \label{eq_9} \end{equation} However, according to quantum theory, the upper bound of the Mermin expression is $4$, which shows that quantum theory is non-local. Here, the violation factor and the amount of violation of the Mermin inequality are both 2, and the maximum white noise tolerance calculated is 0.5 \cite{CKH_08}. Now let us consider the following expression for a tripartite system: \begin{eqnarray} G & = & P(A,B,C|1,1,1) + 5P(A,B,C|1,0,0) + 5P(A,B,C|0,0,1) + \nonumber \\ & & P(A,B,C|1,0,1) + 4P(A,B,C|0,0,0) + 4P(A,B,C|0,1,0) + \nonumber \\ & & P(A,B',C'|0,0,0) + P(A,B',C'|0,1,1) - 4P(A,B',C'|0,0,1) - \nonumber \\ & & 4P(A,B',C'|0,1,0) - P(A',B',C|0,0,1) - P(A',B',C|1,1,1) - \nonumber \\ & & 4P(A',B',C|0,1,0) - 4P(A',B',C|1,0,0) - 5P(A',B,C'|1,0,0) - \nonumber \\ & & 5P(A',B,C'|0,0,1) + P(A',B',C'|1,1,0) + P(A',B',C'|0,0,1) - \nonumber \\ & & 4P(A',B',C'|1,1,1) - 4P(A',B',C'|0,0,0) \label{eq_10} \end{eqnarray} In Appendix \ref{app_1}, it is shown that \begin{equation} G \le 1. \label{eq_11} \end{equation} However, for the three-qubit Greenberger-Horne-Zeilinger state~\cite{GHZ_89} \begin{equation} \ket{\Psi}_{GHZ} =\frac{1}{\sqrt{2}} (\ket{\uparrow\uparrow\uparrow}_z + \ket{\downarrow\downarrow\downarrow}_z), \label{eq_12} \end{equation} where $\uparrow$ and $\downarrow$ denote spin polarization along the $z$ axis, and with $A=\sigma_X^A$, $A'=\sigma_Y^A$, $B=\sigma_X^B$, $B'=\sigma_Y^B$, $C=\sigma_X^C$ and $C'=\sigma_Y^C$, $G$ becomes \begin{equation} G=\frac{1}{4} + \frac{5}{4} + \frac{5}{4} + 0 + 0+ \frac{4}{4} + \frac{1}{4} + \frac{1}{4} - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 + \frac{1}{8} + \frac{1}{8} - \frac{4}{8} - \frac{4}{8} = \frac{7}{2} \label{eq_13} \end{equation} It is seen that the violation factor and the amount of violation of the inequality (\ref{eq_10}) are 3.5 and 2.5 respectively, whereas the maximum violation factor and maximum amount of violation of the inequalities available so far are both 2. To calculate the white noise tolerance of $G$ in tripartite systems, we consider the following density matrix: \begin{equation} \rho = (1-p)\ket{\Psi}_{GHZ}\,{}_{GHZ}\!\bra{\Psi} + \frac{p}{8}I.
\label{eq_14} \end{equation} Obviously, \begin{equation} P(A,B,C|a,b,c) = \frac{p}{8} + (1-p) P_{QM}(A,B,C|a,b,c) \label{eq_15} \end{equation} where $P_{QM}(A,B,C|a,b,c)$ is the joint probability according to quantum theory and $p$ is the white noise tolerance of the Bell-type expression. From equations (\ref{eq_10}) and (\ref{eq_15}), we have: \begin{equation} p = \frac{G_{QM} - G_L}{G_{QM} - \frac{m-n}{8}} \label{eq_16} \end{equation} where $G_{QM}$ is the value of our Bell expression $G$ according to quantum theory, $G_L$ is its maximum value according to local theories, and $m$ ($n$) is the sum of the positive coefficients (of the absolute values of the negative coefficients) in $G$. It is easily seen that the white noise tolerance of $G$ is 0.5, which agrees with the maximum value calculated up to now. \section{Conclusion \label{sec_4}} In this article we introduced a Bell-type inequality for tripartite systems with two measurements on each side and two outputs for each measurement, which is violated by quantum theory with a stronger violation factor and a greater amount of violation than the available inequalities. In fact, the violation factor and the amount of violation of our inequality are 3.5 and 2.5 respectively, which are 1.5 and 0.5 more than the results obtained so far. However, the tolerance of our inequality is the same as that of the others. This increase in the violation factor and in the amount of violation improves the accuracy of experiments, in which errors are inevitable. Also, one of the advantages of our inequality is that it includes only 20 different joint probabilities, whereas in other works the number is much larger (32 in the Mermin inequality and 64 in the Svetlichny inequality). So our inequality requires fewer measurements, which in turn reduces the experimental errors. See \cite{MOV_H_09}.
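As a purely numerical sanity check (not part of the argument above), the value $G=7/2$ in (\ref{eq_13}) can be verified directly from the GHZ state. A minimal Python sketch with numpy follows, identifying output $1$ with the $+1$ eigenvalue and output $0$ with the $-1$ eigenvalue of each Pauli operator.
\begin{verbatim}
import numpy as np
from functools import reduce

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)

def projectors(sigma):
    """Spectral projectors of a Pauli matrix, indexed so that
    proj[0] <-> output 0 (eigenvalue -1), proj[1] <-> output 1 (+1)."""
    vals, vecs = np.linalg.eigh(sigma)   # eigenvalues ascend: -1, +1
    return [np.outer(vecs[:, i], vecs[:, i].conj()) for i in range(2)]

ghz = np.zeros(8, dtype=complex)
ghz[0] = ghz[7] = 1 / np.sqrt(2)         # (|000> + |111>)/sqrt(2)

def P(ops, outs):
    """Joint probability of outputs `outs`, one Pauli per qubit."""
    Pi = reduce(np.kron, [projectors(s)[o] for s, o in zip(ops, outs)])
    return (ghz.conj() @ Pi @ ghz).real

X, Y = sx, sy
terms = [  # (coefficient, operators, outputs), from the expression G
    (1, (X, X, X), (1, 1, 1)), (5, (X, X, X), (1, 0, 0)),
    (5, (X, X, X), (0, 0, 1)), (1, (X, X, X), (1, 0, 1)),
    (4, (X, X, X), (0, 0, 0)), (4, (X, X, X), (0, 1, 0)),
    (1, (X, Y, Y), (0, 0, 0)), (1, (X, Y, Y), (0, 1, 1)),
    (-4, (X, Y, Y), (0, 0, 1)), (-4, (X, Y, Y), (0, 1, 0)),
    (-1, (Y, Y, X), (0, 0, 1)), (-1, (Y, Y, X), (1, 1, 1)),
    (-4, (Y, Y, X), (0, 1, 0)), (-4, (Y, Y, X), (1, 0, 0)),
    (-5, (Y, X, Y), (1, 0, 0)), (-5, (Y, X, Y), (0, 0, 1)),
    (1, (Y, Y, Y), (1, 1, 0)), (1, (Y, Y, Y), (0, 0, 1)),
    (-4, (Y, Y, Y), (1, 1, 1)), (-4, (Y, Y, Y), (0, 0, 0)),
]
print(sum(c * P(ops, outs) for c, ops, outs in terms))   # 3.5
\end{verbatim}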
\section{Introduction} Let $\Omega \subset \mathbb{R}^{d}$ denote a smooth, strictly convex open set whose boundary has positive sectional curvature. We shall write $\Omega^c := \R^d\backslash \Omega$. It is well-known (see for instance \cite[\S 5]{GST} or \cite[\S4.4]{dyatlovmathematical}) that for any $k>0$ and any $\phi_{in}\in C^\infty (\Sph^{d-1})$, there is a unique solution $u\in C^\infty(\overline{\Omega^c})$ to the Dirichlet problem $$ (\Delta + k^2)u = 0 \qquad u \rvert_{\p \Omega} = 0 $$ such that \begin{equation}\label{eq:defscattering} u(x)=|x|^{-(d-1)/2}\big{(} e^{-i k|x|} \phi_{in}(-\hat{x}) + e^{i k |x|} \phi_{out}(\hat{x}) \big{)} + O_{|x|\rightarrow \infty}(|x|^{-(d+1)/2}), \end{equation} where we write $\hat{x}= \frac{x}{|x|}\in \Sph^{d-1}$ and $\Delta = \sum_{i = 1}^d \p_{x_i}^2$. In particular, $\phi_{out}$ is determined by $\phi_{in}$, and we define the \emph{scattering matrix} $S(k)$, which depends on $k$ and $\Omega$, by $$S(k)(\phi_{in}) := e^{i\pi (d-1)/2} \phi_{out}.$$ In fact, $S(k)$ extends to a unitary operator acting on $L^2(\Sph^{d-1})$ with the property that $S(k) - \Id$ is trace class \cite{taylor:vol2, RSIII}. Therefore, for any $k>0$, $S(k)$ has purely discrete spectrum, accumulating only at 1, which we denote by $\sigma(S(k)) := \{e^{i \beta_{k,n}} \}$. \textit{Our aim in this paper will be to study the asymptotic distribution of} the $e^{i \beta_{k,n}}$ as $k\rightarrow \infty$. One of our main results is an estimate for the number of phase shifts in a sector $S \subset \mathbb{S}^1 \setminus \{ 1 \}$ as $k \to \infty$. Define the counting function $$ N_k(\phi_0, \phi_1, \Omega) = N_k(\phi_0, \phi_1) := \# \{ e^{i \beta_{k,n}} \in \sigma(S(k)) : \phi_0 < \beta_{k, n} < \phi_1,\ \mathrm{mod}\ 2\pi \}. $$ Letting $\omega_{d-1} = |B^{d-1}|$, where $B^{d-1}$ is the unit ball in $\mathbb{R}^{d-1}$, we will prove \begin{equation} \label{eq:number} N_{k}(\phi_{0}, \phi_{1}) = \frac{\omega_{d-1}}{(2\pi)^{d-1}} \Big{(}\frac{\phi_{1} - \phi_{0}}{2\pi} \Big{)} \Vol(\p \Omega) k^{d-1} + o(k^{d-1}). \end{equation} In particular, the phase shifts accumulate in each sector $S$ at a rate proportional to $\Vol(\p \Omega)\, |S|\, k^{d-1}$ as $k \to \infty$. The estimate in \eqref{eq:number} follows from Theorem \ref{th: main theorem}; see Section \ref{sec:proofs}. To study the asymptotic distribution of the phase shifts, consider the measure $\mu_{k}$ on the circle $\mathbb{S}^{1}$, defined for continuous functions $f \colon \mathbb{S}^{1} \lra \mathbb{C}$ by \begin{equation} \label{eq:measure} \langle \mu_{k}, f \rangle = \Big{(} \frac{2\pi}{k}\Big{)}^{d - 1} \sum_{\sigma(S(k))} f(e^{i \beta_{k,n}}). \end{equation} Note that $ \langle \mu_{k}, f \rangle$ is finite if $1 \not \in \supp f$. The following theorem describes the behavior of $\mu_k$ as $k\rightarrow \infty$, provided (\ref{eq: hypVol3}) holds, which is a standard assumption on the volume of the periodic points of the inside billiard map. Note that this assumption holds if our smooth convex obstacle is generic, or is analytic (see the discussion at the end of Section \ref{sec:dynamics}). \begin{theorem}\label{th: main theorem} Let $\Omega\subset \R^d$ be a smooth strictly convex open set, such that (\ref{eq: hypVol3}) holds.
Then for any $f \colon \mathbb{S}^{1} \lra \mathbb{C}$ with $\supp f \cap \set{1} = \varnothing$, we have \begin{equation} \label{eq:limit_measure} \lim \limits_{k\rightarrow \infty} \langle \mu_{k}, f \rangle = \frac{\Vol(\p \Omega) \omega_{d-1}}{2\pi} \int_{0}^{2\pi}f(e^{i\theta}) d\theta. \end{equation} \end{theorem} \begin{remark} The factor in front of the integral in \eqref{eq:limit_measure} arises as the volume of the `interacting region' in phase space of incoming rays from the sphere at infinity that make contact with the obstacle; see Section \ref{sec:dynamics} for a further description of the classical dynamics. In \cite{MR3335243}, in which the first author and collaborators studied the same problem for semiclassical potential scattering, a measure $\mu_h$, depending on a semiclassical parameter $h \to 0$, was defined analogously to the measure in \eqref{eq:measure}, except that it included the volume of the interacting region. Here we prefer not to include it, so that the dependence on the interacting region appears explicitly in the limit measure. \end{remark} As an application of the equidistribution of the measure $\mu_k$, we will give an alternative proof of the following result of Majda-Ralston, generalized by Melrose and then by Robert, regarding the asymptotic development of the total scattering phase \begin{equation} \label{eq:scattering-phase} s(k) = i \log \det S(k). \end{equation} The scattering phase $s(k)$ can be defined in a natural way so that $s(k) \in C^{\infty}((0, \infty))$. \begin{theorem}[\cite{MajdaRalston1978, MelroseWeylExterior, Robert1996}]\label{thm:scattering-phase} Let $\Omega$ be a smoothly bounded, strictly convex obstacle whose set of periodic billiard trajectories has measure zero. Then \begin{equation} \label{eq:scattering-phase-asymptotics} s(k) = \frac{\omega_d}{(2\pi)^{d-1}} \Vol(\Omega) k^d + \frac{\omega_{d - 1}}{4 (2\pi)^{d-2}} \Vol(\p \Omega) k^{d -1} + o(k^{d-1}). \end{equation} \end{theorem} In fact, the results of \cite{MelroseWeylExterior,Robert1996} hold for \textit{all smoothly bounded, compact domains} satisfying the stated assumption on the periodic trajectories. As we describe in Section \ref{sec:proofs}, the novelty in our proof comes from its use of the explicit relationship between the scattering phase and the counting function for the Dirichlet eigenvalues, \begin{equation} \label{eq:counting-function} N_D(\lambda_0) := \# \{ 0 < \lambda < \lambda_0 : \exists \phi \in L^2(\Omega),\ \phi\rvert_{\p \Omega} = 0,\ \Delta \phi = - \lambda^2 \phi, \phi \neq 0 \}, \end{equation} which arises from the spectral duality result of Eckmann-Pillet \cite{EP1995}. Indeed, note that the leading order term in \eqref{eq:scattering-phase-asymptotics} is $2\pi$ times the leading order term in Weyl's law \cite{ivrii1980second}, which is to be expected since, as explained in Section \ref{sec:proofs}, `inside-outside' duality says that a phase shift makes a complete rotation of the unit circle for each Dirichlet eigenvalue of $\Omega$. The main technical ingredient in the proof of Theorem \ref{th: main theorem} is a trace formula for powers of the scattering amplitude \begin{equation} \label{eq:scattering-matrix-and-amplitude} A(k) := S(k) - \Id. \end{equation} \begin{proposition}\label{thm:trace-lemma} Suppose that (\ref{eq: hypVol3}) holds. Let $p \in \mathbb{Z}$. Then for all $\varepsilon>0$, we have \begin{equation}\label{eq: TracePowers} \mbox{Tr} A^p(k) = (-1)^p \Vol(\p \Omega) \omega_{d-1} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} + O(k^{d - 1 -1/3 + \varepsilon}).
\end{equation} In particular, for any trigonometric polynomial $P$ vanishing at $1$ and for the measure $\mu_k$ in \eqref{eq:measure}, as $k \to \infty$, \begin{equation*} \langle \mu_k, P\rangle = \frac{\Vol(\p \Omega) \omega_{d-1}}{2\pi} \int_{0}^{2\pi} P(e^{i\theta})\, \mathrm{d}\theta + O(k^{-1/3+\varepsilon}). \end{equation*} \end{proposition} As we show in Section \ref{sec:proofs}, the trace formula in Proposition \ref{thm:trace-lemma}, together with $k$-dependent bounds on the number of eigenvalues lying at least an exponentially small (in $k$) distance from $1$, implies Theorem \ref{th: main theorem}. \begin{remark} It may appear surprising that the bound on the error is much better in (\ref{eq: TracePowers}) than in (\ref{eq:limit_measure}). The reason is that the constant in the $O(k^{d - 1 -1/3 + \varepsilon})$ of (\ref{eq: TracePowers}) depends on $p$, and grows exponentially with it; equivalently, for a trigonometric polynomial $P$, the constant depends on the degree of $P$. Therefore, when we approximate a continuous function by polynomials to prove Theorem \ref{th: main theorem}, our control on the remainder becomes much worse. Actually, we believe that, by keeping track of all the constants in the remainders appearing in the proof of Proposition \ref{thm:trace-lemma}, it should be possible to show that, provided $f$ is regular enough, we have $$ \langle \mu_{k}, f \rangle = \frac{\Vol(\p \Omega) \omega_{d-1}}{2\pi} \int_{0}^{2\pi}f(e^{i\theta}) d\theta + O\big{(}(\log k)^{-\varepsilon}\big{)}, $$ for some $\varepsilon>0$. \end{remark} \medskip \subsubsection*{Relation to other works} Since the pioneering works of Birman, Sobolev, and Yafaev (see for example \cite{SY1985, BY1984}), there has been a wealth of literature on the asymptotic behavior of the scattering matrix at high energy, in particular about the distribution of phase shifts. In semiclassical potential scattering, an analogous result for compactly supported non-trapping potentials was proven by the first author, Hassell, and Zelditch in \cite{MR3335243}, and was generalized to trapping potentials by the second author in \cite{I2016}. See \cite{MR3335243} for a complete literature review of phase shift asymptotics for potential scattering. The behavior of the phase shifts in the semiclassical limit has been studied in various other settings: for magnetic potentials \cite{BP2012}, for scattering by radially symmetric potentials \cite{DGHH2013}, and near resonant energies \cite{nakamura2014spectrum}. The idea of using trace formulae to analyze the asymptotics of the spectra comes from \cite{Z1992,Z1997}, and was the starting point of \cite{MR3335243}, \cite{I2016} and of the present paper. The main tool we use here is the Kirchhoff approximation, which was proven in its optimal form in \cite{Melrose-Taylor-near-peak}. Finally, our proof is simplified by describing the microlocal properties of the scattering matrix in terms of its action on Gaussian states, an approach which was introduced in \cite{I2016} for potential scattering. \subsubsection*{Organization of the paper} In Section \ref{sec:dynamics}, we will recall a few facts about the classical scattering dynamics, and its links with the interior billiard dynamics. In Section \ref{sec: tools}, we will recall the main tools we use in the proof of Proposition \ref{thm:trace-lemma}. Proposition \ref{thm:trace-lemma} is then proved in Section \ref{sec : proof proposition}. Finally, we prove Theorems \ref{th: main theorem} and \ref{thm:scattering-phase} in Section \ref{sec:proofs}.
The appendix contains elementary facts of semiclassical analysis, a proof of a resolution of identity formula on the sphere, as well as a cumbersome determinant computation. \section{Classical scattering dynamics and interior dynamics}\label{sec:dynamics} Let $\omega\in \Sph^{d-1}$ and $\eta\in \omega^\perp\subset \R^d$. We will always identify $(\omega,\eta)$ with a point in $T^*\Sph^{d-1}$. Consider the line $L_{(\omega,\eta)}:=\{t\omega +\eta, t\in \R\}$. By strict convexity of $\partial \Omega$, it intersects $\partial \Omega$ in zero, one or two points. We define the interaction region, \begin{equation}\label{eq: def I} \mathcal{I}:= \{ (\omega,\eta)\in T^*\Sph^{d-1} ; L_{(\omega,\eta)}\cap \partial \Omega \text{ contains two points} \}. \end{equation} If $(\omega,\eta)\in \mathcal{I}$, then there exist $t_1<t_2$ such that $t_i\omega+\eta \in \partial \Omega$ for $i=1,2$, and we set (see Figure \ref{fig: kappa}) \begin{figure} \center \includegraphics[scale=0.4]{ScOb} \caption{The construction of the scattering map $\kappa$.}\label{fig: kappa} \end{figure} \begin{equation*} \begin{aligned} x(\omega,\eta)&:= t_1\omega+\eta\in \partial \Omega\\ \omega'(\omega,\eta)&:= \omega- 2 (\omega\cdot \nu_{x(\omega,\eta)}) \nu_{x(\omega,\eta)}\\ \eta'(\omega,\eta) &:= x(\omega,\eta) - (\omega'\cdot x(\omega,\eta)) \omega', \end{aligned} \end{equation*} where $\nu_x$ is the outward pointing normal vector at the point $x \in \p \Omega$. We then set \begin{equation} \kappa(\omega,\eta) = (\omega',\eta').\label{eq:the-scattering-map} \end{equation} If $(\omega,\eta)\notin \mathcal{I}$, we shall set $\kappa(\omega,\eta) = (\omega,\eta)$. The map $\kappa$ may then be seen as a $C^0$ map $\kappa : T^*\Sph^{d-1} \rightarrow T^*\Sph^{d-1}$, which is smooth (and even symplectic) away from the glancing set $\partial \mathcal{I}$ introduced below. The fact that the interaction region satisfies $$\Vol(\mathcal{I}) = \Vol(\p \Omega) \omega_{d-1} $$ follows in a straightforward way from Cauchy's surface area formula. For $p\in \mathbb{Z}\backslash\{0\}$, we will denote by $\mathcal{P}_p\subset T^*\Sph^{d-1}$ the set of fixed points of $\kappa^p$. Note that we then have $$\mathcal{I}= T^*\Sph^{d-1}\backslash \mathcal{P}_1,$$ and that $\partial \mathcal{P}_1= \partial \mathcal{I}$ is exactly the `glancing set', i.e.\ the set of $( \omega, \eta)$ such that $L_{(\omega,\eta)}\cap \partial \Omega$ consists of a single point. We define \begin{equation} \label{eq:non-trivial-periodic} \mathcal{P}_p' := \mathcal{P}_p \setminus \mathcal{P}_1, \end{equation} the set of non-trivial periodic points with period $p$, which is also an invariant subset. The sets $\mathcal{P}'_p$ will play a central role in our proof, and can be better understood in terms of the periodic points of the interior billiard map, as follows. (Indeed, the relationship between the interior billiard map and the scattering relation is a reflection of the relationship between the interior eigenvalue problem and the phase shifts exemplified in Eckmann-Pillet's inside-outside duality, as pointed out in \cite{Sm1992}.) Consider the set $\mathcal{O}:=\{(y,\xi)\in \partial \Omega\times \Sph^{d-1};~ \xi\cdot \nu_y<0\}$. If $(y,\xi)\in \mathcal{O}$, there will be a unique $t>0$ such that $y+t\xi\in \partial \Omega$. We shall then write $y'(y,\xi)= y+ t\xi$, and $\xi'(y,\xi) = \xi - 2 (\xi\cdot \nu_{y'}) \nu_{y'}$. We have $(y',\xi')\in \mathcal{O}$, and we may define $\kappa_{int} : \mathcal{O}\rightarrow \mathcal{O}$ by $\kappa_{int}(y,\xi) = (y',\xi')$.
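For instance, when $\Omega$ is the unit disk in $\R^2$ (a classical computation which we include only as an illustration), we may parametrize $\mathcal{O}$ by the base angle $s$ of $y=(\cos s,\sin s)$ and the angle of incidence $\theta\in(0,\pi)$ that $\xi$ makes with the tangent line at $y$. Since a chord making angle $\theta$ with the tangent subtends a central angle $2\theta$, the map $\kappa_{int}$ is then the shear
\begin{equation*}
\kappa_{int}(s,\theta) = (s+2\theta \ \mathrm{mod}\ 2\pi,\ \theta).
\end{equation*}
Its period-$p$ points form the finite union of circles $\{\theta = \pi q/p,\ q=1,\dots,p-1\}$, a set of measure zero, and the set of $(s,\theta)$ moved by $\kappa_{int}^p$ by less than $\varepsilon$ has measure $O(\varepsilon)$, since $\theta$ must then lie within $\varepsilon/(2p)$ of one of finitely many values. This is the simplest instance of the volume conditions (\ref{eq: hypVol2}) and (\ref{eq: hypVol4}) below.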
The map $\kappa_{int}$ is the billiard map of the interior dynamics, and we shall denote by $\mathcal{P}_p^{int}$ the set of periodic points of period $p$ of $\kappa_{int}$. The following elementary lemma makes explicit the link between $\kappa$ and $\kappa_{int}$, as can be seen in Figure \ref{fig: billiard}. \begin{lemma} Let $(\omega,\eta)\in T^*\Sph^{d-1}\backslash \mathcal{P}_1$. We then have $$\kappa_{int} \big{(}x(\omega,\eta), - \omega\big{)} = \big{(} x(\kappa(\omega,\eta)), \omega'(\kappa(\omega,\eta))\big{)}.$$ \end{lemma} \begin{figure} \center \includegraphics[scale=0.4]{ScOb2} \caption{The scattering map and the billiard map.}\label{fig: billiard} \end{figure} As a consequence of this lemma, we see that $\mathcal{P}'_p$ is homeomorphic to $\mathcal{P}_p^{int}$. \subsection*{The volume of the set of fixed points} Let us denote by $\Vol$ the (symplectic) volume on $T^*\Sph^{d-1}$. We will always assume that we have \begin{equation}\label{eq: hypVol} \forall p\in \mathbb{Z}\backslash \{0\}, ~~ \Vol (\mathcal{P}'_p) = 0. \end{equation} Let us denote by $d$ the Riemannian distance on $T^*\Sph^{d-1}$. We will often make the following stronger hypothesis. \begin{equation}\label{eq: hypVol3} \forall p\in \mathbb{Z}\backslash \{0\}, ~~ \Vol \big{(}\big{\{}(\omega,\eta)\in T^*\Sph^{d-1}\backslash \mathcal{P}_1 \text{ such that } d\big{(}\kappa^p(\omega,\eta), (\omega,\eta)\big{)}<\varepsilon\big{\}}\big{)} = O_{\varepsilon\rightarrow 0} (\varepsilon). \end{equation} These conditions may of course be rephrased in terms of the dynamics of $\kappa_{int}$ on $\mathcal{O}$. If $\mu_\mathcal{O}$ is any Riemannian volume and $d_\mathcal{O}$ is any Riemannian distance on the manifold $\mathcal{O}$, Equation (\ref{eq: hypVol}) is equivalent to \begin{equation}\label{eq: hypVol2} \forall p\in \mathbb{Z}\backslash \{0\},~~ \mu_\mathcal{O} (\mathcal{P}^{int}_p) = 0, \end{equation} while Equation (\ref{eq: hypVol3}) is equivalent to \begin{equation}\label{eq: hypVol4} \forall p\in \mathbb{Z}\backslash \{0\}, ~~ \mu_\mathcal{O}\big{(}\big{\{}(y,\xi)\in \mathcal{O} \text{ such that } d_\mathcal{O}\big{(}\kappa_{int}^p(y,\xi), (y,\xi)\big{)}<\varepsilon\big{\}}\big{)} = O_{\varepsilon\rightarrow 0} (\varepsilon). \end{equation} Condition (\ref{eq: hypVol2}) is conjectured to hold for all domains $\Omega\subset \R^d$, not necessarily convex. This conjecture, known as Ivrii's conjecture, has implications in terms of remainders for Weyl's law for the eigenvalues of the Laplacian (see \cite{ivrii1980second}). In the generic case, it was shown in \cite{petkov1988number} that $\mathcal{P}^{int}_p$ is finite for all $p\in \mathbb{Z}\backslash\{0\}$, so that (\ref{eq: hypVol2}) holds. Actually, the proof of Petkov and Stoyanov shows that the stronger property (\ref{eq: hypVol4}) also holds in the generic case. If $\partial \Omega$ is analytic, then the map $\kappa_{int}$ will be analytic, and one can show that (\ref{eq: hypVol4}) holds (see for instance \cite{safarov1997asymptotic}). \section{Tools for the proof of Proposition \ref{thm:trace-lemma}}\label{sec: tools} Before proving Proposition \ref{thm:trace-lemma}, let us recall a few facts we will need in the proof. \subsection{An integral representation for the scattering amplitude} The operator $A(k)$ introduced in (\ref{eq:scattering-matrix-and-amplitude}) can also be defined as follows.
Let $v(\cdot; \xi,k)$ be the unique solution to \begin{equation} \label{eq:dirichletexterior} \begin{split} (\Delta + k^{2}) v &= 0 \\ v \rvert_{\p \Omega} &= -e^{i x \cdot \xi}, \end{split} \end{equation} satisfying the Sommerfeld radiation condition. The function $v$ may then be written as $$v(|x|\omega;\xi,k) = |x|^{-(d-1)/2}e^{i k|x|} a(\omega,\xi,k) + O(|x|^{-(d+1)/2}).$$ One can show (see for instance \cite{HR1976}, page 381) that $A(k)$ is given by an integral kernel \begin{equation} \label{eq:big-a} A(k)f(\omega)= \int_{\mathbb{S}^{d-1}} a(\omega, \theta, k) f(\theta) dVol_{\mathbb{S}^{d-1}}(\theta), \end{equation} where $a$ satisfies \begin{align} \label{eq:little-a} a(\omega, \theta, k) &= \frac{1}{2i} k^{d-2} (2\pi)^{1-d} \int_{\p \Omega} e^{ik \omega \cdot y} \frac{\p}{\p \nu} e^{-ik\theta \cdot y} dVol_{\p \Omega}(y) \\ &\qquad + \frac{1}{2i} k^{d-2} (2\pi)^{1-d} \int_{\p \Omega} e^{-ik \theta \cdot y} \frac{\p}{\p \nu} v(y, -k\omega) dVol_{\p \Omega}(y). \end{align} \subsection{The Kirchhoff approximation} The function $\p_\nu v$ was studied in \cite{Melrose-Taylor-near-peak}, where the authors write \begin{equation} \label{eq:K-definition} \frac{\p v(x, -k\omega)}{\p \nu} = K(\omega, x, k) e^{i k x \cdot \omega}. \end{equation} Their main results, or at least what we shall need from them, can be summed up as follows. The definition of the symbol classes $S_\delta$ is recalled in (\ref{eq:defsymbol}). \begin{theorem}[Melrose-Taylor, \cite{Melrose-Taylor-near-peak}]\label{th: meltay} $$ K(\omega,x,k) = - i k | \nu_x \cdot \omega| + k E(\omega,x,k), $$ where $\nu_x$ is the outward pointing normal vector at the point $x \in \p \Omega$, and where $E$ satisfies \begin{equation}\label{eq:symbolE} E\in k^{-1/3} S_{1/3}(\Sph^{d-1}\times \partial \Omega). \end{equation} Furthermore, for any $\varepsilon>0$, we have that \begin{equation}\label{eq:NoShadow} \nu_x\cdot \omega > k^{-1/3+\varepsilon} \Longrightarrow E(\omega,x,k) = O(k^{-\infty}). \end{equation} \end{theorem} In particular, we have \begin{equation}\label{eq:boundE} ||E(\cdot,\cdot,k) ||_{C^0}\leq C k^{-1/3}. \end{equation} We therefore have \begin{equation}\label{eq : Kirchhoff amplitude} \begin{aligned} a(\omega, \theta, k) &= -\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \int_{\p \Omega}\Big{(} e^{i k (\omega - \theta) \cdot y} ( - \nu_y \cdot \theta + |\nu_y \cdot \omega| + E(\omega,y,k))\Big{)} dVol_{\p \Omega}(y). \end{aligned} \end{equation} \subsection{The use of Gaussian states} Let $(\omega_0,\eta_0) \in T^*\Sph^{d-1}$. We shall write\footnote{The $\chi$ is not very important here, and we could take another power of $k$ in it. It is just here to ensure that the integral in (\ref{eq:trace-gaussian-states}) makes sense.} $$\phi_{\omega_0,\eta_0}(\omega;k)= \chi \big{(} k^{1/3}|\omega-\omega_0|\big{)} e^{-ik \eta_0 \cdot\omega} e^{-\frac{k}{2} |\omega-\omega_0|^2},$$ where $\chi\in C^{\infty}(\R^+)$ is a fixed cutoff function such that $\chi(t)= 1$ if $t\leq 1$ and $\chi(t)=0$ if $t\geq 2$. Note that $\|\phi_{\omega_0,\eta_0}\|_{L^2} = O(k^{(1-d)/4})$. For any trace class operator $T \in \mathcal{L}(L^2(\mathbb{S}^{d-1}))$, we have \begin{equation} \Tr (T) = c_k \int_{T^*\Sph^{d-1}}d\omega_0 d\eta_0 \langle \phi_{\omega_0,\eta_0}, T \phi_{\omega_0,\eta_0}\rangle_{L^2(\Sph^{d-1})}, \label{eq:trace-gaussian-states} \end{equation} where $$c_k= 2^{-(d-1)/2}\big{(}k/(2\pi) \big{)}^{3(d-1)/2} + O_{k\rightarrow \infty}(k^{(3d)/2-2}).$$ The proof of this formula, first given in \cite{ingremeau2016semi}, will be recalled in Appendix \ref{section: appendixGaussian}. Let $\mathrm{d}$ be some Riemannian distance on $T^*\Sph^{d-1}$.
For any $\varepsilon>0$, the set \begin{equation}\label{eq: def C alpha} G_\varepsilon=\{(\omega,\eta) \in T^*\Sph^{d-1} ; \mathrm{d}\big{(}(\omega,\eta), \partial \mathcal{P}_1\big{)} < k^{-1/3+\varepsilon}\} \end{equation} has volume $O(k^{-1/3+\varepsilon})$, being a tubular neighborhood of width $k^{-1/3+\varepsilon}$ of the hypersurface $\partial \mathcal{P}_1$. Therefore, since $\| S(k) \|_{L^2 \lra L^2} = 1$ and $\|\phi_{\omega_0,\eta_0}\|_{L^2}^2 = O(k^{-(d-1)/2})$, we have $$I_\varepsilon (k) := c_k \int_{G_\varepsilon} d\omega_0 d\eta_0 \langle \phi_{\omega_0,\eta_0}, (S(k)-Id)^p \phi_{\omega_0,\eta_0}\rangle_{L^2(\Sph^{d-1})} = O \big(k^{3(d-1)/2}\, k^{-(d-1)/2}\, k^{-1/3+\varepsilon}\big).$$ In particular, we have \begin{equation} \label{eq:alph-bound} I_\varepsilon(k) = O(k^{d - 1 - 1/3+\varepsilon}). \end{equation} The following lemma tells us that the set $G_\varepsilon$ is not far from being stable by $\kappa$. \begin{lemma}\label{lem: stable glancing} Let $\varepsilon>0$. There exists $k_\varepsilon$ such that for all $k>k_\varepsilon$, we have \begin{equation*} \begin{aligned} \kappa(G_\varepsilon)&\subset G_{2\varepsilon}\\ \kappa(T^*\Sph^{d-1} \backslash G_\varepsilon)&\subset T^*\Sph^{d-1}\backslash G_{\varepsilon/2}. \end{aligned} \end{equation*} \end{lemma} \begin{proof} Let $d \colon \mathcal{I} \lra \mathbb{R}_+$ denote $d(\omega, \eta) =\dist_{T^* \mathbb{S}^{d-1}}( (\omega, \eta),\p \mathcal{I})$, i.e.\ distance from $(\omega, \eta)$ to the glancing set. The lemma follows if we can establish the existence of a $C > 0$ such that $$C^{-1} d(\omega, \eta) \le d(\kappa(\omega, \eta)) < C d(\omega, \eta).$$ Since $\kappa$ is a homeomorphism of $\mathcal{I}$ to itself, it suffices to find a $C$ for which this holds in a neighborhood of the boundary, and we therefore restrict to a neighborhood on which $d$ is smooth. We will compare $d$ to the function $\wt{d} \colon \mathcal{I} \lra \mathbb{R}_+$ defined by $$ \wt{d}(\omega, \eta) = \inf_{(\omega, \eta') \in \p \mathcal{I}} | \eta - \eta'|, $$ i.e.\ the distance of rays $(\omega, \eta)$ parallel to $\omega$ to the part of the glancing set parallel to $\omega$. Note that $\wt{d}$ is smooth in a neighborhood of $\p \mathcal{I}$, vanishes on $\p \mathcal{I}$ and that moreover the derivative $D \wt{d}$ is non-zero on $\p \mathcal{I}$. It follows that in a neighborhood of the boundary there is a $C$ so that \begin{equation} {C}^{-1} d(\omega, \eta) \le \wt{d}(\omega, \eta) < {C} d(\omega, \eta), \end{equation} and thus the question reduces to proving \begin{equation} {C}^{-1} \tilde{d}(\omega, \eta) \le \tilde{d}(\kappa(\omega, \eta))< {C} \tilde{d}(\omega, \eta),\label{eq:actualdesire} \end{equation} the original inequality for $\wt{d}$ instead of $d$. Given $(\omega, \eta)$, let $\phi(\omega, \eta)$ denote the impact angle, i.e.\ the angle that the ray $t \omega + \eta$ makes with the tangent plane $T_q \p \Omega$ at the first point of impact $q$. We claim that there is a constant $C > 0$ such that \begin{equation} C^{-1} \phi(\omega, \eta)^2 < \wt{d}(\omega, \eta) < C \phi(\omega, \eta)^2.\label{eq:angleinequality} \end{equation} This inequality implies \eqref{eq:actualdesire} for the following reason, which is illustrated in Figure \ref{fig: stableglancing}. Let us write $(\omega',\eta'):= \kappa (\omega,\eta)$, and $(\omega'',\eta'') := (-\omega',\eta')$. We then have $\tilde{d}(\omega'',\eta'')= \tilde{d}(\omega',\eta')$. But, by \eqref{eq:angleinequality}, we have that $\tilde{d}(\omega'',\eta'')$ is equivalent to $\phi(\omega'', \eta'')^2$, which is equal to $\phi(\omega, \eta)^2$ since the angle of incidence at the common reflection point is preserved by the reflection.
Therefore, using \eqref{eq:angleinequality} once more, we have that $\tilde{d}(\omega'',\eta'')$, and hence $\tilde{d}(\kappa(\omega,\eta))$, is equivalent to $\tilde{d}(\omega,\eta)$. \begin{figure} \includegraphics[scale=0.65]{stableglancing} \caption{How to go from \eqref{eq:angleinequality} to \eqref{eq:actualdesire}.}\label{fig: stableglancing} \end{figure} We argue by comparison to parabolas. Indeed, assume without loss of generality (after acting by a matrix in $O(d)$) that $(\omega, \eta)$ is incident at a point $q = (x'_0, x_0) \in \mathbb{R}^d = \mathbb{R}^{d-1}_{x'} \times \mathbb{R}_{x}$ with $x_0 = \max_{(x', x) \in \p \Omega} x$, and write $\p \Omega$ locally as a graph $x = g(x')$ with maximum $x_0$ at $x'_0$. Then the curvature condition on $\p \Omega$ implies that the Hessian satisfies $ - r_1 \le \p^2 g \le - r_2$ for some $r_1 \ge r_2 > 0$. Then for any $1 > \delta > 0$, $ - \frac{(1 + \delta) r_1}{2} |x'- x'_0|^2 \le g(x') - x_0 \le - \frac{(1-\delta) r_2}{2} |x' - x'_0|^2$ for $(x', x)$ sufficiently close to $(x_0', x_0)$. For $\phi(\omega, \eta)$ sufficiently small, the point $\overline{q}$ in the glancing set of $\omega$ closest to the line $t \omega + \eta$ is bounded in distance by the corresponding glancing points of these two parabolas, and it is straightforward to check that one can obtain \eqref{eq:angleinequality} by taking $\delta$ small. \end{proof} \subsection{A useful change of variables}\label{sec: changevariable} For any fixed $\omega_0\in \Sph^{d-1}$, consider the map $$\pi_{\omega_0} : \partial \Omega\ni y\mapsto y- (y\cdot\omega_0)\omega_0\in \omega_0^\perp,$$ and the glancing, illuminated and shadow sets \begin{equation*} \begin{aligned} Y^{0}_{\omega_0}&:=\{ y\in \partial \Omega; \nu_y\cdot \omega_0= 0\}\\ Y^{-}_{\omega_0}&:=\{ y\in \partial \Omega; \nu_y\cdot \omega_0< 0\}\\ Y^{+}_{\omega_0}&:=\{ y\in \partial \Omega; \nu_y\cdot \omega_0>0\}. \end{aligned} \end{equation*} Note that $\pi_{\omega_0}(Y^{-}_{\omega_0})= \pi_{\omega_0}(Y^{+}_{\omega_0}) =: Z_{\omega_0}$, and that $\pi_{\omega_0}$ is a diffeomorphism from $Y^{-}_{\omega_0}$ to $Z_{\omega_0}$, and from $Y^{+}_{\omega_0}$ to $Z_{\omega_0}$. Let us denote by $y^{-}= y^-_{\omega_0}$ and $y^{+}= y^+_{\omega_0}$ the respective inverses. For any $z\in Z_{\omega_0}$, we have $$\Big{|}\det \Big{(} \frac{\partial y^{\pm}(z) }{\partial z}\Big{)}\Big{|} = |\omega_0\cdot \nu_{y^{\pm}(z)}|^{-1}.$$ \section{Proof of Proposition \ref{thm:trace-lemma}}\label{sec : proof proposition} First of all, let us note that it is enough to show the result for $p>0$. Indeed, if we show that we have \begin{equation}\label{eq : tracepoly} \Tr P( S(k)) = (-1)^p\, \Vol (\mathcal{I}) \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} + O(k^{d-1-1/3+\varepsilon}) \end{equation} whenever $P$ is a polynomial of the form $P(X)= (X-1)^p$ with $p\geq 1$, then, since $\frac{1}{2\pi}\int_0^{2\pi}(e^{i\theta}-1)^p\,\mathrm{d}\theta = (-1)^p$, by linearity we obtain the second statement of Proposition \ref{thm:trace-lemma} for every polynomial $P$ vanishing at $1$. Now, since $S(k)$ is unitary, we have \begin{equation*} \begin{aligned} \Tr \big{(} S(k)^{-p}-Id\big{)} &= \sum_n \langle e_n, ( S(k)^{-p}-Id) e_n \rangle~~\text{ for any orthonormal basis } (e_n)\\ &= \sum_n \langle (S(k)^p - Id) e_n, e_n \rangle\\ &= \overline{\Tr (S(k)^p-Id)}, \end{aligned} \end{equation*} so that, the leading term of the asymptotics being real and the remainder bound being preserved under complex conjugation, the second statement also holds when $P(X)= X^p-1$ for any $p\in \mathbb{Z}\backslash \{0\}$, and, by linearity, for any trigonometric polynomial vanishing at $1$. Therefore, let us fix from now on $p\geq 1$.
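For the reader's convenience, let us record the elementary identity used in the reduction above; it follows from the binomial theorem, since only the constant Fourier mode survives integration:
\begin{equation*}
\frac{1}{2\pi}\int_{0}^{2\pi} (e^{i\theta}-1)^p\, \mathrm{d}\theta
= \frac{1}{2\pi}\sum_{j=0}^{p}\binom{p}{j}(-1)^{p-j}\int_{0}^{2\pi} e^{ij\theta}\, \mathrm{d}\theta
= (-1)^p.
\end{equation*}
In particular, (\ref{eq: TracePowers}) is consistent with the second statement of Proposition \ref{thm:trace-lemma}: both give $\langle \mu_k, P \rangle \to (-1)^p\, \Vol(\p \Omega)\, \omega_{d-1}$ when $P(X) = (X-1)^p$.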
We have \begin{equation*} \big{(}A^p(k) \phi_{\omega_0,\eta_0}\big{)}(\alpha_p) = \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \int_{(\Sph^{d-1})^p} \int_{(\partial \Omega)^p} e^{ik \Phi_p(\boldsymbol{\alpha}, \alpha_p,\boldsymbol{y})} a_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \boldsymbol{y}, \end{equation*} where we write $\boldsymbol{\alpha}=(\alpha_0,\dots,\alpha_{p-1})$ and $\boldsymbol{y}=(y_0,\dots,y_{p-1})$, and where \begin{equation}\label{eq:defPhip} \Phi_p (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}):= \frac{i}{2} |\alpha_0-\omega_0|^2+ \alpha_0\cdot\eta_0 + \sum_{i=0}^{p-1} (\alpha_i-\alpha_{i+1})\cdot y_i, \end{equation} \begin{equation}\label{eq: def a} a_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) := \chi (k^{1/3}|\alpha_0-\omega_0|) \prod_{i=0}^{p-1} \big{(} |\nu_{y_i}\cdot \alpha_i | - \nu_{y_i}\cdot \alpha_{i+1} + E(\alpha_i,y_i,k)\big{)}. \end{equation} Recall that $\chi\in C^{\infty}(\R^+)$ is the cutoff function fixed in Section \ref{sec: tools}, satisfying $\chi(t)= 1$ if $t\leq 1$ and $\chi(t)=0$ if $t\geq 2$; let us also write \begin{equation}\label{eq: def rho} \rho(\boldsymbol{\alpha},\boldsymbol{y}) := \prod_{j=0}^{p-1} \big{(}1-\chi\big{(} k^{1/3} |\nu_{y_j}\cdot \alpha_j|\big{)}\big{)}. \end{equation} We shall write \begin{equation*} \begin{aligned} A_{glan}^{p,\omega_0,\eta_0} (\alpha_p) &:= \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \int_{(\Sph^{d-1})^p} \int_{(\partial \Omega)^p} e^{ik \Phi_p(\boldsymbol{\alpha}, \alpha_p,\boldsymbol{y})} \big{(}1- \rho(\boldsymbol{\alpha},\boldsymbol{y})\big{)} a_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \boldsymbol{y}\\ A_{nonglan}^{p,\omega_0,\eta_0} (\alpha_p) &:= \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \int_{(\Sph^{d-1})^p} \int_{(\partial \Omega)^p} e^{ik \Phi_p(\boldsymbol{\alpha}, \alpha_p,\boldsymbol{y})} \rho(\boldsymbol{\alpha},\boldsymbol{y}) a_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \boldsymbol{y}, \end{aligned} \end{equation*} so that $$\big{(}A^p(k) \phi_{\omega_0,\eta_0}\big{)}(\alpha_p) = A_{glan}^{p,\omega_0,\eta_0} (\alpha_p) + A_{nonglan}^{p,\omega_0,\eta_0} (\alpha_p) . $$ \begin{lemma}\label{lem : glancingset is burried} Suppose that $(\omega_0,\eta_0)\in T^*\Sph^{d-1}\backslash G_\varepsilon$, with $\varepsilon>0$ small enough. We then have $$\|A_{glan}^{p,\omega_0,\eta_0}\|_{C^0} = O(k^{-\infty}).$$ \end{lemma} We shall prove this lemma in Section \ref{section : killing the glancing}. Let us now describe the critical set of the phase $\Phi_p$. The following lemma is elementary, and its result is depicted in Figure \ref{fig: criticalObs}. \begin{lemma}\label{lem: critical3} Let $(\alpha,\eta)\in \mathcal{I}$. Then there are exactly four points $(\alpha',y')\in \Sph^{d-1}\times \partial \Omega$ which satisfy \begin{equation*} \begin{split} \eta - y' \in \mathbb{R}\alpha,\\ \alpha - \alpha' \in \mathbb{R}\nu_{y'}. \\ \end{split} \end{equation*} We shall denote them by $(\alpha',y') =: \tau^{\epsilon_1,\epsilon_2}(\alpha,\eta)=: \big{(}\alpha^{\epsilon_1,\epsilon_2}(\alpha,\eta),y^{\epsilon_1,\epsilon_2}(\alpha,\eta)\big{)}$, where $\epsilon_1,\epsilon_2\in \{\pm\}$, and they are as follows: \begin{equation*} \begin{aligned} &\epsilon_1=+ \Longrightarrow \nu_{y'}\cdot \alpha > 0\\ &\epsilon_1=- \Longrightarrow \nu_{y'}\cdot \alpha < 0\\ &\epsilon_2=+ \Longrightarrow |\alpha' - \alpha| = 2| \nu_{y'}\cdot \alpha|>0\\ &\epsilon_2=- \Longrightarrow \alpha'= \alpha.
\end{aligned} \end{equation*} In any case, we have \begin{equation}\label{eq:equalangles} |\alpha'\cdot \nu_{y'}| = |\alpha\cdot \nu_{y'}|. \end{equation} Furthermore, let us write $$\tilde{\kappa}^{\epsilon_1,\epsilon_2}(\alpha,\eta) := \big{(}\alpha',\pi_{\alpha'}(y')\big{)}. $$ Then for all $(\alpha,\eta)$, we have \begin{equation}\label{eq:DifferentKappas} \begin{aligned} \tilde{\kappa}^{\pm,-}(\alpha,\eta) = (\alpha,\eta)\\ \tilde{\kappa}^{-,+}(\alpha,\eta) = \kappa(\alpha,\eta)\\ \tilde{\kappa}^{+,+}(\alpha,\eta) = \kappa^{-1}(\alpha,\eta), \end{aligned} \end{equation} with $\kappa$ as in (\ref{eq:the-scattering-map}). \end{lemma} \begin{figure} \includegraphics[scale=0.9]{criticalObstacle} \caption{The solutions to (\ref{eq:equalangles}).}\label{fig: criticalObs} \end{figure} For all $(\alpha, y)\in \Sph^{d-1}\times \partial \Omega$ such that $\alpha\cdot \nu_y\neq 0$, we shall write $$\tilde{\tau}^{\epsilon_1,\epsilon_2} (\alpha,y):= \tau^{\epsilon_1,\epsilon_2}(\alpha,\pi_{\alpha}(y)).$$ We shall write $$\Sigma_p := \big{(}\{+,-\} \times \{+,-\}\big{)}^p.$$ If $\sigma=(\sigma_0,...,\sigma_{p-1})\in \Sigma_p$, we will write \begin{equation}\label{eq:deftildekappa} \tilde{\kappa}^\sigma := \tilde{\kappa}^{\sigma_{p-1}}\circ ... \circ \tilde{\kappa}^{\sigma_0}. \end{equation} \begin{lemma}\label{lem : critical2} Let $(\omega_0,\eta_0)\in \mathcal{I}$, and let $\sigma\in \Sigma_p$. There exists a unique $(\boldsymbol{\alpha}^\sigma,\alpha_p^\sigma,\boldsymbol{y}^\sigma)$ such that \begin{equation*} \begin{aligned} \alpha^\sigma_0&= \omega_0\\ (\alpha^\sigma_1,y^\sigma_0) &= \tau^{\sigma_0} (\alpha^\sigma_0, \eta_0)\\ \forall i=1,...,p-1,~~ (\alpha^\sigma_{i+1},y^\sigma_i) &= \tilde{\tau}^{\sigma_i} (\alpha^\sigma_i, y^\sigma_{i-1}). \end{aligned} \end{equation*} We have $$ \{ (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})\in \big{(}\Sph^{d-1}\big{)}^{p+1} \times \big{(} \partial \Omega \big{)}^p ;~ \partial_{\boldsymbol{\alpha},\boldsymbol{y}}\Phi_p (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})=0 \text{ and } \Im \Phi_p (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})=0 \} = \bigcup_{\sigma \in \Sigma_p} \{(\boldsymbol{\alpha}^{\sigma},\alpha_p^\sigma,\boldsymbol{y}^{\sigma})\}.$$ Furthermore, for each $\sigma\in \Sigma_p$, the remaining stationarity conditions in the variable $\alpha_p$ (for the phase $\tilde{\Phi}_p$ obtained after pairing with $\phi_{\omega_0,\eta_0}$, see Section \ref{sec:Proofmainpart}) hold at $(\boldsymbol{\alpha}^\sigma,\alpha^\sigma_p,\boldsymbol{y}^\sigma)$ if and only if we have \begin{equation*} \begin{aligned} &\alpha^\sigma_p=\omega_0\\ &\eta_0 = \pi_{\alpha^\sigma_p} (y^\sigma_{p-1}). \end{aligned} \end{equation*} Note that this condition is equivalent to \begin{equation} (\omega_0,\eta_0)= \tilde{\kappa}^{\sigma}(\omega_0,\eta_0). \end{equation} \end{lemma} \begin{proof} Clearly $\alpha_0 = \alpha_p = \omega_0$. The criticality conditions for $\alpha_0$ and $\alpha_p$ give that $y_0, y_{p-1} \in \mathbb{R}\omega_0 + \eta_0$, and the other equations in $\p \Phi_p = 0$ imply \begin{equation*} \begin{split} \alpha_i - \alpha_{i + 1} \in \mathbb{R}\nu_{y_i},\ \mbox{ for } i = 0, \dots, p-1, \\ y_{i -1} - y_{i} \in \mathbb{R}\alpha_{i},\ \mbox{ for } i = 1, \dots, p-1. \end{split} \end{equation*} The lemma then follows from Lemma \ref{lem: critical3}.
\end{proof} In the sequel, we shall write $$\sigma_0 := \big{(}(-,-), (-,-) , ..., (-,-)\big{)}.$$ Thanks to (\ref{eq:equalangles}) and to (\ref{eq: def rho}), we see that if $(\omega_0,\eta_0)\in \mathcal{I}\backslash G_\varepsilon$ and $\sigma \neq \sigma'\in \Sigma_p$, and if $\mathrm{d}_p$ denotes a Riemannian distance on $\big{(}\Sph^{d-1}\big{)}^{p+1} \times \big{(} \p \Omega\big{)}^p$, then there exists $C>0$ such that $$\mathrm{d}_p\Big{(}(\boldsymbol{\alpha}^{\sigma},\alpha_p^\sigma,\boldsymbol{y}^{\sigma}),(\boldsymbol{\alpha}^{\sigma'},\alpha_p^{\sigma'},\boldsymbol{y}^{\sigma'})\Big{)}\geq C k^{-1/3}.$$ We may therefore build a family of functions $\chi_\sigma \in S_{1/3} \big{(} \big{(}\Sph^{d-1}\big{)}^{p+1} \times \big{(} \p \Omega\big{)}^p \big{)}$, $\sigma\in\Sigma_p$, such that there exists $c>0$ satisfying $$\forall \sigma,\sigma'\in \Sigma_p, \forall x \in \big{(}\Sph^{d-1}\big{)}^{p+1} \times \big{(} \p \Omega\big{)}^p, \quad \mathrm{d}_p(x, x_\sigma) < c k^{-1/3} \Longrightarrow \chi_{\sigma'}(x) = \delta_{\sigma,\sigma'},$$ where $x_\sigma := (\boldsymbol{\alpha}^{\sigma},\alpha_p^\sigma,\boldsymbol{y}^{\sigma})$, and such that \begin{equation}\label{eq:conditionsupport} \forall \sigma\in \Sigma_p,~ \forall (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})\in \mathrm{spt}\, \chi_{\sigma},~ \forall i=0,...,p-1,~ \sigma_i = (\pm,\bullet) \Longrightarrow y_i\in Y^{\pm}_{\alpha^\sigma_i}, \end{equation} where $Y^\pm_{\alpha_i}$ is as in Section \ref{sec: changevariable}. In particular, if $(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})\in \mathrm{spt}\, \chi_{\sigma_0}$, then $y_i\in Y^-_{\omega_0}$ for all $i$. We then set $$\chi_\infty = 1- \sum_{\sigma\in \Sigma_p} \chi_\sigma.$$ We shall then write, for each $\sigma\in \Sigma_p\cup \{\infty\}$ \begin{equation*} \begin{aligned} A_{\sigma}^{p,\omega_0,\eta_0} (\alpha_p) &:= \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \int_{(\Sph^{d-1})^p} \int_{(\partial \Omega)^p} e^{ik \Phi_p(\boldsymbol{\alpha}, \alpha_p,\boldsymbol{y})} \big{(}1- \rho(\boldsymbol{\alpha},\boldsymbol{y})\big{)} \chi_{\sigma}(\boldsymbol{\alpha},\boldsymbol{y}) a_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \boldsymbol{y}. \end{aligned} \end{equation*} Lemma \ref{lemm: nonstat} implies that $A_\infty^{p,\omega_0,\eta_0}= O(k^{-\infty})$, so that we have \begin{equation*} \begin{aligned} \Tr ((S(k)-Id)^p) & = c_k \int_{\mathcal{I}\backslash G_\varepsilon} d\omega_0 d\eta_0 \langle A^{p,\omega_0,\eta_0}_{\sigma_0}, \phi_{\omega_0,\eta_0}\rangle + \sum_{\sigma\in \Sigma_p\backslash \{\sigma_0\} } c_k \int_{\mathcal{I}\backslash G_\varepsilon}d\omega_0 d\eta_0 \langle A^{p,\omega_0,\eta_0}_{\sigma}, \phi_{\omega_0,\eta_0}\rangle \\ &+ c_k \int_{\mathcal{I}\backslash G_\varepsilon} d\omega_0 d\eta_0 \langle A^{p,\omega_0,\eta_0}_{glan}, \phi_{\omega_0,\eta_0}\rangle \\ &+ c_k \int_{T^*\Sph^{d-1}\backslash(\mathcal{I}\cup G_\varepsilon)} d\omega_0 d\eta_0 \langle \phi_{\omega_0,\eta_0}, (S(k)-Id)^p \phi_{\omega_0,\eta_0}\rangle\\ &+ c_k \int_{ G_\varepsilon} d\omega_0 d\eta_0 \langle \phi_{\omega_0,\eta_0}, (S(k)-Id)^p \phi_{\omega_0,\eta_0}\rangle + O(k^{-\infty}). \end{aligned} \end{equation*} The proof of Proposition \ref{thm:trace-lemma} then follows from the following three lemmas, as well as Lemma \ref{lem : glancingset is burried} and equation (\ref{eq:alph-bound}).
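Schematically, and only as a summary of the bookkeeping (all the estimates are proven below), the decomposition above yields
\begin{equation*}
\Tr \big((S(k)-Id)^p\big) =
\underbrace{(-1)^p \Big(\frac{k}{2\pi}\Big)^{d-1} \Vol(\mathcal{I})}_{\text{Lemma \ref{lem: main part}}}
\;+\; \underbrace{O(k^{d-1-1/3+\varepsilon})}_{\text{Lemma \ref{lem: offdiagonal} and (\ref{eq:alph-bound})}}
\;+\; \underbrace{O(k^{-\infty})}_{\text{Lemmas \ref{lemm:noninteracting} and \ref{lem : glancingset is burried}}},
\end{equation*}
which, since $\Vol(\mathcal{I}) = \Vol(\p \Omega)\, \omega_{d-1}$, is exactly (\ref{eq: TracePowers}).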
\begin{lemma}\label{lemm:noninteracting} We have $$\int_{\mathcal{P}_1\backslash G_\varepsilon} d\omega_0 d\eta_0 \langle \phi_{\omega_0,\eta_0}, (S(k)-Id)^p \phi_{\omega_0,\eta_0}\rangle = O(k^{-\infty}).$$ \end{lemma} \begin{proof} Let $(\omega_0,\eta_0)\in \mathcal{P}_1\backslash G_\varepsilon$. From (\ref{eq:defPhip}), we see that $|\partial_{\alpha_0} \Phi_p| = |\pi_{\alpha_0} (\eta_0-y_0)| + |\alpha_0-\omega_0| \geq C \mathrm{d}((\omega_0,\eta_0),\mathcal{I})$, where $\mathrm{d}$ is the Riemannian distance on $T^*\Sph^{d-1}$. Therefore, Lemma \ref{lemm: nonstat} implies that we have $$A^p(k)\phi_{\omega_0,\eta_0} = O\big{(} \big{(}k \,\mathrm{d}\big{(}(\omega_0,\eta_0),\mathcal{I}\big{)}\big{)}^{-\infty}\big{)}.$$ Since $\mathrm{d}((\omega_0,\eta_0),\mathcal{I})\geq k^{-1/3+\varepsilon}$ on $\mathcal{P}_1\backslash G_\varepsilon$, the lemma follows. \end{proof} The following two lemmas will be proven in Sections \ref{sec:proof offdiagonal} and \ref{sec:Proofmainpart}, respectively. \begin{lemma}\label{lem: offdiagonal} For any $\sigma\in \Sigma_p\backslash \{\sigma_0\}$, we have $$c_k\int_{\mathcal{I}\backslash G_\varepsilon} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle = O(k^{d-1-1/3+\varepsilon}).$$ \end{lemma} \begin{lemma}\label{lem: main part} $$c_k\int_{\mathcal{I}\backslash G_\varepsilon} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma_0}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle =(-1)^p \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Vol(\mathcal{I}) + O(k^{d-1-1/3}).$$ \end{lemma} \section{Proofs of the technical lemmas} \subsection{Computation of the major contribution: Proof of Lemma \ref{lem: main part}}\label{sec:Proofmainpart} Recall that we have $$ \langle A_{\sigma_0}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle = \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \int_{(\Sph^{d-1})^{p+1}} \int_{(\partial \Omega)^p} e^{ik \tilde{\Phi}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})} \tilde{a}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \alpha_p \mathrm{d}\boldsymbol{y},$$ where \begin{equation*} \begin{aligned} \tilde{\Phi}_p (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})&:= \frac{i}{2} |\alpha_0-\omega_0|^2+ \frac{i}{2} |\alpha_p-\omega_0|^2 + (\alpha_0- \alpha_p)\cdot \eta_0 + \sum_{i=0}^{p-1} (\alpha_i-\alpha_{i+1})\cdot y_i,\\ \tilde{a}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) &:= \big{(}1- \rho(\boldsymbol{\alpha},\boldsymbol{y})\big{)} \chi_{\sigma_0}(\boldsymbol{\alpha},\boldsymbol{y}) \chi (k^{1/3}|\alpha_0-\omega_0|) \chi (k^{1/3}|\alpha_p-\omega_0|) \\ &\qquad \qquad \prod_{i=0}^{p-1} \big{(} |\nu_{y_i}\cdot \alpha_i | - \nu_{y_i}\cdot \alpha_{i+1} + E(\alpha_i,y_i,k)\big{)}.
\end{aligned} \end{equation*} Thanks to (\ref{eq:conditionsupport}), we may write $$\langle A_{\sigma_0}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle = \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \int_{(\Sph^{d-1})^{p+1}} \int_{Y_{\omega_0}^{-}}...\int_{Y_{\omega_0}^{-}}e^{ik \tilde{\Phi}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y})} \tilde{a}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \alpha_p \mathrm{d}\boldsymbol{y}.$$ We may then carry out $p$ times the change of variables $y^-$ to obtain $$\langle A_{\sigma_0}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle = \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p\int_{(\Sph^{d-1})^{p+1}} \int_{(Z_{\omega_0})^p} e^{ik \tilde{\Phi}^-_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})} \tilde{a}^-_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \alpha_p \mathrm{d}\boldsymbol{z} + O(k^{-\infty}).$$ Here, we wrote $\boldsymbol{y}^-(\boldsymbol{z}):= (y^{-}(z_0),...,y^{-}(z_{p-1}))$, and \begin{equation*} \begin{aligned} \tilde{\Phi}^-_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})&:=\tilde{\Phi}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}^-(\boldsymbol{z}))\\ \tilde{a}^-_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})&:= \frac{\tilde{a}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}^-(\boldsymbol{z}))}{|\alpha_0\cdot \nu_{y^{-}(z_0)}|... |\alpha_{p-1}\cdot \nu_{y^{-}(z_{p-1})}|}\\ &= \tilde{\chi} (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}^-(\boldsymbol{z})) \frac{\prod_{i=0}^{p-1} \big{(} |\nu_{y^-(z_i)}\cdot \alpha_i | - \nu_{y^-(z_i)}\cdot \alpha_{i+1} + E(\alpha_i,y^-(z_i),k)\big{)}}{|\alpha_0\cdot \nu_{y^{-}(z_0)}|... |\alpha_{p-1}\cdot \nu_{y^{-}(z_{p-1})}|}, \end{aligned} \end{equation*} where $\tilde{\chi} (\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) = \big{(}1- \rho(\boldsymbol{\alpha},\boldsymbol{y})\big{)} \chi_{\sigma_0}(\boldsymbol{\alpha},\boldsymbol{y}) \chi (k^{1/3}|\alpha_0-\omega_0|) \chi (k^{1/3}|\alpha_p-\omega_0|).$ Note that we have $$\tilde{a}^-_p\in S_{1/3}.$$ Define the \textit{critical set} as $$\mathcal{C}^{-}_p:= \{(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})\in (\Sph^{d-1})^{p+1} \times (Z_{\omega_0})^p;~ \partial \tilde{\Phi}_p^- ( \boldsymbol{\alpha},\alpha_p,\boldsymbol{z})= 0 \text{ and } \Im \tilde{\Phi}_p^- (\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})=0 \}.$$ By Lemma \ref{lem : critical2}, we have that $\mathcal{C}^{-}_p=\{(\omega_0,...,\omega_0, \eta_0,...,\eta_0)\}$. Note that we have \begin{equation}\label{eq: OnCrit} \begin{aligned} \tilde{\Phi}^-_p(\omega_0,...,\omega_0,\eta_0,...,\eta_0)&= 0\\ \tilde{a}^-_p(\omega_0,...,\omega_0,\eta_0,...,\eta_0) &= \frac{ \big{(} 2|\nu_{y^-(\eta_0)}\cdot \omega_0| + E(\omega_0,y^-(\eta_0),k)\big{)}^p}{|\nu_{y^-(\eta_0)}\cdot \omega_0|^p} \\ &= 2^p + R_p(\omega_0,\eta_0), \end{aligned} \end{equation} where $$R_p(\omega_0,\eta_0) = \sum_{l=1}^p \binom{p}{l} 2^{p-l}\, \frac{E(\omega_0,y^-(\eta_0),k)^l}{|\nu_{y^-(\eta_0)}\cdot \omega_0|^l}.$$ \subsubsection*{Computation of the Hessian} \begin{lemma}\label{lem: Hessian} $$\det \partial^2 \tilde{\Phi}_p^- (\omega_0,...,\omega_0, \eta_0,...,\eta_0) = (2i)^{d-1} (-1)^{p(d-1)}.$$ \end{lemma} \begin{proof} We have $$\frac{\partial^2 \tilde{\Phi}_p^-}{\partial z_i \partial z_{i'}} = \delta_{i,i'}\, (\alpha_i-\alpha_{i+1})\cdot \frac{\partial^2 y^{-}}{\partial z_i^2},$$ which vanishes at $(\omega_0,...,\omega_0, \eta_0,...,\eta_0)$.
We also have $$\frac{\partial^2 \tilde{\Phi}_p^-}{\partial \alpha_i \partial \alpha_{i'}}(\omega_0,...,\omega_0, \eta_0,...,\eta_0)= \delta_{i=i'=0} \big{(} i\, Id + B\big{)}+ \delta_{i=i'=p} \big{(}i\, Id - B\big{)},$$ where $B$ is the matrix $\frac{\partial^2}{\partial \alpha^2}\big{(} \alpha \cdot y^-(\eta_0)\big{)}$. For $i=0,...,p$, $i'=0,...,p-1$, let us define the matrix $$M^{i,i'} = \Big{(}\frac{\partial^2 \tilde{\Phi}_p^-}{\partial \alpha_{i,j} \partial z_{i',j'}}(\omega_0,...,\omega_0, \eta_0,...,\eta_0)\Big{)}_{1\leq j,j'\leq d-1}.$$ For $1\leq i\leq p-1$, we have $M^{i,i'}=0$ unless $i' = i$ or $i'=i-1$. We also have $M^{0,i}=0$ if $i\neq 0$, and $M^{p,i}=0$ unless $i=p-1$. For $0\leq i\leq p-1$, we have $M^{i,i} = \boldsymbol{1}_{d-1}$, while, for $1\leq i\leq p$, $M^{i,i-1}= - \boldsymbol{1}_{d-1}$. All in all, $ \partial^2 \tilde{\Phi}_p^- (\omega_0,...,\omega_0, \eta_0,...,\eta_0) $ can be written as follows, with $(2p+1)^2$ blocks of size $d'=d-1$. \begin{equation*} \begin{pmatrix} & & & & & & \boldsymbol{1}_{d'} & -\boldsymbol{1}_{d'} & 0 & ... & 0 & 0 & 0 \\ & & & & & & 0 & \boldsymbol{1}_{d'} & -\boldsymbol{1}_{d'} &... & 0 & 0 & 0\\ & &\bold{0}_{d'p\times d'p} && & & &&&... \\ &&&&&& 0 & 0 &0 &...& \boldsymbol{1}_{d'} & -\boldsymbol{1}_{d'} & 0\\ & & & & & & 0 & 0 & 0 & ... & 0 & \boldsymbol{1}_{d'} & -\boldsymbol{1}_{d'}\\ \boldsymbol{1}_{d'} & 0 & 0 & ...& 0 & 0 & i\bold{1}_{d'} +B & 0 & 0 & ... & 0 & 0 & 0\\ -\boldsymbol{1}_{d'}& \boldsymbol{1}_{d'} & 0& ... & 0 & 0 & \\ 0 & -\boldsymbol{1}_{d'} & \boldsymbol{1}_{d'} & ... & 0 & 0 & && & \bold{0}_{d'(p+1)\times d'(p-1)} \\ &&&... & & & & \\ 0 & 0 &0&... & -\boldsymbol{1}_{d'} & \boldsymbol{1}_{d'} & \\ 0 &0 &0 & ...& 0 & -\boldsymbol{1}_{d'} & 0 & 0 & 0 & ... & 0 &0 & i\bold{1}_{d'}-B \end{pmatrix}. \end{equation*} The proof then follows from Lemma \ref{lem:determinant}. \end{proof} Lemma \ref{lem: Hessian} tells us that $\det \big{(}i^{-1} \partial^2 \tilde{\Phi}_p^- (\omega_0,...,\omega_0, \eta_0,...,\eta_0)\big{)} = 2^{d-1}.$ Therefore, using Lemma \ref{lem: phistat} combined with (\ref{eq: OnCrit}), we get that the oscillatory integral $$T^p_{diag}(\omega_0,\eta_0,k) := \int_{(\Sph^{d-1})^{p+1}} \int_{(Z_{\omega_0})^p} e^{ik \tilde{\Phi}^-_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})} \tilde{a}^-_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \alpha_p \mathrm{d}\boldsymbol{z}$$ satisfies $$T^p_{diag}(\omega_0,\eta_0,k) = \Big{(}\frac{2\pi}{k}\Big{)}^{(d-1)(2p+1)/2}2^{(d-1)/2} \big{(}2^p + R_p(\omega_0,\eta_0)\big{)} + O\big{(}k^{-(d-1)(2p+1)/2-1/3}\big{)}.$$ All in all, we have \begin{equation*} \begin{aligned} &c_k\int_{\mathcal{I}\backslash G_\varepsilon} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma_0}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle\\ &= (-1)^p \Big{(}2^{-(d-1)/2}\big{(}k/(2\pi) \big{)}^{3(d-1)/2} (\frac{1}{2} k^{d-1} (2\pi)^{1-d} )^p \Big{(}\frac{2\pi}{k}\Big{)}^{(d-1)(2p+1)/2} 2^p \Vol(\mathcal{I})2^{(d-1)/2}\Big{)}\Big{(}1+O(k^{-1/3})\Big{)} \\&= (-1)^p \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Vol(\mathcal{I}) + O(k^{d-1-1/3}). \end{aligned} \end{equation*} \subsection{Ruling out the glancing rays: proof of Lemma \ref{lem : glancingset is burried}}\label{section : killing the glancing} Let us define the operator $S_{glan}$ by $$\big{(}S_{glan} f\big{)} (\theta)= -\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \int_{\Sph^{d-1}} \int_{\p \Omega} e^{ik(\alpha-\theta)\cdot y}\, \chi\big{(} k^{1/6} |\nu_{y}\cdot \alpha|\big{)} \big{(}-\nu_y\cdot\theta + |\nu_y\cdot\alpha| + E(\alpha,y,k)\big{)} f(\alpha)\, \mathrm{d}\alpha \mathrm{d}y,$$ where the integrand is that of the Kirchhoff representation (\ref{eq : Kirchhoff amplitude}) of the scattering amplitude.
We shall also write $$\big{(}S_{nonglan} f\big{)}(\theta) = -\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \int_{\Sph^{d-1}} \int_{\p \Omega} e^{ik(\alpha-\theta)\cdot y}\,\big{(}1-\chi\big{(} k^{1/6} |\nu_{y}\cdot \alpha|\big{)}\big{)} \big{(}-\nu_y\cdot\theta + |\nu_y\cdot\alpha| + E(\alpha,y,k)\big{)} f(\alpha)\, \mathrm{d}\alpha \mathrm{d}y,$$ so that $$S(k)-Id = S_{glan} + S_{nonglan}.$$ In particular, we have that $$(S_{nonglan})^p \phi_{\omega_0,\eta_0} = A^{p,\omega_0,\eta_0}_{nonglan}.$$ \begin{lemma}\label{lem: dying glancingset} Let $\varepsilon>0$ be small enough, and let $(\omega_0,\eta_0)\in T^*\Sph^{d-1}\backslash G_{\varepsilon}$. Then $$\|S_{glan} \phi_{\omega_0,\eta_0} \|=O(k^{-\infty}).$$ \end{lemma} \begin{proof} We have $$S_{glan} \phi_{\omega_0,\eta_0}(\theta) = f_1 (\theta) + f_2 (\theta),$$ where $$f_1(\theta) = -\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \int_{\Sph^{d-1}} \int_{\p \Omega} e^{i k (\alpha - \theta) \cdot y} \chi\big{(} k^{1/6} |\nu_{y}\cdot \alpha|\big{)} \chi \big{(} k^{1/3}|\alpha-\omega_0|\big{)} e^{-ik \eta_0 \cdot\alpha} e^{-\frac{k}{2} |\alpha-\omega_0|^2} \big{(}-\nu_y \cdot \theta + E(\alpha,y,k) \big{)} \mathrm{d}\alpha \mathrm{d}y$$ and $$f_2(\theta) = -\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \int_{\Sph^{d-1}} \int_{\p \Omega} e^{i k (\alpha - \theta) \cdot y} \chi\big{(} k^{1/6} |\nu_{y}\cdot \alpha|\big{)} \chi \big{(} k^{1/3}|\alpha-\omega_0|\big{)} e^{-ik \eta_0 \cdot\alpha} e^{-\frac{k}{2} |\alpha-\omega_0|^2} |\nu_y \cdot \alpha | \mathrm{d}\alpha \mathrm{d}y.$$ Here $f_1$ is an oscillatory integral, with an amplitude $$\chi\big{(} k^{1/6} |\nu_{y}\cdot \alpha|\big{)} \chi \big{(} k^{1/3}|\alpha-\omega_0|\big{)} \big{(}-\nu_y \cdot \theta + E(\alpha,y,k) \big{)}\in S_{1/3},$$ and a phase $$\varphi(\alpha,y; \theta, \omega_0,\eta_0) = \frac{i}{2} |\alpha-\omega_0|^2 + \alpha \cdot \eta_0 + (\theta-\alpha)\cdot y.$$ We have $|\partial_\alpha \Re \varphi| = |\pi_\alpha (y)- \eta_0|$, with $\pi_\alpha$ as in Section \ref{sec: changevariable}. By the assumption we made on $(\omega_0,\eta_0)$, we have $|\partial_\alpha \Re \varphi| \geq c k^{-1/3+\varepsilon}$ for some $c>0$ on the support of the amplitude. Therefore, we may use Lemma \ref{lemm: nonstat} to obtain that $f_1=O(k^{-\infty})$. To deal with $f_2$, we may use the changes of variables $y^\pm$ introduced above, to write \begin{equation*} \begin{aligned} f_2(\theta)= -\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1}\sum_{\epsilon= \pm} \int_{\Sph^{d-1}} \int_{Z_{\omega_0}} e^{i k (\alpha - \theta) \cdot y^\epsilon(z)} \chi\big{(} k^{1/6} |\nu_{y^\epsilon(z)}\cdot \alpha|\big{)} \chi \big{(} k^{1/3}|\alpha-\omega_0|\big{)} e^{-ik \eta_0 \cdot\alpha} e^{-\frac{k}{2} |\alpha-\omega_0|^2} \mathrm{d}\alpha \mathrm{d}z. \end{aligned} \end{equation*} Each of the integrals $\int_{\Sph^{d-1}} e^{i k (\alpha - \theta) \cdot y^\epsilon(z)} \chi\big{(} k^{1/6} |\nu_{y^\epsilon(z)}\cdot \alpha|\big{)} \chi \big{(} k^{1/3}|\alpha-\omega_0|\big{)} e^{-ik \eta_0 \cdot\alpha} e^{-\frac{k}{2} |\alpha-\omega_0|^2} \mathrm{d}\alpha$ can be seen as an oscillatory integral with a parameter $z\in Z_{\omega_0}$. The amplitude $\alpha \mapsto \chi\big{(} k^{1/6} |\nu_{y^\epsilon(z)}\cdot \alpha|\big{)} \chi \big{(} k^{1/3}|\alpha-\omega_0|\big{)}$ is in $S_{1/3}$, with bounds on the semi-norms independent of the point $z\in Z_{\omega_0}$, while the phase $\varphi (\alpha; z, \theta) = (\alpha - \theta) \cdot y^\epsilon(z) + \eta_0 \cdot\alpha +\frac{i}{2} |\alpha-\omega_0|^2$ satisfies $|\partial_\alpha \Re \varphi| \geq c k^{-1/2 +\varepsilon}$, with a constant $c$ independent of the point $z$ in the support of the amplitude.
We may therefore apply the non-stationary phase Lemma \ref{lemm: nonstat} to conclude. \end{proof} \begin{corollary}\label{cor: dying glancing} Let $(\omega_0,\eta_0)\in T^*\Sph^{d-1}\backslash G_{4\varepsilon}$, and $(\omega_1,\eta_1)\in G_\varepsilon$. We then have $$\langle \phi_{\omega_1,\eta_1}, (S(k)-Id) \phi_{\omega_0,\eta_0} \rangle = O(k^{-\infty}).$$ \end{corollary} \begin{proof} By Lemma \ref{lem: stable glancing}, we have that $\kappa(\omega_0,\eta_0)\in T^*\Sph^{d-1}\backslash G_{2\varepsilon}$, so that \begin{equation}\label{eq: distant points} \mathrm{d}(\kappa (\omega_0,\eta_0), (\omega_1,\eta_1)) > k^{-1/2+\varepsilon/2}. \end{equation} By the preceding lemma, we have that \begin{equation*} \begin{aligned} &\langle \phi_{\omega_1,\eta_1}, (S(k)-Id) \phi_{\omega_0,\eta_0} \rangle = \langle \phi_{\omega_1,\eta_1}, S_{nonglan} \phi_{\omega_0,\eta_0} \rangle + O(k^{-\infty})\\ &= \int_{(\Sph^{d-1})^2} \int_{\p \Omega} \big{(}1-\chi\big{(} k^{1/6} |\nu_{y}\cdot \alpha_0|\big{)}\big{)}\, a(\alpha_0,\alpha_1,y;k) e^{ik \varphi(\alpha_0,\alpha_1,y)} \mathrm{d}\alpha_0\mathrm{d}\alpha_1 \mathrm{d}y + O(k^{-\infty}), \end{aligned} \end{equation*} for an amplitude $a(\alpha_0,\alpha_1,y;k)\in S_{1/3}$, where $\varphi(\alpha_0,\alpha_1,y) =\frac{i}{2}|\alpha_0-\omega_0|^2 +\frac{i}{2}|\alpha_1-\omega_1|^2 + (\eta_1-y)\cdot \alpha_1+ (y-\eta_0)\cdot \alpha_0$. By (\ref{eq: distant points}), we have that $|\partial \varphi |\geq k^{-1/2+\varepsilon/2}$, and the corollary follows from Lemma \ref{lemm: nonstat}. \end{proof} We may now prove Lemma \ref{lem : glancingset is burried}, which follows from (\ref{eq: glancingdead}) below. \begin{corollary} Let $(\omega_0,\eta_0)\in T^*\Sph^{d-1}\backslash G_{\varepsilon}$. For any $p\geq 1$, we have \begin{equation} \label{eq: glancingdead2} \forall (\omega_1,\eta_1)\in G_{2^{-p-1}\varepsilon}, \quad \langle \phi_{\omega_1,\eta_1}, (S(k)-Id)^p \phi_{\omega_0,\eta_0} \rangle = O(k^{-\infty}), \end{equation} \begin{equation}\label{eq: glancingdead} (S(k)-Id)^p \phi_{\omega_0,\eta_0} = S_{nonglan}^p \phi_{\omega_0,\eta_0} + O(k^{-\infty}). \end{equation} In particular, we have $$\|A_{glan}^{p,\omega_0,\eta_0}\|_{C^0} = O(k^{-\infty}).$$ \end{corollary} \begin{proof} Let us prove (\ref{eq: glancingdead2}) and (\ref{eq: glancingdead}) by induction on $p$. Lemma \ref{lem: dying glancingset} and Corollary \ref{cor: dying glancing} imply that (\ref{eq: glancingdead2}) and (\ref{eq: glancingdead}) are satisfied for $p=1$. Suppose that they are satisfied for some $p\geq 1$. By Lemma \ref{lemma: resolution3}, we know that $(S(k)-Id)^{p} \phi_{\omega_0,\eta_0}$ can be written as $\int_{T^*\Sph^{d-1}} f_p(\omega,\eta; k) \phi_{\omega,\eta}\mathrm{d}\omega \mathrm{d}\eta$ for a certain function $f_p$. Now, (\ref{eq: glancingdead2}) implies that $f_p(\omega,\eta; k) = O(k^{-\infty})$ for $(\omega,\eta)\in G_{2^{-p-1}\varepsilon}$. We therefore have \begin{equation} \begin{aligned} (S(k)-Id)^{p+1} \phi_{\omega_0,\eta_0} = \int_{T^*\Sph^{d-1}\backslash G_{2^{-p-1}\varepsilon}} f_p(\omega,\eta;k)\, (S(k)-Id) \phi_{\omega,\eta}\, \mathrm{d}\omega \mathrm{d}\eta + O(k^{-\infty}). \end{aligned} \end{equation} We then deduce (\ref{eq: glancingdead2}) and (\ref{eq: glancingdead}) at rank $p+1$ by applying Lemma \ref{lem: dying glancingset} and Corollary \ref{cor: dying glancing} once more. \end{proof} \subsection{Proof of Lemma \ref{lem: offdiagonal}}\label{sec:proof offdiagonal} Recall that the statement of Lemma \ref{lem: offdiagonal} is the following.
\begin{lemma} For any $\sigma\in \Sigma_p\backslash \{\sigma_0\}$, for any $\varepsilon>0$, we have $$c_k\int_{\mathcal{I}\backslash G_\varepsilon} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle = O_\varepsilon(k^{d-1-1/3+\varepsilon}).$$ \end{lemma} \begin{proof} For each $i=0,...,p-1$, if $\sigma_i =(\pm, \bullet)$, we set $\beta_i= \pm$. By (\ref{eq:conditionsupport}), we have that \begin{equation*} \begin{aligned} A_{\sigma}^{p,\omega_0,\eta_0} (\alpha_p) &= \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \int_{(\Sph^{d-1})^p} \int_{Y^{\beta_0}_{\alpha^\sigma_0}}... \int_{Y^{\beta_{p-1}}_{\alpha^\sigma_{p-1}}}e^{ik \Phi_p(\boldsymbol{\alpha}, \alpha_p,\boldsymbol{y})} \big{(}1- \rho(\boldsymbol{\alpha},\boldsymbol{y})\big{)} \chi_{\sigma}(\boldsymbol{\alpha},\boldsymbol{y}) a_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d} \boldsymbol{y}\\ &= \Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p\int_{(\Sph^{d-1})^{p}} \int_{Z_{\alpha_0^\sigma}}... \int_{Z_{\alpha_{p-1}^\sigma}} e^{ik \Phi^\sigma_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})} \tilde{a}^\sigma_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) \mathrm{d}\boldsymbol{\alpha} \mathrm{d}\boldsymbol{z}. \end{aligned} \end{equation*} Here, we wrote $\boldsymbol{y}^\sigma(\boldsymbol{z}):= (y_{\alpha^\sigma_0}^{\beta_0}(z_0),...,y_{\alpha^\sigma_{p-1}}^{\beta_{p-1}}(z_{p-1}))$, and \begin{equation*} \begin{aligned} \Phi^\sigma_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})&:=\Phi_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}^\sigma(\boldsymbol{z}))\\ &= \frac{i}{2} |\alpha_0-\omega_0|^2+ \alpha_0\cdot \eta_0 + \sum_{i=0}^{p-1} (\alpha_i-\alpha_{i+1})\cdot y_i^\sigma(z_i),\\ \tilde{a}^\sigma_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})&:= \frac{\tilde{a}_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{y}^\sigma(\boldsymbol{z}))}{|\alpha_0\cdot \nu_{y_0^{\sigma}(z_0)}|... |\alpha_{p-1}\cdot \nu_{y_{p-1}^{\sigma}(z_{p-1})}|}\\ &= \tilde{\chi}_\sigma (\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) \frac{\prod_{i=0}^{p-1} \big{(} |\nu_{y_i^\sigma(z_i)}\cdot \alpha_i | - \nu_{y_i^\sigma(z_i)}\cdot \alpha_{i+1} + E(\alpha_i,y_i^\sigma(z_i),k)\big{)}}{|\alpha_0\cdot \nu_{y_0^{\sigma}(z_0)}|... |\alpha_{p-1}\cdot \nu_{y_{p-1}^{\sigma}(z_{p-1})}|}, \end{aligned} \end{equation*} where $\tilde{\chi}_\sigma (\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) = \big{(}1- \rho(\boldsymbol{\alpha},\boldsymbol{y}^\sigma(\boldsymbol{z}))\big{)} \chi_{\sigma}(\boldsymbol{\alpha},\boldsymbol{y}^\sigma(\boldsymbol{z})) \chi (k^{1/3}|\alpha_0-\omega_0|) \chi (k^{1/3}|\alpha_p-\omega_0|).$ Note that we have $$\tilde{a}^\sigma_p\in S_{1/3}.$$ Let us write $\boldsymbol{z}^\sigma$ for $(\pi_{\alpha^\sigma_j} (y_j^\sigma))_{j=0,...,p-1}$. Note that, if $\mathrm{d}$ denotes the Riemannian distance on $(\Sph^{d-1})^{p+1}\times (\R^{d-1})^p$, we have \begin{equation}\label{eq:localised image} \begin{aligned} &\forall \varepsilon >0, \forall (\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) \in \mathrm{spt} \tilde{\chi}_\sigma, \forall k \text{ large enough},\\ &\mathrm{d}\big{(}(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}), (\boldsymbol{\alpha}^\sigma,\alpha^\sigma_p,\boldsymbol{z}^\sigma)\big{)} > k^{-1/2+\varepsilon} \Longrightarrow |\partial \Phi_p^\sigma (\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) | > k^{-\frac{1}{2}+\frac{\varepsilon}{2p}}.
\end{aligned} \end{equation} If we write $A_{\sigma}^{p,\omega_0,\eta_0} (\alpha_p) =\Big{(}-\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p\big{(} A_\sigma^1 (\alpha_p)+ A_\sigma^2(\sigma_p)\big{)} $ with \begin{equation*} \begin{aligned} A_\sigma^1(\alpha_p) := &\int_{(\Sph^{d-1})^{p}} \int_{Z_{\alpha_0^\sigma}}... \int_{Z_{\alpha_{p-1}^\sigma}} \chi\big{(}k^{1/2-\varepsilon} \mathrm{d}(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}), (\boldsymbol{\alpha}^\sigma,\alpha^\sigma_p,\boldsymbol{z}^\sigma))\big{)} e^{ik \Phi^\sigma_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})} \tilde{a}^\sigma_p(\boldsymbol{\alpha},\alpha_p,z)) \mathrm{d}\boldsymbol{\alpha} \mathrm{d}\boldsymbol{z} \\ A_\sigma^2(\alpha_p) := &\int_{(\Sph^{d-1})^{p}} \int_{Z_{\alpha_0^\sigma}}... \int_{Z_{\alpha_{p-1}^\sigma}} \big{(}1-\chi\big{(}k^{1/2-\varepsilon} \mathrm{d}(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}), (\boldsymbol{\alpha}^\sigma,\alpha^\sigma_p,\boldsymbol{z}^\sigma))\big{)}\big{)} e^{ik \Phi^\sigma_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})} \tilde{a}^\sigma_p(\boldsymbol{\alpha},\alpha_p,z)) \mathrm{d}\boldsymbol{\alpha} \mathrm{d}\boldsymbol{z} , \end{aligned} \end{equation*} then (\ref{eq:localised image}) and Lemma \ref{lemm: nonstat} implies that $A_\sigma^2= O(k^{-\infty})$. On the other hand, the integrand in $A_\sigma^1$ is supported in a set of volume $k^{(-1/2+\varepsilon)2(d-1)p}$. Therefore, there exists $C,c>0$ such that \begin{equation*} \begin{aligned} c_k\|A_{\sigma}^{p,\omega_0,\eta_0}\|_{C^0} &\leq C k^{(-1/2+\varepsilon)2(d-1)p} k^{\frac{3}{2} (d-1)} k^{p(d-1)} \| \chi\big{(}k^{1/2-\varepsilon} \mathrm{d}(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}), (\boldsymbol{\alpha}^\sigma,\alpha^\sigma_p,\boldsymbol{z}^\sigma))\big{)} \tilde{a}^\sigma_p(\boldsymbol{\alpha},\alpha_p,z))\|_{C_0}\\ &\leq C k^{\frac{3}{2}(d-1) + c\varepsilon} \| \chi\big{(}k^{1/2-\varepsilon} \mathrm{d}(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}), (\boldsymbol{\alpha}^\sigma,\alpha^\sigma_p,\boldsymbol{z}^\sigma))\big{)} \tilde{a}^\sigma_p(\boldsymbol{\alpha},\alpha_p,z))\|_{C_0}. \end{aligned} \end{equation*} Let us first prove the lemma when there exists $0\leq i\leq p-1$ such that $\sigma_i = (+, \bullet)$. By assumption, we then have that $\nu_{y^\sigma_i(z_i)}\cdot \alpha^\sigma_i > 0$. Recall that the function $ \tilde{a}^\sigma_p$ can be written as $$\frac{ |\nu_{y_i^\sigma(z_i)}\cdot \alpha_i | - \nu_{y_i^\sigma(z_i)}\cdot \alpha_{i+1} + E(\alpha_i,y_i^\sigma(z_i),k)}{|\nu_{y_i^{\sigma}(z_i)}\cdot \alpha_i|} f,$$ with $f\in S_{1/3}$. Now, by (\ref{eq:NoShadow}), we have that $E(\alpha_i,y_i^\sigma(z_i),k)= O(k^{-\infty})$. On the other hand, by the assumption we made on $\sigma$, we have $|\nu_{y_i^\sigma(z_i^\sigma)}\cdot \alpha^\sigma_i |= \nu_{y_i^\sigma(z_i^\sigma)}$. 
The fact that $\mathrm{d}((z_i,\alpha_i), (z_i^\sigma,\alpha_i^\sigma)) \leq k^{-1/2+\varepsilon}$, combined with the fact that we work on a set where $|\nu_{y_i^{\sigma}(z_i)}\cdot \alpha_i|\geq C k^{-1/6}$, then implies that $$\frac{ |\nu_{y_i^\sigma(z_i)}\cdot \alpha_i | - \nu_{y_i^\sigma(z_i)}\cdot \alpha_{i+1} + E(\alpha_i,y_i^\sigma(z_i),k)}{|\nu_{y_i^{\sigma}(z_i)}\cdot \alpha_i|} =O(k^{-1/3+\varepsilon}).$$ Therefore, we obtain that $$\| \chi\big{(}k^{1/2-\varepsilon}\, \mathrm{d}\big{(}(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}), (\boldsymbol{\alpha}^\sigma,\alpha^\sigma_p,\boldsymbol{z}^\sigma)\big{)}\big{)} \tilde{a}^\sigma_p(\boldsymbol{\alpha},\alpha_p,\boldsymbol{z})\|_{C^0} = O(k^{-1/3 + c' \varepsilon}).$$ All in all, we have that $$c_k \Big{(}\frac{1}{2} \Big{(}\frac{k}{2\pi}\Big{)}^{d-1} \Big{)}^p \|A_\sigma^{1}\|_{C^0} \leq C k^{\frac{3}{2} (d-1) - 1/3 + c''\varepsilon}.$$ Using the fact that $\|\phi_{\omega_0,\eta_0}\|_{L^1} = O(k^{-(d-1)/2})$, we get that $$c_k\int_{\mathcal{I}\backslash G_\varepsilon} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle = O(k^{d-1-1/3 + c''\varepsilon}),$$ which proves the lemma in this case. Let us now suppose that $\forall i=0,...,p-1$, we have $\sigma_i=(-,\bullet)$. From (\ref{eq:DifferentKappas}), we see that $\tilde{\kappa}^\sigma(\omega_0,\eta_0) = \kappa^{n_0}(\omega_0,\eta_0)$, for some $n_0\in \mathbb{N}$, where $\tilde{\kappa}^\sigma$ is as in (\ref{eq:deftildekappa}). The fact that $\sigma\neq \sigma_0$ implies furthermore that $n_0>0$. Suppose that $(\omega_0,\eta_0)$ is such that for all $n_0=1,...,p$, we have $\mathrm{d}((\omega_0,\eta_0),\kappa^{n_0}(\omega_0,\eta_0))> k^{-1/3}$. Then, using (\ref{eq:localised image}) and computing $\partial_{\alpha_p} \Phi_p^\sigma$, we see that we have \begin{equation*} \begin{aligned} &\forall \varepsilon >0, \forall (\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) \in \mathrm{spt} \tilde{\chi}_\sigma, \forall k \text{ large enough },~~ |\partial \Phi_p^\sigma (\boldsymbol{\alpha},\alpha_p,\boldsymbol{z}) | > k^{-\frac{1}{2}+\frac{\varepsilon}{2p}}. \end{aligned} \end{equation*} Therefore, by Lemma \ref{lemm: nonstat}, we have that $$\langle A_\sigma^{p,\omega_0,\eta_0},\phi_{\omega_0,\eta_0}\rangle = O(k^{-\infty}).$$ Therefore, if we write $$\mathcal{I}^p_{per}(k) :=\{(\omega_0,\eta_0)\in \mathcal{I} \text{ such that } \exists 1\leq n \leq p, \mathrm{d}((\omega_0,\eta_0), \kappa^n(\omega_0,\eta_0))< k^{-1/3}\},$$ we have that $$c_k\int_{\mathcal{I}\backslash G_\varepsilon} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle =c_k\int_{\mathcal{I}_{per}^p(k)} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle + O(k^{-\infty}).$$ Now, thanks to (\ref{eq: hypVol3}), we know that $\Vol (\mathcal{I}^p_{per}(k)) = O(k^{-1/3})$. We deduce from this that $$c_k\int_{\mathcal{I}_{per}^p(k)} \mathrm{d}\omega_0\mathrm{d}\eta_0 \langle A_{\sigma}^{p,\omega_0,\eta_0}, \phi_{\omega_0,\eta_0} \rangle = O(k^{d-1-1/3}).$$ This concludes the proof of the lemma. \end{proof} \section{Proofs of Theorems \ref{th: main theorem} and \ref{thm:scattering-phase}} \label{sec:proofs} We now prove the theorems from the introduction using the trace formula in Proposition \ref{thm:trace-lemma}. The proof of Theorem \ref{th: main theorem} from Proposition \ref{thm:trace-lemma} is the same as in \cite[\S 5]{I2016}, but we recall it for the reader's convenience.
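Before doing so, let us illustrate \eqref{eq:number} in the simplest concrete case, that of the unit ball; the computation below is included only as a sanity check and is not used in the proofs. For $\Omega = B^3\subset \R^3$ (which, being analytic, satisfies (\ref{eq: hypVol3})), we have $\omega_{2} = |B^{2}| = \pi$ and $\Vol(\p \Omega) = 4\pi$, so that
\begin{equation*}
N_k(\phi_0,\phi_1) = \frac{\pi}{(2\pi)^{2}}\,\Big(\frac{\phi_1-\phi_0}{2\pi}\Big)\, 4\pi\, k^{2} + o(k^{2}) = \Big(\frac{\phi_1-\phi_0}{2\pi}\Big)\, k^{2} + o(k^{2}).
\end{equation*}
In other words, each sector of the circle asymptotically receives a share of the non-trivial phase shifts proportional to its length.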
\subsection{Phase shift asymptotics} Let us define, for any $\alpha>0$, \begin{equation*} C^0_{\alpha}(\mathbb{S}^1)= \{f\in C^0(\mathbb{S}^1;\mathbb{C}); f(z)\big{|}\log |z-1|\big{|}^{\alpha} \text{ tends to } 0 \text{ as } z\to 1 \}, \end{equation*} and equip this space with the norm \begin{equation*} \|f\|_{\alpha} = \sup_{|z|=1, z\neq 1} \big{|}\log |z-1|\big{|}^{\alpha} |f(z)|~~ \text{ for } f\in C^0_{\alpha}(\mathbb{S}^1). \end{equation*} Note that $C^0_{\alpha}\subset C^0_{\alpha'}$ if $\alpha>\alpha'$. We will now prove the following theorem, which is a slightly refined version of Theorem \ref{th: main theorem}. \begin{theorem} \label{th: main theorem 2} Let $\Omega\subset \R^d$ be a smooth strictly convex open set, such that (\ref{eq: hypVol3}) holds. Let $\alpha>d$ and let $f\in C^0_{\alpha}(\mathbb{S}^1)$. Then we have \begin{equation*} \langle \mu_{k}, f \rangle = \frac{\Vol(\p \Omega) \omega_{d-1}}{2\pi} \int_{0}^{2\pi}f(e^{i\theta}) d\theta + o(1). \end{equation*} \end{theorem} Before writing the proof, let us state two technical lemmas. Recall that we denote the eigenvalues of $S(k)$ by $e^{i\beta_{k,n}}$. We shall from now on take the convention that $|e^{i\beta_{k,n}}-1|\geq |e^{i\beta_{k,n+1}}-1|$. For any $L\geq1$, we shall denote by $N_{L,k}$ the number of $n\in \mathbb{N}$ such that $|e^{i\beta_{k,n}}-1|\geq e^{-Lk}$. \begin{lemma}\label{lem : bound big shifts} There exists $C_0>0$ such that for any $L\geq1$ and $k\geq 1$, we have $N_{L,k}\leq C_0 \big{(}Lk\big{)}^{d-1}$. \end{lemma} \begin{proof} Thanks to equation (2.3) in \cite{christiansen2015sharp} (which relies on the methods developed in \cite{zworski1989sharp}), we have that there exists $C>0$ independent of $k$ and $n$ such that \begin{equation}\label{eq:christ} |e^{i\beta_{k,n}}-1|\leq Ck^d \exp \Big{(} Ck- \frac{n^{1/(d-1)}}{C}\Big{)}. \end{equation} In particular, we have that for any $N\geq 1$, \begin{equation*} \begin{aligned} \prod_{n=1}^N |e^{i\beta_{k,n}}-1| &\leq \Big{(}C k^d\Big{)}^N \exp \Big{(} NCk- \frac{1}{C} \sum_{n=1}^N n^{1/(d-1)}\Big{)}\\ &\leq \big{(}C k^d\big{)}^N \exp \Big{(} N Ck- C' N^{d/(d-1)}\Big{)}, \end{aligned} \end{equation*} for some $C'>0$ independent of $k,N$. Therefore, we have that \begin{equation*} \begin{aligned} e^{-k LN_{L,k}}&\leq \prod_{n=1}^{N_{L,k}} |e^{i\beta_{k,n}}-1|\\ &\leq \Big{(}C k^d\Big{)}^{N_{L,k}} \exp \Big{(} CkN_{L,k} - C' {N_{L,k}}^{d/(d-1)}\Big{)}. \end{aligned} \end{equation*} By taking logarithms, we get \begin{equation*} \begin{aligned} -kLN_{L,k} &\leq {N_{L,k}}\log\Big{(}Ck^d\Big{)} + kC N_{L,k} - C' {N_{L,k}}^{d/(d-1)}. \end{aligned} \end{equation*} The first term in the right hand side is negligible, so we get, by possibly changing slightly the constant $C'$, \begin{equation*} \begin{aligned} C' {N_{L,k}}^{d/(d-1)} &\leq k{N_{L,k}} (C+L). \end{aligned} \end{equation*} Therefore, $N_{L,k} \leq \Big{(}\frac{k(C+L)}{C'}\Big{)}^{d-1}\leq C_0 (Lk)^{d-1}$ for some $C_0>0$ large enough, but independent of $L$ and $k$, which concludes the proof of the lemma.
\end{proof} \begin{lemma}\label{lem : boundmeasure} For any $\alpha>d$, there exists $C_\alpha> 0$ such that for any $f\in C^0_{\alpha}(\mathbb{S}^1)$, we have \begin{equation*} |\langle\mu_{k},f\rangle| \leq C_\alpha \|f\|_{\alpha}. \end{equation*} \end{lemma} \begin{proof} We have \begin{equation}\label{eq : mu k} \begin{aligned} |\langle\mu_{k},f \rangle| &= \Big{(}\frac{2\pi}{k}\Big{)}^{d-1} \Big{|}\sum_{n\in \mathbb{N}} f(e^{i\beta_{k,n}})\Big{|}\\ &\leq \Big{(}\frac{2\pi}{k}\Big{)}^{d-1}\sum_{|e^{i\beta_{k,n}}-1|\geq e^{-k}} |f(e^{i\beta_{k,n}})| + \Big{(}\frac{2\pi}{k}\Big{)}^{d-1} \sum_{|e^{i\beta_{k,n}}-1|< e^{-k}} |f(e^{i\beta_{k,n}})|. \end{aligned} \end{equation} Let us consider the first sum. By Lemma \ref{lem : bound big shifts}, it has at most $C_0 k^{d-1}$ terms. Hence, it is bounded by \begin{equation} \Big{(}\frac{2\pi}{k}\Big{)}^{d-1} \sum_{|e^{i\beta_{k,n}}-1|\geq e^{-k}} |f(e^{i\beta_{k,n}})|\leq \Big{(}\frac{2\pi}{k}\Big{)}^{d-1} C_0 k^{d-1}\|f\|_{C^0}\leq C \|f\|_{\alpha}, \end{equation} for some $C>0$. Let us now consider the second term in (\ref{eq : mu k}). For each $p\geq 1$, we denote by $\sigma_{p,k}$ the set of $n\in \mathbb{N}$ such that $e^{-k(p+1)}\leq |e^{i\beta_{k,n}}-1|< e^{-pk}$. By Lemma \ref{lem : bound big shifts}, $\sigma_{p,k}$ contains at most $C_0 \big{(} k(p+1)\big{)}^{d-1}$ elements. On the other hand, for each $n\in \sigma_{p,k}$, we have $$|f(e^{i\beta_{k,n}})| \leq \|f\|_\alpha \big{|}\log (e^{-pk})\big{|}^{-\alpha}=\frac{\|f\|_\alpha}{(kp)^{\alpha}} .$$ Therefore, we have \begin{equation*} \begin{aligned} \Big{(}\frac{2\pi}{k}\Big{)}^{d-1}\sum_{|e^{i\beta_{k,n}}-1|< e^{-k}} |f(e^{i\beta_{k,n}})| &= \Big{(}\frac{2\pi}{k}\Big{)}^{d-1}\sum_{p=1}^{+\infty} \sum_{n\in \sigma_{p,k}} |f(e^{i\beta_{k,n}})| \\ &\leq \Big{(}\frac{2\pi}{k}\Big{)}^{d-1}\sum_{p=1}^{+\infty} C_0 \big{(} k(p+1)\big{)}^{d-1} \frac{\|f\|_\alpha}{(kp)^{\alpha}}\\ &\leq C k^{-\alpha} \|f\|_\alpha, \end{aligned} \end{equation*} for some $C$ independent of $k$; the sum over $p$ converges because $\alpha>d$. This concludes the proof of the lemma. \end{proof} \begin{proof}[Proof of Theorem \ref{th: main theorem 2} and \eqref{eq:number}] We have proved the result for all trigonometric polynomials vanishing at 1 in Proposition \ref{thm:trace-lemma}. Let $\alpha>\alpha'>d$, and let $f\in C^0_\alpha\subset C^0_{\alpha'}$. Let us show that $f$ can be approximated by trigonometric polynomials vanishing at 1 in the $C^0_{\alpha'}$ norm, which will conclude the proof of the theorem thanks to Lemma \ref{lem : boundmeasure}. Since $f (z) \big{(}1+\big{|}\log |z-1|\big{|}^{2\alpha}\big{)}^{1/2}$ is continuous, we may find a sequence $P_n$ of polynomials such that $$\big{\|}P_n - f (z) \big{(}1+\big{|}\log |z-1|\big{|}^{2\alpha}\big{)}^{1/2}\big{\|}_{C^0}\leq 1/n.$$ Since $f(1)=0$, we may suppose that $P_n(1)=0$. We may also suppose that $P_n'(1)=0$ (for a proof of this fact, see for example \cite[Theorem 8, \S 6]{duren2012invitation}). Since the function $\big{|}\log |z-1|\big{|}^{\alpha'}\big{(}1+\big{|}\log |z-1|\big{|}^{2\alpha}\big{)}^{-1/2}$ is continuous, we have that \begin{equation}\label{approche} \big{\|}P_n \big{|}\log |z-1|\big{|}^{\alpha'}\big{(}1+\big{|}\log |z-1|\big{|}^{2\alpha}\big{)}^{-1/2} - f (z) \big{|}\log |z-1|\big{|}^{\alpha'}\big{\|}_{C^0}\leq C/n.
\end{equation} Now, since $P_n(1)=P_n'(1)=0$, the function $P_n /\big{(}(z-1)\big{(}1+\big{|}\log |z-1|\big{|}^{2\alpha}\big{)}^{1/2}\big{)} $ is continuous, and we may find a polynomial $Q_n$ such that $$ \Big{\|} \frac{P_n}{(z-1)\big{(}1+\big{|}\log |z-1|\big{|}^{2\alpha}\big{)}^{1/2}} - Q_n\Big{\|}_{C^0}\leq 1/n.$$ Since the function $ (z-1)\big{|}\log |z-1|\big{|}^{\alpha'}$ is continuous, we obtain that \begin{equation}\label{approche2} \Big{\|} P_n\big{|}\log |z-1|\big{|}^{\alpha'}\big{(}1+\big{|}\log |z-1|\big{|}^{2\alpha}\big{)}^{-1/2} - Q_n(z-1)\big{|}\log |z-1|\big{|}^{\alpha'}\Big{\|}_{C^0}\leq C'/n. \end{equation} Combining (\ref{approche}) and (\ref{approche2}), we obtain that $f$ can be approximated by $(z-1)Q_n$ in the $C^0_{\alpha'}$ norm. This concludes the proof of Theorem \ref{th: main theorem 2}. \end{proof} \subsection{Total scattering phase} We now give our alternative proof of the scattering phase asymptotics in Theorem \ref{thm:scattering-phase}. We begin by recalling that the scattering phase $s(k)$ can be defined continuously in such a way that $\lim_{k \to 0^+} s(k) = 0$, and thus defined it is in fact smooth for all $k > 0$. We define the `reduced' scattering phase by the sum $$ s_{2\pi}(k) = -\sum_{e^{i\beta_{k,n}} \in \sigma(S(k))} \beta_{k,n}, $$ where the logarithms of the eigenvalues, the $\beta_{k,n}$, are chosen to take values in $(-2\pi, 0]$. For fixed $k$ the eigenvalues accumulate at $1$ from the bottom half plane and thus contribute positive values to the sum, which is nonetheless finite. A result of Eckmann-Pillet \cite{EP1995} shows that eigenvalues approach $1$ with positive imaginary part if and only if $k$ approaches a Dirichlet eigenvalue of $\Omega$. In fact, with $N_D(k)$ as in \eqref{eq:counting-function}, we have $$ s(k) = 2 \pi N_D(k) + s_{2\pi}(k). $$ Under the hypothesis that the measure of the periodic billiard trajectories in $\Omega$ is zero, it is known \cite{ivrii1980second} that $$ N_D(k) = \frac{\omega_d}{(2\pi)^{d}} \Vol(\Omega) k^d - \frac{\omega_{d - 1}}{4 (2\pi)^{d-1}} \Vol(\p \Omega) k^{d -1} + o(k^{d-1}). $$ Note that this is the same asymptotic expansion as for the scattering phase, with the sign of the second term reversed. We claim that $$ s_{2\pi}(k) = s(k) - 2 \pi N_D(k) = \frac{\omega_{d - 1}}{2 (2\pi)^{d-2}} \Vol(\p \Omega) k^{d -1} + o(k^{d-1}). $$ We will prove this by breaking up the unit circle into $M \in \mathbb{N}$ sectors of size $2\pi/M$ and estimating the sum defining $s_{2\pi}(k)$ in these sectors. Namely, let $A_{M,k}(j) = \{n ;\, - 2 \pi (j + 1)/M < \beta_{k,n} < - 2\pi j/M \}$ and $\alpha_{M, k}(j) = \sum_{n \in A_{M, k}(j)} (- \beta_{k,n})$, so that $$ s_{M, k} = \sum_{j = 0}^{M - 1} \alpha_{M, k}(j) $$ (note that $s_{M,k} = s_{2\pi}(k)$ for every $M$). We begin with $j = 0$, which is distinct from $j > 0$ since there are infinitely many phase shifts in $A_{M, k}(0)$. We claim that $$ |\alpha_{M, k}(0)| \le \frac{C}{M} k^{d - 1}.
$$ Indeed, using \eqref{eq:christ} and a constant $C> 0$ whose value changes from line to line, we see that \begin{equation}\label{eq:1} \begin{split} \alpha_{M, k}(0) &\le \sum_{j = 0}^\infty \sum_{2^{-j}/M \ge |\beta_{k,n}|/(2\pi) \ge 2^{-(j + 1)}/M } | e^{i \beta_{k,n}} - 1 | \\ &\le \sum_{j = 0}^\infty \left( \frac{1}{M} 2^{-j} \right) \sum_{2^{-j}/M \ge |\beta_{k,n}|/(2\pi) \ge 2^{-(j + 1)}/M } 1 \\ &\le \sum_{j = 0}^\infty \left( \frac{1}{M} 2^{-j} \right) C (k + (j + 1)/k)^{d-1} \\ &\le \frac{k^{d-1}}{M} \sum_{j = 0}^\infty \left( 2^{-j} \right) C (1 + (j + 1)/k^2)^{d-1} \le C\frac{ k^{d-1}}{M}. \end{split} \end{equation} For $j > 0$, we estimate $\alpha_{M, k}(j)$ from above and below, and clearly \begin{equation} \begin{split} \frac{2\pi j}{M} | A_{M, k}(j) | \le \alpha_{M, k}(j) \le \frac{ 2 \pi (j + 1)}{M} | A_{M, k}(j) |. \end{split} \end{equation} It follows from \eqref{eq:number}, since our sectors have size $2\pi/M$, that for $0 < j \le M -1$, for any $\delta > 0$ and $k > k_{M, \delta}$, we have $$ \left( \frac{\omega_{d-1}}{(2\pi)^{d-1}} \frac{2\pi}{M} \Vol(\p \Omega) - \delta \right) k^{d-1} \le |A_{M, k}(j)| \le \left( \frac{\omega_{d-1}}{(2\pi)^{d-1}} \frac{2\pi}{M} \Vol(\p \Omega) + \delta \right) k^{d-1}. $$ Since $\sum_{j = 1}^{M-1} j = M(M - 1)/2$ and $\sum_{j = 1}^{M-1} (j + 1) = M(M + 1)/2 - 1$, we have \begin{equation} \begin{gathered} \left( \frac{\omega_{d-1}}{(2\pi)^{d-2}} \frac 12 \Vol(\p \Omega) - C(1/M + \delta M) \right) k^{d-1} \le s_{M, k}, \\ s_{M, k} \le \left( \frac{\omega_{d-1}}{(2\pi)^{d-2}} \frac 12 \Vol(\p \Omega) + C(1/M + \delta M) \right) k^{d-1}, \end{gathered} \end{equation} and thus \begin{equation} \begin{split} \limsup_{k \to \infty} s_{2\pi}(k)\, k^{-(d-1)} &\le \frac{\omega_{d-1}}{(2\pi)^{d-2}} \frac 12 \Vol(\p \Omega) + C(1/M + \delta M), \\ \liminf_{k \to \infty} s_{2\pi}(k)\, k^{-(d-1)} &\ge \frac{\omega_{d-1}}{(2\pi)^{d-2}} \frac 12 \Vol(\p \Omega) - C(1/M + \delta M) \end{split} \end{equation} for any $M \in \mathbb{N}, \delta > 0$. Taking $\delta = 1/M^2$ and sending $M \to \infty$ gives the result.
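To make the leading coefficients concrete, the short sketch below (an illustration only, not part of the proof) evaluates the two-term Weyl asymptotics for $N_D(k)$ and the leading term of $s_{2\pi}(k)$ in the model case of the unit ball in $\mathbb{R}^{3}$; we assume here that $\omega_d$ denotes the volume of the unit ball in $\mathbb{R}^{d}$, consistently with the formulas above. \begin{verbatim}
import math

def omega(d):
    # volume of the unit ball in R^d
    return math.pi ** (d / 2) / math.gamma(d / 2 + 1)

d = 3
vol_omega = omega(d)          # Vol(Omega) for the unit ball
vol_boundary = d * omega(d)   # Vol(boundary) = area of the unit sphere

def N_D(k):
    # two-term Weyl asymptotics for the Dirichlet counting function
    return (omega(d) / (2 * math.pi) ** d * vol_omega * k ** d
            - omega(d - 1) / (4 * (2 * math.pi) ** (d - 1))
              * vol_boundary * k ** (d - 1))

def s_2pi(k):
    # leading term of the reduced scattering phase
    return (omega(d - 1) / (2 * (2 * math.pi) ** (d - 2))
            * vol_boundary * k ** (d - 1))

for k in (10.0, 50.0, 100.0):
    print(f"k = {k:6.1f}  N_D ~ {N_D(k):14.1f}  s_2pi ~ {s_2pi(k):14.1f}")
\end{verbatim}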
\section{Introduction} The density of matter varies enormously in neutron stars, from about the density of iron (7.86$\,\rm{g/cm^{3}}$) at the stellar surface to values higher than the nuclear saturation density in the stellar core \citep{weber1999pulsars,shapiro2008black}. In the absence of an exact theory of superdense matter, different theoretical models predict different equations of state (EoS) and compositions to describe neutron stars (NS). The structure of NS's can be divided into the surface, composed of ions and non-relativistic electrons; the outer crust, where the ions form a solid Coulomb lattice at densities lower than the neutron drip density $n_{drip}\sim4.3\times 10^{11}\,\rm{g/cm^{3}}$; the inner crust region beyond the neutron drip density, where neutrons leak out of nuclei up to densities $\sim 10^{14}\,\rm{g/cm^{3}}$; and, finally, the core region, typically composed of electrons, protons and neutrons forming a relativistic fluid. It is also in the core that exotic degrees of freedom such as hyperons \citep{glendenning1987hyperons, bednarek2012hyperons, vidana2013hyperons, gomes2015many}, quark matter \citep{Franzon:2012in, weber1999quark, baldo2004quark, alford2007astrophysics, Franzon:2016urz} and superconducting phases might appear \citep{baldo2003neutron, kaplan2002novel, lugones2003high}. With a thickness of approximately 1 km, the crust region of neutron stars has a relatively well-known equation of state, see \cite{chamel2008physics, lorenz1993neutron, lattimer2001neutron}. In general, the composition, the structure and the equation of state of the outer crust are determined by finding the ground state of cold ionic matter, which corresponds to minimizing the Gibbs energy per nucleon at a given pressure. In this case, one nucleus occupies one neutral Wigner-Seitz cell, and the nucleus and the electrons in each cell contribute to the total energy and pressure of the system. Here, we describe the ground state of matter in the outer crust of neutron stars by using the classical work formulated by Baym, Pethick and Sutherland \citep{baym1971ground}. The inner crust of neutron stars begins when neutrons start to drip out of the nuclei at densities about $n_{drip}$. From this value up to the density at the crust-core transition point, one has very neutron-rich nuclei immersed in a gas of neutrons. In this case, the equation of state is usually obtained with many-body techniques such as Hartree-Fock (HF), the Thomas-Fermi (TF) approximation, and the Compressible Liquid Drop Model (CLDM). In this context, we follow the prescription of \cite{douchin2001unified} to describe the structure and composition of the inner neutron-star crust. The authors in \cite{douchin2001unified} calculated the ground state of matter within the CLDM with the SLy effective nucleon-nucleon interaction. It is worth mentioning that other equations of state for this regime can be found in \cite{negele1973neutron, shen2002complete}. However, the choice of a particular EoS does not alter our conclusions. In addition, it is generally accepted that a pasta phase may appear at the crust-core transition \citep{grill2014equation,watanabe2000thermodynamic,ravenhall1983structure}. Although the properties of the inner crust are modified in the presence of pasta phases, we do not take them into account in this work. Due to its low density, the crust makes only a small contribution to the total mass of neutron stars \citep{haensel2001neutron}.
Nevertheless, the crust region is crucial not only for determining the stellar radius, which is currently of major importance due to the large uncertainties in the measurements of NS radii, but also because the crust plays a crucial role in neutron star evolution, dynamics and observation. For example, the crust is related to phenomena such as glitches \citep{ruderman1998neutron}, the braking index \citep{cheng2002phase}, torsional modes \citep{levin2011excitation,sotani2007torsional,hansen1980torsional}, magnetic field evolution \citep{pons2007magnetic,aguilera2008impact,cumming2004magnetic}, thermal relaxation \citep{gnedin2001thermal} and the cooling of neutron stars \citep{negreiros2012thermal,heinke2010direct,page2009neutrino}. Certain classes of neutron stars are associated with very strong magnetic fields. According to observations of Soft Gamma-ray Repeaters (SGR) and Anomalous X-ray Pulsars (AXP), such stars show surface magnetic fields up to $10^{15}$ G \citep{vasisht1997discovery,kouveliotou1998x}. These strong magnetic fields might be generated by dynamo processes in newly born neutron stars \citep{Thompson:1993hn}, although the exact origin of such high magnetic fields is still the subject of much debate. Moreover, according to the virial theorem, the magnetic field can reach values of $\sim 10^{18}$ G in the stellar core. According to \cite{lai1991cold,chakrabarty1997dense}, strong magnetic fields modify the equilibrium nuclear composition and the equation of state in neutron stars. However, as already shown in \cite{Franzon:2015sya,Chatterjee:2014qsa}, the global properties of compact stars, such as the mass and the radius, do not change significantly with the inclusion of magnetic field effects in the equation of state of the dense matter. On the other hand, it was shown in \cite{Franzon:2015sya, Franzon:2016iai} that the particle degrees of freedom in the core of stars change drastically with the inclusion of magnetic fields. Similarly, modifications of the crust properties and composition induced by magnetic fields can be seen in \cite{nandi2011neutron}. Strong magnetic fields are also known to change the structure of neutron stars considerably. The authors in \cite{Chatterjee:2014qsa,Bocquet:1995je,cardall2001effects,Franzon:2015sya,mallick2014deformation} evaluated magnetized models of stars endowed with strong poloidal magnetic fields. In this case, the Lorentz force induced by the magnetic field makes stars more massive and they become oblate with respect to the symmetry axis. Moreover, effects of toroidal magnetic fields were addressed in \cite{frieben2012equilibrium,kiuchi2008equilibrium}. In this case, the magnetized stars become more prolate with respect to the non-magnetized case. Nonetheless, these works did not address the effects of strong magnetic fields on the global properties of the neutron star crust. In this work, we construct equilibrium configurations of magnetized stellar models by using the same approach as in \cite{Bocquet:1995je,Bonazzola:1993zz}. We make use of spherical polar coordinates $(r, \theta, \phi)$ with origin at the stellar center and the pole located along the axis of symmetry. We focus on the size and geometry of the crust of highly deformed, strongly magnetized neutron stars. In other words, we consider the different effects of the Lorentz force according to the angular and radial position inside the star. The Lorentz force is related to the macroscopic currents that create the magnetic field, acting on the matter, which can be pushed outward or inward.
In the first case, we have the standard and expected effect of the Lorentz force, which acts against gravity and pushes the matter outward, making the star larger in the equatorial plane and smaller at the pole. However, as we will see, the Lorentz force reverses direction inside the star, acting inward in the outer layers of the neutron star. It is important to notice that, in addition to the just described Lorentz force, the magnetic field also contributes to the curvature of space-time via the energy it stores. Note that, once the spherical symmetry is broken in highly magnetized neutron stars, the crust thickness depends both on the coordinate radius $r$ and on the angular direction $\theta$. The plan of the paper is as follows. In Section II we give a general overview of the Einstein-Maxwell equations that are required to be solved numerically. In Section III, we present our results for the crust thickness in strongly magnetized stars. Sec. IV contains our results for the thermal relaxation time of the stars discussed in Sec. III. Our final remarks and conclusions can be found in Sec. V. \section{Stellar models with axisymmetric magnetic field} In this work, we construct models of stationary highly magnetized neutron stars. Details of the Einstein-Maxwell equations, numerical procedure and tests can be found in \cite{Bonazzola:1993zz,Bocquet:1995je}. We show here only the key equations that are solved numerically, for the sake of completeness and a better understanding for the reader. Equilibrium stellar configurations are obtained in general relativity by solving the Einstein equations: \begin{equation} R_{\mu\nu} -\frac{1}{2}\mathrm{g}_{\mu\nu} R = kT_{\mu\nu}, \label{einstein} \end{equation} with $R={g}_{\mu\nu}R^{\mu\nu}$, where $R_{\mu\nu}$ is the Ricci tensor, ${g}_{\mu\nu}$ the metric tensor, $k$ a constant and $T_{\mu\nu}$ the energy-momentum tensor of the system. As we will be dealing with the macroscopic structure of neutron stars endowed with magnetic fields, the energy-momentum tensor of the system is given by \begin{equation} T_{\mu\nu} = \left( \mathcal{E} + P \right) u_{\mu}u_{\nu} + P\, g_{\mu\nu} + \frac{1}{ \mu_{0}} \left( F_{\mu \alpha} F^{\alpha}_{\nu} - \frac{\mathrm{g}_{\mu\nu}}{4} F_{\alpha\beta} F^{\alpha\beta} \right), \label{emt} \end{equation} where the first term in Eq. \eqref{emt} is the perfect-fluid contribution, with the matter energy density $\mathcal{E}$, the isotropic fluid pressure $P$ and the fluid 4-velocity $u_{\mu}$. The second term represents the pure Maxwell stress tensor, with $F_{\alpha\beta}$ being the usual Faraday tensor defined in terms of the magnetic vector potential $A_{\alpha}$ as $F_{\alpha\beta}=\partial_{\alpha}A_{\beta} - \partial_{\beta}A_{\alpha}$. According to \cite{Bonazzola:1993zz, Bocquet:1995je, gourgoulhon20123+}, the metric tensor can be expressed in spherical-like coordinates $(r, \theta, \phi)$ within the 3+1 formalism as: \begin{align} ds^{2} = &-N^{2} dt^{2} + \Psi^{2} r^{2} \sin^{2}\theta (d\phi - N^{\phi}dt)^{2} \nonumber \\ &+ \lambda^{2}(dr^{2} + r^{2}d\theta^{2}), \label{line} \end{align} with $N$, $N^{\phi}$, $\Psi$ and $\lambda$ being functions of the coordinates $(r, \theta)$.
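For orientation, it may help to contrast the full axisymmetric Einstein-Maxwell problem with its spherical, non-magnetized limit, in which the structure reduces to the Tolman-Oppenheimer-Volkoff (TOV) equations. The sketch below is a minimal illustration only: it uses a $\Gamma=2$ polytrope instead of the APR/SLy/BPS equations of state employed in this work, units $G=c=M_{\odot}=1$, and an arbitrary density cut $\rho_t$ as a crude stand-in for the crust-core transition discussed in the next section. \begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

# Polytropic EoS p = K rho^Gamma in units G = c = Msun = 1;
# K = 100, Gamma = 2, rho_c = 1.28e-3 is a standard TOV test case
# (it yields a star of roughly 1.4 Msun).
K, Gamma = 100.0, 2.0
rho_c = 1.28e-3
p_c = K * rho_c**Gamma

def rho_of_p(p):
    return (max(p, 0.0) / K) ** (1.0 / Gamma)

def rhs(r, y):
    p, m = y
    rho = rho_of_p(p)
    eps = rho + p / (Gamma - 1.0)          # total energy density
    dpdr = -(eps + p) * (m + 4.0*np.pi*r**3*p) / (r * (r - 2.0*m))
    dmdr = 4.0 * np.pi * r**2 * eps
    return [dpdr, dmdr]

def surface(r, y):                          # stop when p ~ 0
    return y[0] - 1.0e-12 * p_c
surface.terminal = True

sol = solve_ivp(rhs, [1.0e-6, 100.0], [p_c, 0.0], events=surface,
                rtol=1e-8, atol=1e-14, dense_output=True)
R, M = sol.t[-1], sol.y[1, -1]

# Crude 'crust': region where rho < rho_t, an illustrative stand-in
# for the 0.076 fm^-3 crust-core transition used in this work.
rho_t = 0.2 * rho_c
rs = np.linspace(sol.t[0], R, 4000)
rhos = np.array([rho_of_p(p) for p in sol.sol(rs)[0]])
r_cc = rs[np.argmax(rhos < rho_t)]
print(f"M = {M:.3f} Msun, R = {R*1.477:.2f} km, "
      f"crust thickness ~ {(R - r_cc)*1.477:.2f} km")
\end{verbatim} The magnetized models of this paper replace this one-dimensional integration by the two-dimensional, spectral solution of the coupled Einstein-Maxwell equations of \cite{Bonazzola:1993zz,Bocquet:1995je}.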
The equation of stationary motion ($\nabla_{\mu}T^{\mu\nu}= 0$) for a perfect fluid with a pure poloidal field can be expressed as \citep{Bocquet:1995je}: \begin{equation} H \left(r, \theta \right) + {\rm{ln}}N\left(r, \theta \right) + M \left(r, \theta \right) = const, \label{equationofmotion} \end{equation} where $H(r,\theta)$ is the logarithm of the dimensionless relativistic enthalpy per baryon and $M(r,\theta)$ the magnetic potential, which determines the magnetic field configuration: \begin{equation} M \left(r, \theta \right) = M \left( A_{\phi} \left(r, \theta \right) \right): = - \int^{0}_{A_{\phi}\left(r, \theta \right)} f\left(x\right) \mathrm{d}x, \end{equation} with a current function $f(x)$ as defined in \citep{Bonazzola:1993zz}. The Lorentz force induced by the magnetic field is proportional to $-\nabla M(r,\theta)$. Magnetic stellar configurations are determined by choosing a constant current function $f_{0}$. The magnetic field strength in the star increases proportionally to $f_{0}$. Moreover, the macroscopic electric current scales with $f_{0}$ as $j^{\phi}= (\mathcal{E}+P)f_{0}$. Note that in \cite{Bocquet:1995je} other possibilities for $f(x)$ were discussed, but the general conclusions remain the same. The equations described above are solved numerically for different values of $f_0$, leading to neutron stars with different magnetic fields and thus different structures. \section{Crust thickness of strongly magnetized stars} We describe the inner crust with the Skyrme (SLy) EoS, which is based on the effective nuclear interaction SLy of the Skyrme type. For more details on the composition and EoS calculation see \cite{douchin2001unified}. The structure of the outer crust, and its EoS, was taken from Baym, Pethick and Sutherland (BPS), based upon the Reid potential \citep{baym1971ground}. To describe the matter in the neutron star interior in the $T = 0$ approximation, we choose the APR EoS for the core \citep{akmal1998equation}, which is composed of protons, neutrons, electrons and muons. The equilibrium state of magnetized objects was discussed many years ago in \cite{ferraro1954equilibrium,chandrasekhar1956equilibrium}. More recently, many authors have shown that the stellar radius increases due to magnetic fields, with the star expanding in the equatorial direction and contracting at the pole \citep{mallick2014deformation,cardall2001effects,Bonazzola:1993zz,Bocquet:1995je,Chatterjee:2014qsa,Franzon:2015sya}. As already discussed in \cite{cardall2001effects}, the Lorentz force induced by magnetic fields reverses direction in the equatorial plane $\theta=\pi/2$ of the star. However, the consequence of this reversal for the outer layers of stars was not addressed in \cite{cardall2001effects}. Inspecting the outer layers, one observes that the crust can decrease or increase in size depending on the polar angle $\theta$, while the core expands in all directions. The crust thickness is defined as the difference between the stellar surface radius and the radius at the base of the crust, where the crust-core transition takes place. As already calculated in \cite{grill2014equation, xu2009nuclear}, the symmetry energy affects the size of the inner crust considerably. In addition, in \cite{fortin2016neutron} the importance of a consistent matching between the core and the crust regions was shown.
Although a more thorough study along this line would certainly be of interest, for the purposes of our studies it is sufficient to use the SLy and BPS results, in which the baryon number density at the crust-core transition is 0.076$ \,\rm{fm^{-3}}$. In order to illustrate the effects of strong magnetic fields on the neutron-star crust thickness, we show in Fig.~\ref{thickness_bfield} the crust thickness in the equatorial plane ($\theta=\pi/2$), $\Delta r_{eq}$, as a function of the central magnetic field, $B_{c}$, for stars at fixed baryon masses of $M_{B}=1.40\,M_{\odot}$ and $M_{B}=2.00\,M_{\odot}$, respectively. According to Fig.~\ref{thickness_bfield}, the maximum magnetic field reached at the center of the star with $M_{B}=2.00\,M_{\odot}$ is 1.3$\times 10^{18}$ G, while the star with $M_{B}=1.40\,M_{\odot}$ has a central magnetic field of 0.9$\times 10^{18}$ G. As one can see in Fig.~\ref{thickness_bfield}, the crust thickness in the equatorial plane always decreases with the magnetic field. Note that stars with lower masses show a larger crust deformation. This is due to the fact that these stars have larger crusts spread over a larger radius, and thus are more easily deformed by the magnetic field. On the other hand, the lower the stellar mass, the lower the magnetic field at the stellar center. This is because in a perfectly conducting fluid the magnetic field lines move with the fluid, i.e., the field lines are `frozen' into the plasma and, therefore, the magnetic field strength is proportional to the local mass density of the fluid. \begin{figure} \includegraphics[height=8cm, angle=-90]{thickness_bfield-eps-converted-to.pdf} \caption{Thickness of the crust $\Delta r_{eq}$ in the equatorial plane ($\theta=\pi/2$) as a function of the central magnetic field for stars at fixed baryon masses of $M_{B}=1.40\,M_{\odot}$ and $M_{B}=2.00\,M_{\odot}$. } \label{thickness_bfield} \end{figure} In order to study the effects of the Lorentz force on the crust of neutron stars, we show in Fig.~\ref{thickness_angle} the crust thickness as a function of the polar angle $\theta$ for the most magnetized stars obtained in Fig.~\ref{thickness_bfield}. The horizontal lines in Fig.~\ref{thickness_angle} correspond to the crust thickness for stars with baryon masses of $M_{B}=1.40\,M_{\odot}$ and $M_{B}=2.00\,M_{\odot}$, but without magnetic fields. In this case, the values for the crust thickness are $\Delta r_{0}^{({1.40})}$=0.994 km and $\Delta r_{0}^{({2.00})}$=0.604 km, respectively. \begin{figure} \includegraphics[height=8cm, angle=-90]{thickness_angle-eps-converted-to.pdf} \caption{Crust thickness $\Delta r_{mag}$ in different angular directions $\theta$ for highly magnetized stars. These objects are the most magnetized stars depicted in Fig.~\ref{thickness_bfield}. For a fixed baryon mass of $M_{B}=1.40\,M_{\odot}$, the central magnetic field is $\sim 0.9\times 10^{18}$ G, while the star with $M_{B}=2.00\,M_{\odot}$ has $\sim 1.3\times 10^{18}$ G. The horizontal lines represent the crust thickness for spherical solutions (without magnetic fields), $\Delta r_{0}^{({1.40})}$=0.994 km and $\Delta r_{0}^{({2.00})}$=0.604 km for stars with $M_{B}=1.40\,M_{\odot}$ and $M_{B}=2.00\,M_{\odot}$, respectively.} \label{thickness_angle} \end{figure} In Fig.~\ref{thickness_angle}, we depict stars that are deformed due to magnetic fields.
As already calculated in \cite{cardall2001effects,Bocquet:1995je}, the stellar configurations can strongly deviate from spherical symmetry due to the anisotropy of the energy-momentum tensor in the presence of strong magnetic fields. According to Fig.~\ref{thickness_angle}, the crust has a maximum expansion at $\theta=\pi/4$. For the less massive star ($M_{B}=1.40\,M_{\odot}$), the crust expands and becomes larger than its non-magnetized counterpart. From this point, the crust thickness decreases. At the pole ($\theta=0$), the Lorentz force is zero by symmetry (no electric current at the symmetry axis), but the crust thickness is smaller than in the non-magnetized case. This is a geometric effect due to the expansion of the star in the equatorial plane. On the other hand, the increase of the crust thickness followed by a reduction at different polar angles is caused by the inversion of the direction of the Lorentz force inside the star. In order to show how the Lorentz force changes with the angular and radial position, we show in Fig.~\ref{magpotentical} the magnetic potential $ M(r, \theta)$ as a function of the coordinate radius along different polar angle directions for the same stars as shown in Fig.~\ref{thickness_angle}. As a result, one sees from Fig.~\ref{magpotentical} that the magnetic potential presents a minimum at higher angles, for example at $\theta=3\pi/8$ and $\theta=\pi/2$ (for the star with $M_{B}=1.40\,M_{\odot}$). These values correspond to angles for which the Lorentz force reverses sign and, as a consequence, changes its direction in the star. At lower polar angles, $M(r, \theta)$ decreases monotonically and, therefore, the Lorentz force points outward throughout the star, which leads to an expansion of both the inner and the outer layers of the star. \begin{figure} \includegraphics[height=8cm, angle=-90]{magpotential_angle-eps-converted-to.pdf} \caption{Magnetic potential $M$ as a function of the coordinate radius along different directions inside the stars. The corresponding stars are depicted in Fig.~\ref{thickness_angle}.} \label{magpotentical} \end{figure} We show in Fig.~\ref{thickness_angle_core} the size of the core for the same stars as those shown in Fig.~\ref{thickness_angle} and Fig.~\ref{magpotentical}. The results in Fig.~\ref{thickness_angle_core} indicate that highly magnetized stars expand their cores in all directions $\theta$. Note that the curves have an inflection point at $\theta=\pi/4$, which corresponds to the angle where the Lorentz force reverses direction inside these stars. Unlike the crust, whose thickness increases or decreases depending on the polar angle $\theta$, the core always increases in size. This is due to the fact that the Lorentz force always acts outwards in the stellar core. \begin{figure} \includegraphics[height=8cm, angle=-90]{thickness_angle_core-eps-converted-to.pdf} \caption{Core thickness for the same stars as shown in Fig.~\ref{thickness_angle}.} \label{thickness_angle_core} \end{figure} Fig.~\ref{eqm} depicts the physical quantities appearing in the equation of motion, Eq.~\eqref{equationofmotion}, as a function of the circular equatorial radius $R_{circ}$ for a star with $M_{B}=2.00\,M_{\odot}$. $R_{circ}$ is defined as $R_{circ}=\lambda(r_{eq},\pi/2)\,r_{eq}$, with $\lambda$ being the metric potential in Eq.~\eqref{line} and $r_{eq}$ the coordinate equatorial radius. A detailed discussion about the coordinate system used in this work can be found in \cite{Bonazzola:1993zz}.
The upper plot in Fig.~\ref{eqm} represents a spherical and non-magnetized stellar solution. The central plot shows the quantities from Eq.~\eqref{equationofmotion}, i.e., $C$, $\nu$ (with $\nu \equiv \ln N$), $M$ and $H$, but now taking magnetic fields into account. This is the same star as depicted in Figs.~\ref{thickness_angle} and \ref{thickness_angle_core}. In the bottom plot we highlight the magnetic potential $M(r, \theta)$ and show the radii where the Lorentz force acts inwards and outwards inside the star. In all cases, the vertical lines represent the core-crust transition point and the stellar surface. As one can see, the star becomes bigger due to magnetic fields. However, the size of the crust decreases in the equatorial plane (see also Fig.~\ref{thickness_angle}). For non-magnetized stars, the magnetic potential is $M(r,\theta)=0$. In addition, from Eq.~\eqref{equationofmotion}, one has $H(r,\theta)+\nu(r,\theta) = const = C_{0}$. The constant $C_{0}$ can be calculated at every point in the star \citep{cardall2001effects}. Since the input quantities used to construct the stellar models are given at the stellar center, we choose $C_{0}=H(0,0)+\nu(0,0)$. \begin{figure} \begin{center} \includegraphics[height=8.8cm, angle=-90]{eqm_mb2_j0-eps-converted-to.pdf} \quad \includegraphics[height=8.8cm, angle=-90]{eqm_mb2_j48000-eps-converted-to.pdf} \includegraphics[height=8.8cm, angle=-90]{eqm_magpotential-eps-converted-to.pdf} \caption{Physical quantities appearing in the equation of motion, Eq.~\eqref{equationofmotion}, as a function of the circular equatorial radius $R_{circ}$ for a star with $M_{B}=2.00\,M_{\odot}$. The upper plot represents a spherical and non-magnetized solution and the middle plot shows the same star, but highly magnetized. The central magnetic field is $1.3\times 10^{18}$ G. We highlight the direction of the Lorentz force in the star in the lower panel. The vertical lines mark the crust-core transition and the stellar surface. } \label{eqm} \end{center} \end{figure} In the upper plot of Fig.~\ref{eqm}, the surface of the star is found where the enthalpy goes to zero, $H(r,\pi/2)=0$, and, therefore, from the equation of motion, Eq.~\eqref{equationofmotion}, one gets $\nu=C_{0}$. In the magnetized case (central plot), we see that the enthalpy decreases throughout the star and reaches zero at the surface, where $\nu + M = C_{0}$. A similar analysis to the one described above was performed before for neutron stars in \cite{cardall2001effects} and for magnetized white dwarfs in \cite{Franzon:2015sya}. The bottom plot in Fig.~\ref{eqm} shows the magnetic potential and the position and direction (red arrows) of the Lorentz force inside the star. From the stellar center to the reversal point (black dot), the Lorentz force acts outwards and, therefore, this part of the star expands. From the reversal point to the crust-core radius (full black vertical line), the Lorentz force points inwards. This also holds true for the region between the crust-core transition and the stellar surface (dashed black vertical line). As a result, the Lorentz force always points inwards in the crust region and, therefore, the crust shrinks. As a net effect, the star becomes bigger in the equatorial plane due to the increase of the core region.
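The reversal of the Lorentz force can also be read off directly from the definitions given in Sec. II. For the constant current function $f(x) = f_{0}$ adopted here, the magnetic potential reduces to \begin{equation*} M \left(r, \theta \right) = - \int^{0}_{A_{\phi}\left(r, \theta \right)} f_{0}\, \mathrm{d}x = f_{0}\, A_{\phi}\left(r, \theta \right), \end{equation*} so that the Lorentz force density is proportional to $-f_{0}\nabla A_{\phi}$. The force therefore changes sign across the surfaces where $A_{\phi}$ reaches its extrema: for a dipole-like poloidal field, $A_{\phi}$ vanishes on the symmetry axis and far from the star, and attains an extremum inside the star where the current flows, so a region in which the force points inward is unavoidable in the outer layers for this current distribution.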
\section{Thermal Relaxation of Magnetized Neutron Stars} The study and observation of the thermal evolution of neutron stars has been established as an important tool for probing the inner composition and structure of compact stars \citep{Page2004,Page2006,Page2010,Negreiros2010a,Negreiros2012a,Negreiros2013b}. Many efforts have been dedicated towards a better comprehension of the thermal processes that may take place inside neutron stars, as well as of the macroscopic structure effects that could affect the thermal evolution of such objects \citep{Negreiros2012,Negreiros2013b}. Most thermal evolution calculations are performed under the assumption of spherical symmetry and a static structure and composition, although efforts have been made towards a self-consistent description of axisymmetric neutron stars \citep{Aguilera2008,Negreiros2012} and of objects with a dynamic structure evolution \citep{Negreiros2013b}. The work in \cite{Negreiros2012} shows that the thermal evolution of axisymmetric neutron stars may be substantially different from that of spherically symmetric objects. Even though in the aforementioned paper the breaking of spherical symmetry is brought on by rotation, it is reasonable to expect that a similar effect occurs if the spherical symmetry is broken by the magnetic field, as long as the resulting system also has an axisymmetric structure. A particularly interesting result, discussed in \cite{Negreiros2012}, is the modification of the core-crust coupling time in axisymmetric neutron stars. The core-crust coupling time is the time it takes for the core and the crust of a neutron star to become isothermal. Due to the difference in composition between the core (comprised of hadrons and leptons, and possibly of deconfined quark matter \citep{Negreiros2012a}) and the crust (mostly heavy ions in a crystalline structure and unbound neutrons in the inner crust), these two regions of the star have very distinct thermal properties, with substantially different neutrino emissivities, thermal conductivities and specific heats \citep{Page2006}. Due to such differences, the crust ordinarily acts as a blanket, keeping the star's surface warm while the core cools down due to stronger neutrino emission. Eventually the cold front, originating in the core, arrives at the crust, cooling it off as it moves to the surface. At this moment a sudden drop in the stellar surface temperature is expected. Such a drop signals the moment at which the neutron star interior (core and crust) is thermalized. The magnitude of the temperature drop will depend on whether or not fast cooling processes (mainly the Direct Urca process \citep{Page2004}) take place inside the neutron star, as well as on how pervasive superfluidity/superconductivity is in the core. The presence of fast cooling processes would lead to a deeper and sharper surface temperature drop, whereas the absence of fast processes (slow cooling) leads to a smoother drop in surface temperature. The core-crust coupling time, also referred to as the cooling relaxation time, has been studied extensively in \cite{Gnedin2008}, in which the authors have found that the relaxation time, $t_w$, may be written as \begin{equation} t_w = \alpha t_1, \label{eq:tw} \end{equation} where $t_1$ is a characteristic time that depends solely on crustal microscopic properties such as the thermal conductivity and heat capacity. It is also sensitive to neutron pairing, which may be present in the crust.
It is important to notice, as pointed out in \cite{Gnedin2008}, that the constant $t_1$ is almost independent of the neutron star model. This is reasonable since, regardless of the uncertainties with respect to the high-density EoS, the composition of the crust is fairly well known and understood. The constant $\alpha$ depends on stellar macroscopic properties and is given by \begin{equation} \alpha = \left(\frac{\Delta R_C}{1\textrm{km}}\right)^2 \left( 1 - \frac{r_g}{R}\right)^{-3/2} \label{eq:alpha} \end{equation} with $\Delta R_C$ being the crust thickness and $r_g = 2G M/c^2$ the gravitational radius, with $M$ being the gravitational mass. In \cite{Gnedin2008} it is found that the neutron star relaxation time ($t_w$) scales with the size of the crust according to eq.~(\ref{eq:tw}), more or less quickly, depending on how strong the superfluid effects are. Furthermore, it was also shown that the same conclusions hold for fast or slow cooling. Given the results put forth in \cite{Gnedin2008} and in \cite{Negreiros2012}, in addition to the results we show in this work regarding the crust properties of magnetized neutron stars, it is only natural to consider how the magnetic field, and the changes it brings about, would affect the relaxation time. For this reason, we follow the study of \cite{Gnedin2008} using the crust properties of magnetized neutron stars, as discussed in the sections above. It is important to notice that the study presented here only establishes an upper limit for the thermal relaxation time of magnetized neutron stars. The reason for this is that, whereas the results obtained in \cite{Gnedin2008} were for spherically symmetric stars, this is not the case for magnetized neutron stars, which have an axisymmetric structure. In any case, the change in the crust thickness should allow us to make a reasonably good estimate of the relaxation time of such objects. In order to estimate how the modification of the crust properties will affect the relaxation time, we consider in Fig.~\ref{AV_thick} the average crust thickness, $\Delta R = \sum \Delta r (\theta)/N_{\theta}$, as a function of the stellar magnetic field. In other words, the average $\Delta R$ is calculated for each value of the magnetic field, where $\Delta r (\theta)$ is the angular-dependent ($0\leq \theta \leq 2\pi$) crust thickness and $N_{\theta}$ the number of points in $\theta$. \begin{figure} \includegraphics[height=8cm, angle=-90]{average_DR-eps-converted-to.pdf} \caption{Average crust thickness for different values of the surface magnetic field.} \label{AV_thick} \end{figure} As Fig.~\ref{AV_thick} shows, the crust becomes, on average, thinner for moderately high magnetic fields and thicker for the largest values of $B_s$. This is different from what happens in (also axisymmetric) rotating neutron stars, whose crusts always get thicker with the increase of the rotational frequency \citep{Negreiros2012}. We believe that the reason for such a difference is connected to the Lorentz force induced by the current distribution inside the star. For the magnetic fields studied in this paper the Lorentz force is attractive in the crust (same direction as the gravitational force), being stronger in the equatorial direction. This means that the crust will tend to become thinner on average for higher magnetic fields.
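To make eqs.~(\ref{eq:tw}) and (\ref{eq:alpha}) concrete, the short sketch below evaluates the relaxation time for illustrative inputs: the crust thickness, gravitational mass and radius used here are placeholder values, not the ones extracted from our magnetized models, while the three $t_1$ normalizations are the crustal-superfluidity cases of \cite{Gnedin2008} discussed below. \begin{verbatim}
# Rough evaluation of the core-crust relaxation time, eqs. (tw) and (alpha).
# All input numbers below are illustrative placeholders.
G, c, Msun = 6.674e-8, 2.998e10, 1.989e33       # cgs units

def relaxation_time(dR_km, M_sun, R_km, t1_years):
    rg_km = 2.0 * G * M_sun * Msun / c**2 / 1.0e5   # gravitational radius [km]
    alpha = dR_km**2 * (1.0 - rg_km / R_km) ** -1.5
    return alpha * t1_years

# t1 = 28.8 / 11.1 / 8.2 yr: no / weak / strong crustal superfluidity
for t1 in (28.8, 11.1, 8.2):
    tw = relaxation_time(0.9, 1.4, 12.0, t1)
    print(f"t1 = {t1:5.1f} yr  ->  t_w ~ {tw:6.1f} yr")
\end{verbatim} For fixed mass and radius, $t_w$ simply tracks the square of the average crust thickness, so a thinner (thicker) crust translates directly into a faster (slower) core-crust coupling.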
On the other hand, one must note that the electromagnetic field in a general relativistic scenario has a dual role: it generates an electromagnetic force (in this case in the form of the Lorentz force, as just discussed) and, through its energy, it contributes to the curvature of space-time (see \cite{Negreiros2009b,PicancoNegreiros2010a} and references therein for a more detailed discussion). Therefore, there are two competing effects: one tending to thin the crust and another causing it to become thicker (both on average). For moderately high magnetic fields the former is stronger, and the crust becomes thinner (on average), whereas the latter is dominant for very high magnetic fields, causing the crust to become thicker (also on average). Evidently, the star with 1.40 solar masses, which has a lower gravity (i.e., curvature), is more susceptible to magnetic field effects and thus shows a more pronounced behavior, as illustrated in Fig.~\ref{AV_thick}. Following the steps of \cite{Gnedin2008} and using the average crust thickness, we now estimate the upper limit for the relaxation time of magnetized neutron stars. For that, as in \cite{Gnedin2008}, we consider three situations, identified by three different values of the normalization constant $t_1$, namely: $t_1 = 28.8$ years (case 1), associated with the absence of superfluidity in the crust; $t_1=11.1$ years, associated with weak superfluidity in the crust (case 2); and $t_1 = 8.2$ years for the case of strong crustal superfluidity (case 3). The results are shown in Figs.~\ref{RelT14} and \ref{RelT20} for the 1.4 and 2.0 solar mass stars, respectively. \begin{figure} \includegraphics[height=8cm, angle=-90]{REAL_TIME_ALL_14-eps-converted-to.pdf} \caption{Relaxation time for the three cases studied, for the 1.40 solar mass star.} \label{RelT14} \end{figure} \begin{figure} \includegraphics[height=8cm, angle=-90]{relaxation_time_M0_20-eps-converted-to.pdf} \caption{Relaxation time for the three cases studied, for the 2.0 solar mass star.} \label{RelT20} \end{figure} As expected from the quadratic dependence on the crust thickness in eq.~(\ref{eq:alpha}), the relaxation time initially decreases as a function of the magnetic field and increases for higher values of $B$. Once again, the 1.40 solar mass star is more susceptible to the effects of the magnetic field. If one wants to eliminate the uncertainties associated with the normalization time ($t_1$, which is connected to the extent and intensity of superfluidity in the crust), one can evaluate $t_w/t_0$, where $t_0$ is the relaxation time for the case with $B_s = 0$. This result is shown in Fig.~\ref{norm_tau}. \begin{figure} \includegraphics[height=8cm, angle=-90]{normalized_REL_TIME-eps-converted-to.pdf} \caption{Relaxation time divided by its value at zero magnetic field, as a function of the surface magnetic field, for stars of 1.4 and 2.0 solar masses.} \label{norm_tau} \end{figure} \section{Conclusion} In this work, we studied the effects of strong poloidal magnetic fields on the global structure of the crust of stationary, highly magnetized neutron stars. We self-consistently account for the Lorentz force, with the current density bounded within the star, by solving the coupled equilibrium equations for the magnetic and gravitational fields, taking into account the stellar deformation due to anisotropies induced by magnetic fields. We have employed typical and well-known equations of state to describe the inner and the outer crust of neutron stars.
We found that the size of the crust changes according to the angular direction inside the star. As already discussed in \cite{cardall2001effects} and later on in \cite{Franzon:2015gda} in the context of magnetized white dwarfs, the magnetic force is zero along the symmetry axis and its direction depends on the current distribution inside the star. Moreover, the Lorentz force reverses its direction inside the star. In our case, this can be seen from the change in behaviour of the magnetic potential $M(r, \theta)$ around $\rm{R_{circ}} \sim$ 10 km in Fig.~\ref{eqm}. In this work, we have taken steps to estimate how the magnetic field, and the consequent modification of the crust and space-time of the neutron star, may affect the thermal evolution of neutron stars. In addition to the expected effects that a magnetic field may have on the microscopic composition, we have shown that the change in crust geometry may be very relevant to the overall cooling of neutron stars. Using the average crust thickness as a parameter, we estimated the upper limit for the thermal evolution relaxation time, which is the time scale for the core-crust thermal coupling. We have found that the average crust thickness, as a function of the quantity responsible for the breaking of spherical symmetry (the magnetic field in this case), first decreases and then grows. This is substantially different from other axisymmetric neutron stars, such as rotating objects, for instance. In the latter case the crust always gets thicker as a function of the symmetry-breaking quantity (rotation/angular momentum in that case). We conclude that the reason for such behavior lies in the dual role of the electromagnetic field in a general relativistic scenario, whose energy contributes to the curvature in addition to the traditional electromagnetic interaction. We have found that for the stars studied there are two competing effects: the Lorentz force tends to make the crust thinner, whereas the gravitational contribution of the magnetic field tends to make the crust thicker. For moderately high magnetic fields the former wins and the crust gets thinner on average, whereas for extreme values of $B$ the latter is dominant, making the crust thicker overall. This behavior is reflected in our estimates of the core-crust coupling time, which, as a function of the surface magnetic field, initially gets smaller and then increases for higher values of $B$. This result is interesting, since one would be inclined to believe that the relaxation time would increase monotonically with $B$. One must also note that the overall geometry of the star becomes more oblate with the increase of $B$, so such an assumption would seem reasonable. However, due to the Lorentz force acting on the crust, its size is reduced for moderate values of $B$. Our results represent magnetostatic equilibrium conditions. The stability of these equilibria is beyond the scope of this initial discussion of the possible observables associated with the crust geometry of highly magnetized stars. Note that purely poloidal or purely toroidal magnetic field configurations undergo intrinsic instabilities related to their geometries \citep{markey1973adiabatic, tayler1973adiabatic, wright1973pinch, flowers1977evolution, lander2012there, braithwaite2006stability, ciolfi2013twisted, lasky2011hydromagnetic, marchant2011revisiting, mitchell2015instability}.
In this context, several calculations have also shown that stable equilibrium configurations are obtained for magnetic fields composed of both a poloidal and a toroidal component \citep{armaza2015magnetic, prendergast1956equilibrium, braithwaite2004fossil, braithwaite2006stable, akgun2013stability}. In addition, we obtained surface magnetic field values above those observed so far in neutron stars. Nevertheless, according to the virial theorem, the magnetic fields reached at the center of neutron stars are expected to be as high as the values found in this work. Although we have restricted ourselves to purely poloidal magnetic fields, which is not the most general case, we have shown, in a fully general relativistic framework, that strong magnetic fields significantly affect the crust geometry and size. As a result, thermal properties of these objects such as the cooling relaxation time are affected correspondingly. Evidently, our calculations should be seen as an upper limit for the relaxation time, since full thermal evolution calculations such as those in \cite{Negreiros2012} would be necessary. In any case, the interesting behavior of the crust geometry warrants further investigation and shows that the thermal behavior of magnetized neutron stars may not be straightforward. Studies in which the magnetic field changes over time may lead to even more interesting and unexpected behavior. Current efforts are being made towards the investigation of different current distributions (which may lead to the Lorentz force having a different effect) as well as a full 2D thermal evolution calculation of magnetized neutron stars. \section{Acknowledgements} B. Franzon acknowledges support from CNPq/Brazil, DAAD and HGS-HIRe for FAIR. R. Negreiros acknowledges the financial support from CAPES and CNPQ. S. Schramm acknowledges support from the HIC for FAIR LOEWE program. The authors wish to acknowledge the 'NewCompStar' COST Action MP1304.
\section{Introduction} Ground-based gravitational wave detectors, such as Advanced LIGO (aLIGO), the Einstein Telescope and the Large-scale cryogenic gravitational wave detector (KAGRA), as well as the space missions LISA and LAGRANGE, will detect gravitational waves (GW) from black hole binaries. For astrophysical black hole binaries there is no preferred mass ratio $\nu=m_{2}/m_{1}$, while for supermassive black hole binaries typically $\nu \in \left[ 0.03 , 0.3 \right]$ \cite{spinflip1}. In this regime, at the end of the inspiral, the larger spin dominates over the orbital angular momentum, which is reflected in the smallness of the parameter $\xi =\varepsilon ^{-1/2}\nu $. Here $\varepsilon =Gm/c^{2}r\approx v^{2}/c^{2}$ is the post-Newtonian (PN) parameter (with the orbital separation $r$, the orbital velocity $v$ of the reduced mass particle $\mu =m_{1}m_{2}/m$, the total mass $m=m_{1}+m_{2}$, the gravitational constant $G$ and the speed of light $c$). Hence the corresponding spin-dominated waveforms (SDW) are simpler. The time spent by the GW as SDW in the sensitivity ranges of aLIGO, the Einstein Telescope, LISA and LAGRANGE was presented in Ref.~\refcite{SDW}, and for eLISA in Ref.~\refcite{prague}. In this paper the respective time for the KAGRA detector will also be given. \section{Spin-dominated waveforms} For spinning black hole binaries, the ratio of the smaller and larger spin magnitudes is of order $S_{2}/S_{1} = \nu^2\chi_{2}/\chi_{1}$ \cite{spinflip1} (where $\chi_{i}$ denote the dimensionless spin magnitudes, with $\chi_{1}$ taken here close to the maximally allowed value 1). Thus, to first order in $\nu$, the smaller spin can be neglected. The ratio of the orbital angular momentum $L_{N}$ and $S_{1}$ is $L_{N}/S_{1} \approx \varepsilon ^{-1/2}\nu \chi^{-1}_{1}$ \cite{spinflip1}. Generic waveforms were previously known to 1.5 PN \cite{kidder,ABFO} and 2 PN \cite{BFH} accuracy. We employed a second Taylor expansion in the parameter $\xi$. The obtained SDW has the following structure \cite{SDW}: \begin{eqnarray} h_{_{\times }^{+}} &=&\frac{2G^{2}m^{2}\varepsilon ^{1/2}\xi }{c^{4}Dr}\left[ h_{_{\times }^{+}}^{0}+\beta _{1}h_{_{\times }^{+}}^{0\beta }+\varepsilon ^{1/2}\left( h_{_{\times }^{+}}^{0.5}+\beta _{1}h_{_{\times }^{+}}^{0.5\beta }-2\xi h_{_{\times }^{+}}^{0}\right) +\varepsilon \left( h_{_{\times }^{+}}^{1}-4\xi h_{_{\times }^{+}}^{0.5}\right. \right. \notag \\ &&\left. +\beta _{1}h_{_{\times }^{+}}^{1\beta }+h_{_{\times }^{+}}^{1SO}+\beta _{1}h_{_{\times }^{+}}^{1\beta SO}\right) \left. +\varepsilon ^{3/2}\left( h_{_{\times }^{+}}^{1.5}+h_{_{\times }^{+}}^{1.5SO}+h_{_{\times }^{+}}^{1.5tail}\right) \right] ~, \end{eqnarray} $D$ being the luminosity distance of the source. The $\xi$ and $\varepsilon$ orders of the contributions to $h_{_{\times }^{+}}$ are given in Table \ref{table1}. \begin{table} \tbl{SDW contributions of different $\xi$ and $\varepsilon$ orders.
The SO terms contain the dominant spin.} {\begin{tabular}{@{}ccccc@{}} \toprule & $\varepsilon ^{0}$ & $\varepsilon ^{1/2}$ & $\varepsilon ^{1}$ & $\varepsilon ^{3/2}$ \\ \colrule $\xi ^{0}$ & $h_{_{\times }^{+}}^{0}$ & $h_{_{\times }^{+}}^{0.5}$ & $h_{_{\times }^{+}}^{1},h_{_{\times }^{+}}^{1SO}$ & $h_{_{\times }^{+}}^{1.5},h_{_{\times }^{+}}^{1.5SO},h_{_{\times }^{+}}^{1.5tail}$ \\ $\xi ^{1}$ & $h_{_{\times }^{+}}^{0\beta }$ & $h_{_{\times }^{+}}^{0.5\beta } $ & $h_{_{\times }^{+}}^{1\beta },h_{_{\times }^{+}}^{1\beta SO}$ & \\ \botrule \end{tabular} } \label{table1} \end{table} The phase of the gravitational waveform in the double expansion becomes \cite{SDW}: \begin{eqnarray} \phi _{c}-\phi &=&\frac{\varepsilon ^{-3}}{32\xi }\left\{ 1+2\varepsilon ^{1/2}\xi +\frac{1195}{1008}\varepsilon +\left( \allowbreak -10\pi +\frac{3925}{504}\xi +\frac{175}{8}\chi_{1}\cos \kappa _{1}\right) \varepsilon ^{3/2} \right. \notag \\ && \left. +\left[ -\frac{21\,440\,675}{1016\,064} +\chi _{1}^{2}\left( \frac{375}{16}-\allowbreak \frac{3425}{96} \sin ^{2}\kappa _{1}\right) \right] \varepsilon ^{2}\allowbreak \right\} ~. \end{eqnarray} Here $\phi _{c}$ is the phase at coalescence, and $\kappa_{1}$ is the angle spanned by $S_{1}$ and $L_{N}$. \section{Limits of Validity} We impose $\xi \leq 0.1$, equivalent to a lower limit $\varepsilon _{1}=Gm/c^{2}r_{1}=100\nu ^{2}$ for the PN parameter. The end of the inspiral gives an upper limit for the PN parameter, chosen here as $\varepsilon _{2}=0.1$ \cite{LEVIN}. This leads to an upper limit for the mass ratio, $\nu_{\max }=0.0316\approx 1:32$. The time during which the binary evolves from $\varepsilon _{1}$ to $\varepsilon _{2}$ is \cite{SDW} \begin{equation} \Delta t=\frac{5Gm}{2^{8}c^{3}}\frac{(1+\nu )^{2}}{\nu }\left( \varepsilon _{1}^{-4}-\varepsilon _{2}^{-4}\right) ~. \end{equation} For a given SDW, $\varepsilon_{1}$ can be lower than the value of the PN parameter at the lower sensitivity bound, $\varepsilon_{f_{\min }}$; hence the time spent in the best sensitivity range of the detector by an SDW is calculated from $\max \left( \varepsilon_{1} , \varepsilon_{f_{\min }}\right) $ to $\varepsilon _{2}$. We represent in Fig.~\ref{fig1} this time interval for the KAGRA detector, as a function of $m$ and $\nu$. The lower sensitivity bound of the KAGRA detector is $f_{\min}=10$~Hz \cite{KAGRA}, the same as for aLIGO \cite{aLIGO}. The corresponding figure for aLIGO is Fig.~1 of Ref.~\refcite{SDW}. Although the figures are similar, the detectors exhibit different shapes of the spectral noise density as a function of frequency, leading to different signal-to-noise ratios for a waveform. \begin{figure}[t] \begin{center} \psfig{file=sdwido_kagra_v6.eps,width=4.5in} \end{center} \caption{The time during which SDWs are detectable by KAGRA, represented as a function of the total mass $m$ and mass ratio $\nu$. The color code is logarithmic.} \label{fig1} \end{figure} We derived the upper limit of the total mass, $m = 202~$M$_{\odot }$, from the lower frequency bound of KAGRA. By assuming that the smaller compact object has at least the mass of a neutron star ($1.4~$M$_{\odot }$), we found a total-mass-dependent lower limit $\nu_{\min}$, represented by the lower cutoff in Fig.~\ref{fig1}. \section{Concluding Remarks} For mass ratios smaller than $\nu_{\max}=1:32$ the larger spin $S_{1}$ dominates over the orbital angular momentum $L_{N}$ at the end of the inspiral. This is expressed by a second small parameter $\xi$ (beside the PN parameter $\varepsilon$).
Expanding the PN waveforms in terms of $\xi$ (to first order, this also amounts to neglecting the secondary spin) leads to the spin-dominated waveforms. These waveforms are considerably shorter than the generic waveforms. The correspondingly smaller parameter space will prove advantageous in gravitational wave detection. \bibliographystyle{ws-procs975x65}
\section{Introduction} \label{sec:intro} After the observation of a trapped Bose-Einstein condensate (BEC) of $^{87}$Rb and $^{23}$Na alkali metal atoms at ultra-low temperature in a laboratory \cite{expt}, rotating trapped condensates hosting quantized vortices \cite{vortex} and large vortex lattices \cite{vorlat} were created, for small and large angular frequencies of rotation, respectively, under controlled conditions and studied experimentally. As suggested by Onsager \cite{onsager}, Feynman \cite{feynman} and Abrikosov \cite{abrikosov}, these vortices have quantized circulation as in liquid He II \cite{fetter} \begin{equation} \label{cir} \frac{\widetilde m}{2\pi\hbar }\oint_{\cal C} {\bf v}.d{\bf r}= \pm l, \end{equation} where ${\bf v}({\bf r}, t)$ is the super-fluid velocity field at a space point ${\bf r}$ and time $t$, $\cal C$ is a generic closed path, $ l$ is the quantized angular momentum of an atom in units of $\hbar $ in the rotating BEC and $\widetilde m$ is the mass of an atom. For notational simplicity, the circulation (\ref{cir}) is scaled by a factor of $2\pi\hbar/\widetilde m$, so that its absolute value is equal to the angular momentum $l$: a positive (negative) circulation corresponds to a vortex (anti-vortex). If $l\ne 0$, there are topological defects inside the closed path $\cal C$, which manifest in the form of a quantized vortex line \cite{fetter}. Quantized vortices of unit angular momentum were first observed in a uniform super-fluid He II in a rotating bucket \cite{heii}. Vorticity is the curl of the velocity field, $\nabla _{\bf r} \times {\bf v}({\bf r}, t)$, and determines the direction of the angular momentum vector. London gave a qualitative explanation of the quantization of circulation in He II \cite{london}. In addition, if we assume that the dynamics of the super-fluid is governed by a complex scalar field $\psi({\bf r}, t)\equiv | \psi({\bf r},t)|\exp [i\delta ({\bf r}, t) ]$ with ${\bf v}({\bf r}, t) = \nabla_{\bf r} \delta( {\bf r}, t)$, then $\psi({\bf r},t)$ is known to satisfy the mean-field Gross-Pitaevskii (GP) equation \cite{fetter}, which has been used successfully to study the formation of vortices and vortex lattices \cite{vor-lat} in a BEC. A spinor BEC of $^{23}$Na atoms \cite{exptspinor} with hyper-fine spin $F=1$ has also been observed and, more recently, it has been possible to introduce an artificial synthetic spin-orbit (SO) coupling by Raman lasers that coherently couple the spin-component states in a spinor BEC \cite{thso}. Two common SO couplings are due to Rashba \cite{SOras} and Dresselhaus \cite{SOdre}. An equal mixture of these SO couplings has been realized in pseudo spin-1/2 $^{87}$Rb \cite{exptso} and $^{23}$Na \cite{exptsona} BECs containing only two spin components $F_z = 0, -1$ of total spin $F = 1$. Later, an equal mixture of Rashba and Dresselhaus SO couplings was also created in a spin-1 ferromagnetic $^{87}$Rb BEC, containing all three spin components $F_z=\pm 1,0$ \cite{bosespin1}. Spinor BECs can show a rich variety of topological excitations \cite{thspinor,topo2,kita,kita2} not possible in a scalar BEC. The predicted Mermin-Ho \cite{mh} and Anderson-Toulouse \cite{at} vortices in $^3$He with a non-singular angular momentum structure, although not observed in $^3$He, might appear in a spinor BEC. It was later demonstrated \cite{kita} that, in a trapped slowly rotating ferromagnetic spinor BEC, the Mermin-Ho and Anderson-Toulouse vortices are thermodynamically stable.
Such stable vortices appear in a ferromagnetic spinor BEC in the form of a state of type $(0,+1,+2)$ \cite{kita,kita2}, where the numbers in the parentheses denote the circulation (angular momentum) of vortices in components $F_z=+1,0,-1$, respectively. For certain values of magnetization, $(+1,+1,+1)$- and $(+1,0,-1)$-type states have also been demonstrated to appear \cite{kita} in a ferromagnetic spinor BEC. The $(+1,0,-1)$-type state hosts a vortex (anti-vortex of negative vorticity) in the component $F_z=+1$ ($F_z=-1$), {whereas the component $F_z=0$ remains vortex free}. However, such states in an anti-ferromagnetic BEC were not found to be stable \cite{kita}. The analogue of these states in a pseudo spin-1/2 system is the $(0,+1)$-type half-quantum state \cite{yy,yy2,yy1}. { In bosonic spin systems, SO coupling leads to a variety of novel phenomena that are not possible in spinor BECs without SO coupling \cite{xyz}, for example \cite{rash}, the stripe phase \cite{stripe,st2}, the Rashba pairing bound states (Rashbons) \cite{rashbon}, the spin Hall effect \cite{hall}, spintronics \cite{spint}, as well as the super-fluidity and Mott-insulator phases of SO-coupled quantum gases in an optical lattice \cite{AB}.} A three-component SO-coupled spin-1 BEC is known to exhibit a rich variety of physical phenomena not possible in a two-component pseudo spin-1/2 BEC \cite{thspinor,stripe,prop}. A spin-1 spinor BEC is controlled by two interaction strengths, e.g., $c_0 \propto (a_0 + 2a_2)/3$ and $c_2 \propto (a_2 - a_0)/3,$ with $a_0$ and $a_2$ the scattering lengths in the total spin 0 and 2 channels, respectively, and appears in two distinct phases: ferromagnetic ($c_2<0$) and anti-ferromagnetic or polar ($c_2>0$). In view of this, we investigate in this paper the formation of vortex lattices in a rapidly rotating Rashba SO-coupled ferromagnetic and anti-ferromagnetic trapped spin-1 spinor BEC, to see the effect, if any, of the above topological excitations on the generated vortex lattice. Previously, the formation of vortices and vortex lattices in a Rashba SO-coupled trapped pseudo spin-1/2 spinor BEC was studied numerically \cite{yy,yy2} and analytically \cite{yy1}. Different ways of realizing a rotating SO-coupled spinor BEC have been suggested \cite{gai}. {We find that for a weakly Rashba SO-coupled quasi-two-dimensional (quasi-2D) non-rotating ferromagnetic spin-1 spinor BEC, the { lowest-energy circularly-symmetric state is a} $(0,+1,+2)$-type state \cite{cpc}. For a weakly Rashba SO-coupled quasi-2D non-rotating anti-ferromagnetic spinor BEC, the { lowest-energy circularly-symmetric} state is of the type $(-1,0,+1)$ \cite{cpc}. } In a rotating quasi-2D scalar BEC in the $x-y$ plane, the generated vortex-lattice structure is the same for vorticity of rotation along the $z$ or $-z$ axis. However, the generated vortex lattice for vorticity of rotation along the $z$ or $-z$ axis will be different for {a Rashba} SO-coupled spin-1 spinor BEC because of the above symmetry-breaking $(0,+1,+2)$- and $(-1,0,+1)$-type states. When subject to rotation, both ferromagnetic and anti-ferromagnetic spinor BECs form vortex lattices with a hexagonal or an {{\it approximate} ``square'' symmetry. Although the square symmetry is often distorted, a study of energies shows that, distinct from a rotating scalar BEC, the lattice structure with square symmetry has the smaller energy.
In the case of a scalar BEC the vortex lattice with hexagonal symmetry has the smallest energy.} In the case of a ferromagnetic {Rashba} SO-coupled spin-1 spinor BEC, for rotation with vorticity along the $z$ direction, the hexagonal or square vortex-lattice structure is built around the $(0,+1,+2)$-type state at the center: the central sites of the three components $F_z=+1,0,-1$ host vortices of circulation $0,+1,+2$, respectively. For rotation with vorticity along the $-z$ direction, an anti-vortex lattice is generated in both ferromagnetic and anti-ferromagnetic spinor BECs around a complex anti-vortex structure at the center. In Sec. \ref{II} we present the mean-field GP equation for a rotating quasi-2D SO-coupled spin-1 spinor condensate in the rotating frame. In Sec. \ref{III} we present {the numerical details for the solution of the GP equation as well as the} numerical results obtained from its solution for weak SO coupling using the split-time-step Crank-Nicolson discretization scheme. For rotation with angular momentum along the $z$ and $-z$ directions, the generated vortex- and anti-vortex-lattice structures with hexagonal and square symmetries are studied for ferromagnetic and anti-ferromagnetic spinor BECs. Finally, in Sec. \ref{IV} we present a summary of our study. \section{The Gross-Pitaevskii equation for a rotating spin-1 condensate} \label{II} {We will consider a Rashba SO-coupled BEC with coupling between the spin and momentum given by $\gamma ( \Sigma_x p_y - \Sigma_y p_x)$ \cite{exptso}, where $\gamma$ is the strength of SO coupling, $p_x$ and $p_y$ are the $x$ and $y$ components of the momentum operator and $\Sigma_x$ and $\Sigma_y$ are the irreducible representations of the $x$ and $y$ components of the spin matrix, respectively, \begin{eqnarray} \Sigma_x=\frac{1}{\sqrt 2} \begin{pmatrix} 0 & 1 & 0 \\ 1 & 0 & 1\\ 0 & 1 & 0 \end{pmatrix}, \quad \Sigma_y=\frac{i}{\sqrt 2 } \begin{pmatrix} 0 & -1 & 0 \\ 1 & 0 & -1\\ 0 & 1 & 0 \end{pmatrix}. \end{eqnarray} } For the study of vortex-lattice formation in a rotating SO-coupled quasi-2D spin-1 spinor BEC, we consider a harmonic trap $V({\bf r})= \widetilde m\omega^2(x^2+y^2)/2 + \widetilde m\omega_z^2 z^2/2$ with tighter binding in the $z$ direction $(\omega_z\gg \omega)$, where $\omega_z$ is the angular frequency of the trap in the $z$ direction and $\omega$ that in the $x-y$ plane. The single-particle Hamiltonian of the condensate without atomic interaction and with Rashba \cite{SOras} SO coupling in this quasi-2D trap, in dimensionless variables, is \cite{exptso,zhai} \begin{equation} H_0 = -\frac{1}{2}\nabla^2_{\bf r}+\frac{x^2+y^2}{2}+\frac{\omega_z^2z^2}{2\omega^2} + \gamma( \Sigma_x p_y- \Sigma_y p_x ), \label{sph} \end{equation} where ${\bf r}=\{x,y,z\}$, { $\nabla^2_{\bf r}=-(p_x^2+p_y^2+p_z^2),$} $p_x = -i\partial_x, p_y = -i\partial_y$ and $p_z = -i\partial_z$ are the momentum operators along the $x,y$ and $z$ axes, respectively, and $\partial_x, \partial_y, \partial_z$ are the partial space derivatives. All quantities in (\ref{sph}) and in the following are dimensionless; this is achieved by expressing lengths ($x,y,z$) in units of the harmonic oscillator length $l_0\equiv \sqrt{\hbar/\widetilde m \omega}$, and energy in units of $\hbar \omega$. The formation of a vortex lattice in a rapidly rotating spinor BEC can be conveniently studied in the rotating frame, where the generated vortex-lattice state is a stationary one that can be obtained by the imaginary-time propagation method \cite{fetter}.
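Before proceeding, we note that the spin-1 matrices introduced above are easy to sanity-check numerically. The following short Python/NumPy sketch (an illustrative aside, independent of the production codes cited in Sec.~\ref{III}) verifies the spin-1 commutation relation $[\Sigma_x,\Sigma_y]=i\Sigma_z$, with $\Sigma_z=\mathrm{diag}(1,0,-1)$, and that each spin matrix has eigenvalues $-1,0,+1$:
\begin{verbatim}
import numpy as np

# Irreducible spin-1 matrices entering the Rashba coupling term
Sx = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=complex) / np.sqrt(2)
Sy = 1j * np.array([[0, -1, 0], [1, 0, -1], [0, 1, 0]]) / np.sqrt(2)
Sz = np.diag([1.0, 0.0, -1.0]).astype(complex)

# Spin-1 algebra: [Sx, Sy] = i Sz; each matrix has eigenvalues -1, 0, +1
assert np.allclose(Sx @ Sy - Sy @ Sx, 1j * Sz)
for S in (Sx, Sy, Sz):
    assert np.allclose(np.linalg.eigvalsh(S), [-1.0, 0.0, 1.0])
print("spin-1 commutation relation and spectra verified")
\end{verbatim}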
The dynamical equation in the rotating frame can be written down if we note that the Hamiltonian in the rotating frame is given by $H =H_0 - \Omega_0 L_z$ \cite{landau}, where $H_0$ is the laboratory frame Hamiltonian, $\Omega_0$ is the angular frequency of rotation around the $z$ axis, and $L_z\equiv (xp_y-yp_x) = i(y\partial _x - x\partial _y)$ is the $z$ component of the angular momentum. In a trapped condensate, for rotation around the $z$ axis, ordered vortex-lattice formation is possible for $\Omega_0<\omega$ \cite{fetter}. {As $\Omega_0$ is increased above $\omega$, the whole super-fluid moves away from the center towards the boundary because of an excess of centrifugal force, and the super-fluidity of the condensate breaks down \cite{fetter}. This was verified in our numerical calculation.} For tight harmonic binding along the $z$ direction, assuming a Gaussian density distribution in the $z$ direction, after integrating out the $z$ coordinate following the procedure of \cite{quasi12d}, in the mean-field approximation, a quasi-2D rotating SO-coupled spin-1 spinor BEC is described by the following set of three coupled GP equations for $N$ atoms in dimensionless form for the hyper-fine spin components $F_z = \pm 1, 0$ \cite{thspinor,thspinorb,GA} \begin{align}\label{EQ1} i \partial_t & \psi_{\pm 1}({\boldsymbol \rho})= \left[{\cal H}+{c_2} \left(n_{\pm 1} -n_{\mp 1} +n_0\right) -\Omega L_z \right] \psi_{\pm 1}({\boldsymbol \rho})\nonumber \\ +&\left\{c_2 \psi_0^2({\boldsymbol \rho})\psi_{\mp 1}^*({\boldsymbol \rho})\right\} -i {\widetilde \gamma} (\partial_y\psi_{0} ({\boldsymbol \rho})\pm i \partial_x\psi_{0}({\boldsymbol \rho}) ) \, , \\ \label{EQ2} i \partial_t & \psi_0({\boldsymbol \rho})=\left[ {\cal H}+{c_2} \left(n_{+ 1}+n_{- 1}\right) -\Omega L_z\right] \psi_{0}({\boldsymbol \rho}) \nonumber \\ +&\left \{ {2} c_2 \psi_{+1}({\boldsymbol \rho})\psi_{-1}({\boldsymbol \rho})\psi_{0}^* ({\boldsymbol \rho})\right\} -i{\widetilde \gamma} [-i \partial_x \{\psi_{+1} ({\boldsymbol \rho}) \nonumber \\ -&\psi_{-1}({\boldsymbol \rho})\} + \partial_y \{\psi_{+1} ({\boldsymbol \rho}) + \psi_{-1}({\boldsymbol \rho})\}] \, , \\ {\cal H}=&-\frac{1}{2}\nabla^2_{\boldsymbol \rho}+V({\boldsymbol \rho})+c_0 n, \\ c_0 =& \frac{2N\sqrt{2\pi\kappa}(a_0+{2}a_2)}{3}, \quad c_2 = \frac{2N\sqrt{2\pi\kappa}(a_2-a_0)}{3}, \label{EQ4} \end{align} where ${\boldsymbol \rho}\equiv \{x,y \}$, $\nabla_{\boldsymbol \rho}^2\equiv (\partial_x^2+\partial_y^2)$, $\kappa=\omega_z/\omega$ $(\gg 1)$, $\Omega=\Omega_0/\omega$ $ (<1)$, $\widetilde \gamma = \gamma/\sqrt 2 $, $n_j = |\psi_j|^2$, $j= \pm 1 , 0$, are the component densities, in units of $l_0^{-2}$, of hyper-fine spin components $F_z=\pm 1,0$ and $n ({\boldsymbol \rho})= \sum_j n_j({\boldsymbol \rho})$ is the total density, $V({\boldsymbol \rho})\equiv (x^2+y^2)/2$ is the circularly symmetric confining trap in the $x-y$ plane, $\partial_t $ is the partial time derivative with time in units of $\omega^{-1}$, $a_0$ and $a_2$ are the s-wave scattering lengths, in units of $l_0$, in the total spin 0 and 2 channels, respectively, and the asterisk denotes complex conjugate. The maximum allowed value of the angular frequency in (\ref{EQ1})-(\ref{EQ2}) for the formation of an ordered super-fluid vortex lattice is $|\Omega|=1$ \cite{fetter}. For notational compactness, the time dependence of the wave functions is not explicitly shown in (\ref{EQ1}) and (\ref{EQ2}). The normalization condition is $\int n({\boldsymbol \rho})\, d{\boldsymbol \rho}=1.$
Equations (\ref{EQ1})-(\ref{EQ2}) can be derived from the energy functional \cite{thspinor,fetter} \begin{align}\label{energy} E[\psi(\Omega)] &= \frac{1}{2} \int d{\boldsymbol \rho} \Big\{ \sum_j |\nabla_{\boldsymbol \rho}\psi_j|^2+ 2 Vn+c_0n^2\nonumber \\ &+ c_2\big[n_{+1}^2 +n_{-1}^2 +2(n_{+1}n_0+n_{-1}n_0-n_{+1}n_{-1}\nonumber \\ & +\psi_{-1}^*\psi_0^2\psi_{+1}^* + \psi_{-1}\psi_0^{*2}\psi_{+1}) \big] -2i\widetilde \gamma\big[ \psi_0^* \partial_y(\psi_{+1}\nonumber \\ &+\psi_{-1} ) + (\psi_{+1}^*+\psi_{-1}^* ) \partial_y \psi_0 -i \psi_0^* \partial_x(\psi_{+1}-\psi_{-1} )\nonumber \\ &+i (\psi_{+1}^*-\psi_{-1}^* )\partial_x \psi_0 \big] - 2 \Omega \sum_j \psi_j^* L_z \psi_j \Big\}, \end{align} where the space dependence of the different variables is not explicitly shown. The $\Omega$ dependence of the wave function is shown to recall that the energy functional is a function of the angular frequency of rotation. \section{Numerical Results} \label{III} To solve (\ref{EQ1}) and (\ref{EQ2}) numerically, we propagate these equations in time by the split-time-step Crank-Nicolson discretization scheme \cite{cpc,bec2009,bec2012,bec2017x} using a space step of 0.1 and a time step $\Delta$ of 0.001 to obtain the stationary state by imaginary-time simulation. There are different C and FORTRAN programs for solving the GP equation \cite{bec2009,bec2012} and one should use the appropriate one. These programs have recently been adapted to simulate the vortex lattice in a rapidly rotating BEC \cite{vor-lat} and we use these in this study. The imaginary-time propagation is started with an appropriate initial state consistent with symmetry for quick convergence. { In the ferromagnetic and anti-ferromagnetic phases, the circularly-symmetric ground states of the Rashba SO-coupled spin-1 BEC are of types $(0,+1,+2)$ and $(-1,0,+1)$ \cite{cpc}, respectively, with vortices in the components. In the numerical simulation of vortex lattices in a rotating SO-coupled spin-1 BEC we will include these vortices in the initial state. } For a final localized state without vorticity, a Gaussian initial state in each component is adequate: $\psi _j(\boldsymbol \rho) \sim \exp (-\rho^2/\alpha_j^2)$, where $\alpha_j$ is the width. However, for a $(0,+1,+2)$-type state with vorticity, we will take the initial functions $\psi _{+1}(\boldsymbol \rho) \sim \exp (-\rho^2/\alpha_j^2)$, $\psi _{0}(\boldsymbol \rho) \sim (x+iy)\exp (-\rho^2/\alpha_j^2)$, $\psi _{-1}(\boldsymbol \rho) \sim (x+iy)^2 \exp (-\rho^2/\alpha_j^2)$. Similarly, for a $(-1,0,+1)$-type state, we take the initial functions $\psi _{+1}(\boldsymbol \rho) \sim (x-iy) \exp (-\rho^2/\alpha_j^2)$, $\psi _{0}(\boldsymbol \rho) \sim \exp (-\rho^2/\alpha_j^2)$, $\psi _{-1}(\boldsymbol \rho) \sim (x+iy) \exp (-\rho^2/\alpha_j^2)$. With these initial states, with proper vorticity, the convergence of the imaginary-time propagation is quick. The parameters of the GP equation $c_0$ and $c_2$ are taken from the following realistic experimental situations. For the quasi-2D ferromagnetic BEC we use the following parameters of $^{87}$Rb atoms: $N=100,000, a_0=101.8a_B, a_2=100.4a_B$ \cite{a02rb}, $l_{z}\equiv l_0/\sqrt \kappa=2.0157$ $\mu$m, where $a_B$ is the Bohr radius. Consequently, $c_0\equiv 2N\sqrt{2\pi}(a_0+2a_2)/3l_{z} \approx 1327$ and $c_2\equiv 2N\sqrt{2\pi}(a_2-a_0)/3l_{z} \approx -6.15$. For the quasi-2D anti-ferromagnetic BEC we use the following parameters of $^{23}$Na atoms: $N=100,000, a_0=50.00a_B, a_2=55.01a_B$ \cite{baox}, $l_{z}=2.9369$ $\mu$m.
Consequently, $c_0 \approx 482$ and $c_2 \approx 15$. { The $^{87}$Rb and $^{23}$Na atoms naturally appear in ferromagnetic ($c_2<0$) and anti-ferromagnetic ($c_2>0$) phases, respectively. Hence, to simulate a ferromagnetic (anti-ferromagnetic) BEC we use the parameters of $^{87}$Rb ($^{23}$Na). Using a Feshbach resonance it is possible to change the sign of $c_2$, thus turning, for example, a ferromagnetic $^{87}$Rb BEC into an anti-ferromagnetic BEC. However, we will not consider this possibility in this paper.} \subsection{Classification of states and symmetries} { We will consider Rashba SO-coupled ferromagnetic and anti-ferromagnetic spinor BECs for weak SO coupling ($\gamma \lessapprox 0.75$) and an initial circularly-symmetric solution for the quasi-2D non-rotating spinor BEC. For large values of $\gamma$, the non-rotating SO-coupled spinor BEC has stripe (and other) patterns in density \cite{stripe,st2}, breaking circular symmetry. The rotation of such a state should lead to a complex vortex-lattice structure and will not be considered in this paper.} The formation of a vortex lattice in a scalar BEC, without intrinsic vorticity in the absence of rotation, is different from that in an SO-coupled spin-1 spinor BEC with states of type $(0,+1,+2)$ or $(-1,0,+1)$ with intrinsic vorticity \cite{kita}. For a {weakly} SO-coupled spin-1 ferromagnetic spinor BEC of type $(0,+1,+2)$, rotating with the angular momentum vector parallel to the vorticity direction, a vortex lattice with hexagonal symmetry can be generated maintaining the states of circulation $+1$ and $+2$ at the center in components $j=0$ and $-1$, respectively, while the center of the $j=+1$ component is maintained vortex free. However, in this case, for rotation with the angular momentum vector anti-parallel to the vorticity direction, for small angular frequency of rotation, the state of type $(0,+1,+2)$ becomes one of type $(-2,-1,0)$. The $(-2,-1,0)$-type state results from a superposition of the $(0,+1,+2)$-type state with a $(-2,-2,-2)$-type state: the latter is generated by rotation. { This is a two-step process. First a $(-1,0,+1)$-type state is formed upon the superposition of a $(-1,-1,-1)$-type state, generated by rotation, with the $(0,+1,+2)$-type state. Later another $(-1,-1,-1)$-type state superposed on the $(-1,0,+1)$-type state yields the $(-2,-1,0)$-type state. } With the increase of angular frequency of rotation, an anti-vortex lattice with hexagonal symmetry can be generated maintaining these central anti-vortices of circulation $-2$ and $-1$ in components $j=+1$ and $0$, respectively. The anti-vortex lattice is really a vortex lattice with opposite vorticity. For a {weakly} SO-coupled spin-1 anti-ferromagnetic spinor BEC of type $(-1,0,+1)$, rotating with the angular momentum vector parallel to the vorticity $z$ direction, for small angular frequency of rotation, the state $(-1,0,+1)$ transforms into the state $(0,+1,+2)$. The $(0,+1,+2)$-type state results from a superposition of the $(-1,0,+1)$-type state {with a $(+1,+1,+1)$-type state:} the latter is generated by rotation. In this case, upon the increase of angular frequency of rotation, a vortex lattice with hexagonal symmetry can be generated maintaining the central vortices of circulation $+1$ and $+2$ in components $j=0$ and $-1$, respectively.
However, for a spin-1 SO-coupled spinor BEC of type $(-1,0,+1)$, rotating with the angular momentum vector anti-parallel to the vorticity direction, for small angular frequency of rotation, the state $(-1,0,+1)$ transforms into the state $(-2,-1,0)$. With the increase of angular frequency of rotation, an anti-vortex lattice with hexagonal symmetry can again be generated maintaining these central anti-vortices of circulation $-2$ and $-1$ in the respective components. In the case of a rotating SO-coupled spin-1 spinor condensate, we find that there are many different vortex-lattice states with different symmetry properties lying close to each other. Hence it is often difficult to find the vortex-lattice state with minimum energy by imaginary-time propagation, and it is possible that, for certain initial states, the imaginary-time approach converges to a nearby excited vortex-lattice state instead of the lowest-energy state. To circumvent this problem, we repeated the calculation with different initial states, so as to be sure that the converged vortex-lattice state is indeed the lowest-energy state. The use of an analytic initial function modulated by a random phase at different space points also increases the possibility of convergence to the minimum-energy state \cite{vor-lat}. Unlike in a rotating scalar BEC, where the vortex lattice in the lowest-energy state always has a hexagonal symmetry, in the case of a rotating SO-coupled spin-1 spinor BEC, vortex- and anti-vortex-lattice states with an approximate square symmetry can also appear, in addition to those with the usual hexagonal symmetry. For hexagonal symmetry, vortices are arranged in concentric orbits containing a maximum of 6, 12, 18 ... vortices; for square symmetry these numbers are 8, 12, 16 ... Often, for the same angular frequency of rotation, it is possible to obtain both types of vortex-lattice states with all orbits containing the maximum number of vortices. When this happens, the vortex- and anti-vortex-lattice states for both ferromagnetic and anti-ferromagnetic SO-coupled spin-1 { BECs with square symmetry are found to have the smaller numerical energy, although we could not establish this fact theoretically. } The general scenario of vortex-lattice states remains unchanged for different numerical values of the non-linearity parameters $c_0$ and $c_2$.
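As an aside, the construction of such initial states is simple to sketch. The following Python/NumPy fragment (grid size, Gaussian width and random seed are illustrative choices, not the parameters used in the cited C/FORTRAN programs) builds the $(0,+1,+2)$-type initial functions described above, with an optional random phase at each space point and the total density normalized to unity:
\begin{verbatim}
import numpy as np

L, npt = 12.0, 128                       # box half-width and grid points
x = np.linspace(-L, L, npt)
dx = x[1] - x[0]
X, Y = np.meshgrid(x, x, indexing="ij")
gauss = np.exp(-(X**2 + Y**2) / 2.0**2)  # Gaussian of width alpha = 2

# (0,+1,+2)-type state: circulation 0, +1, +2 in components F_z = +1, 0, -1
psi = [gauss, (X + 1j * Y) * gauss, (X + 1j * Y) ** 2 * gauss]

# Optional random phase at each grid point, to help the imaginary-time
# propagation escape nearby excited vortex-lattice states
rng = np.random.default_rng(1)
theta = 2.0 * np.pi * rng.random((npt, npt))
psi = [p * np.exp(1j * theta) for p in psi]

# Normalize the *total* density to one, as in the text
norm = np.sqrt(sum(np.sum(np.abs(p) ** 2) for p in psi) * dx * dx)
psi = [p / norm for p in psi]
\end{verbatim}
A $(-1,0,+1)$-type initial state is obtained analogously by using the factors $(x-iy)$, $1$ and $(x+iy)$ in the three components.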
\begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1f.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1g.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1h.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1i.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1j.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1k.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig1l.png} \caption{(Color online) Contour plot of component densities $n_j(\boldsymbol \rho)\equiv | \psi _j(\boldsymbol \rho)|^2$ of vortex-lattice states with hexagonal symmetry of a rotating Rashba SO-coupled ferromagnetic spin-1 quasi-2D spinor BEC for angular frequencies $\Omega =0, 0.4, 0.65,$ and 0.835 in plots (a)-(c), (d)-(f), (g)-(i), and (j)-(l), respectively. The angular momentum of rotation is parallel to the vorticity direction of the non-rotating state in (a)-(c). The non-linearity parameters $c_0=1327, c_2=-6.15$, and SO-coupling strength $\gamma =0.5$. In all density plots of this paper, energy values, viz. (\ref{energy}), are displayed in the density of the $j=+1$ component. All results reported in this paper are in dimensionless units, as outlined in Section~\ref{II}.} \label{fig1} \end{figure} \begin{figure}[!t] \centering \includegraphics[width=.32\linewidth]{fig2a.png} \includegraphics[width=.32\linewidth]{fig2b.png} \includegraphics[width=.32\linewidth]{fig2c.png} \includegraphics[width=.32\linewidth]{fig2d.png} \includegraphics[width=.32\linewidth]{fig2e.png} \includegraphics[width=.32\linewidth]{fig2f.png} \includegraphics[width=.32\linewidth]{fig2g.png} \includegraphics[width=.32\linewidth]{fig2h.png} \includegraphics[width=.32\linewidth]{fig2i.png} \includegraphics[width=.32\linewidth]{fig2j.png} \includegraphics[width=.32\linewidth]{fig2k.png} \includegraphics[width=.32\linewidth]{fig2l.png} \caption{(Color online) (a)-(c) Contour plot of the phase $\delta({\boldsymbol \rho})$ of the wave function of the non-rotating Rashba SO-coupled ferromagnetic spin-1 quasi-2D spinor BEC of figures \ref{fig1}(a)-(c). (d)-(f) The same of the rotating ferromagnetic spinor BEC, with angular frequency $\Omega=0.4$, of figures \ref{fig1}(d)-(f). (g)-(i) The same of the rotating ferromagnetic spinor BEC, with angular frequency $\Omega=-0.05$, of figures \ref{fig4}(a)-(c). (j)-(l) The same of the rotating ferromagnetic spinor BEC, { with angular frequency $\Omega=-0.39$,} of figures \ref{fig5}(a)-(c). } \label{fig2} \end{figure} \subsection{Ferromagnetic condensate} We study the formation of vortex-lattice states with hexagonal symmetry in a rotating Rashba SO-coupled quasi-2D $^{87}$Rb ferromagnetic spin-1 BEC with SO-coupling strength $\gamma=0.5$, and non-linearities $c_0=1327$ and $c_2=-6.15$ for different angular frequencies $\Omega$ through contour plots of the density {$n_j({\boldsymbol \rho})=|\psi_{j} ({\boldsymbol \rho})|^2$} of the different components.
In figures \ref{fig1}(a)-(c) we plot the densities of components $j=+1,0,-1$ of the non-rotating { lowest-energy circularly-symmetric} state of type $(0,+1,+2)$. We checked the vorticity and circulation of the components by analyzing the phase plot of the wave function displayed in figures \ref{fig2}(a)-(c). The phase drop upon a clockwise rotation of $2\pi$ in figure \ref{fig2}(b) (c) is $2\pi$ ($4\pi$) indicating a circulation of $+1$ ($+2$). The $j=-1$ component with circulation $+2$ has a larger vortex core than the $j=0$ component with circulation $+1$. In figures \ref{fig1}(d)-(l) we display the vortex lattice with hexagonal symmetry for increasing angular frequencies $\Omega=0.4, 0.65,$ and $0.835$. { The vortex structure of the BEC with $\Omega =0.4$ can be found from the phase plot of the corresponding wave function in figures \ref{fig2}(d)-(f) for components $j=+1,0,-1.$} The direction of the generated angular momentum upon rotation is parallel to the intrinsic vorticity of the non-rotating state ($z$ direction). For all angular frequencies, a clean vortex lattice is generated as in the case of a scalar BEC. The only difference from a scalar BEC is that the central spot is vortex free for component $j=+1$ and hosts a vortex of circulation $+2$ in component $j=-1$. In the component $j=0$, the central spot has a vortex of circulation $+1$ as in a scalar BEC. A vortex of circulation $+2$ (greater than unity) in a scalar BEC should break into two of unit circulation from energetic considerations \cite{fetter}. However, in this SO-coupled spin-1 spinor BEC, the vortex of circulation $+2$ at the center of component $j=-1$ is found to be energetically stable. \begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3f.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3g.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3h.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig3i.png} \caption{(Color online) The same as in figure \ref{fig1} with approximate square symmetry for angular frequencies $\Omega = 0.4, 0.65, $ and 0.835 in (a)-(c), (d)-(f), and (g)-(i), respectively. The parameters of the ferromagnetic BEC are the same as in figure \ref{fig1}.} \label{fig3} \end{figure} \begin{table}[!b] \caption{(Color online) Energy of the different vortex-lattice and anti-vortex-lattice states of hexagonal and approximate square symmetry for a ferromagnetic and anti-ferromagnetic (polar) BEC and the corresponding minimum energy state.
In all cases the lattice has all concentric orbits with the maximum number of vortices.} \label{tab1} \begin{tabular}{ll|lll } \hline \hline & $\Omega$ & $E$ & $E$ & minimum \\ & & (hexagonal) & (square) & energy state \\ \hline & $+0.4$ & 12.262 & 12.207 & square \\ & $+0.65$ & 9.853 & 9.820 & square \\ ferro & $+0.835$ & 6.599 & 6.583 & square \\ & $-0.39$ & 12.315 & 12.264 & square \\ & $-0.62$ & 10.203 & 10.192 & square \\ & $-0.82$ & 6.921 & 6.915 & square \\ \hline & $+0.55$ & 6.751 & 6.621 & square \\ & $+0.795$ & 4.539 & 4.512 & square \\ polar & $-0.55$ & 6.751 & 6.715 & square \\ & $-0.79$ & 4.598 & 4.575 & square \\ \hline \end{tabular} \end{table} For the same sets of parameters as in figure \ref{fig1}, the vortex-lattice states with an approximate square symmetry are shown in figures \ref{fig3}(a)-(i) for angular frequencies $\Omega =0.4, 0.65, $ and 0.835, { where vortices are arranged in approximate concentric square orbits with 8, 12, and 16 vortices.} The central spot in components $j=0$ and $-1$ hosts vortices of circulation $+1$ and $+2$, respectively, whereas the same in component $j=+1$ is vortex free. In table \ref{tab1} we display the respective energies of the vortex-lattice states of figures \ref{fig1} and \ref{fig3} and find that the states of figure \ref{fig3} have smaller energies as compared to the respective states in figure \ref{fig1}. \begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4f.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4g.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4h.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4i.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4j.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4k.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig4l.png} \caption{(Color online) The same as in figure \ref{fig1} for angular frequencies $\Omega =-0.05, -0.39, -0.62,$ and $-0.82$ in plots (a)-(c), (d)-(f), (g)-(i), and (j)-(l), respectively. The angular momentum of rotation is anti-parallel to the vorticity direction of the non-rotating state in figures \ref{fig1}(a)-(c). The parameters of the ferromagnetic BEC are the same as in figure \ref{fig1}.
} \label{fig4} \end{figure} \begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5f.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5g.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5h.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig5i.png} \caption{(Color online) The same as in figure \ref{fig4} with approximate square symmetry for angular frequencies $\Omega = -0.39, -0.62, $ and $-0.82$ in (a)-(c), (d)-(f), and (g)-(i), respectively. The angular momentum of rotation is anti-parallel to the vorticity direction of the non-rotating state in figures \ref{fig1}(a)-(c). The parameters of the ferromagnetic BEC are the same as in figure \ref{fig4}.} \label{fig5} \end{figure} Next we consider the formation of a vortex lattice in a quasi-2D Rashba SO-coupled ferromagnetic spin-1 spinor BEC upon rotation with angular momentum opposite to the intrinsic vorticity of the non-rotating BEC, denoted by negative values of angular frequency $\Omega$ in (\ref{EQ1}) and (\ref{EQ2}). The contour plots of generated vortex lattices with hexagonal symmetry for angular frequencies $\Omega = -0.05, -0.39, -0.62$ and $-0.82$ are shown in figures \ref{fig4}(a)-(c), (d)-(f), (g)-(i), and (j)-(l), respectively. In all plots of figure \ref{fig4} the circulation of rotation has the opposite sign compared to that in figures \ref{fig1} and \ref{fig3}. Hence these vortices are anti-vortices. Upon rotation of the state of type $(0,+1,+2)$ of figures \ref{fig1}(a)-(c) with angular frequency $\Omega=-0.05$, with opposite vorticity, a state of type $(-2,-1,0)$ is generated as shown in figures \ref{fig4}(a)-(c). The opposite vorticity of the vortices in figures \ref{fig4}(a)-(c), as compared to those in figures \ref{fig1}(a)-(c), was confirmed from a plot of the corresponding phase in figures \ref{fig2}(g)-(i). The phase drop upon a clockwise rotation of $2\pi$ in figures \ref{fig2}(g) (h) is $-4\pi$ ($-2\pi$) indicating an anti-vortex of circulation $-2$ ($-1$), viz. compare with the vortices in figures \ref{fig2}(b)-(c). As the angular frequency $|\Omega|$ is increased, an anti-vortex lattice with hexagonal symmetry is generated in the three components maintaining the anti-vortices $(-2,-1,0)$ at the center, viz. figures \ref{fig4}(d)-(f), \ref{fig4}(g)-(i), and \ref{fig4}(j)-(l), for $\Omega =-0.39, -0.62,$ and $-0.82$, respectively.
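The circulations quoted here from the phase plots can also be extracted automatically from a numerically obtained wave function. A minimal Python/NumPy sketch (the test field below is a synthetic doubly charged vortex, not data from the figures) accumulates the phase increments along a small closed loop around a chosen point; the net phase change divided by $2\pi$, counted counter-clockwise, gives the circulation, negative for an anti-vortex:
\begin{verbatim}
import numpy as np

def winding(psi, x, y, x0=0.0, y0=0.0, radius=0.5, npts=400):
    """Circulation around (x0, y0): net phase change along a
    counter-clockwise loop divided by 2*pi."""
    th = np.linspace(0.0, 2.0 * np.pi, npts, endpoint=False)
    xs, ys = x0 + radius * np.cos(th), y0 + radius * np.sin(th)
    # nearest-grid sampling of the phase along the loop
    ix = np.clip(np.searchsorted(x, xs), 0, len(x) - 1)
    iy = np.clip(np.searchsorted(y, ys), 0, len(y) - 1)
    ph = np.angle(psi[ix, iy])
    dph = np.diff(np.concatenate([ph, ph[:1]]))
    dph = (dph + np.pi) % (2.0 * np.pi) - np.pi  # wrap jumps to [-pi, pi)
    return int(round(dph.sum() / (2.0 * np.pi)))

x = y = np.linspace(-3.0, 3.0, 256)
X, Y = np.meshgrid(x, y, indexing="ij")
psi = (X + 1j * Y) ** 2 * np.exp(-(X**2 + Y**2))  # vortex of circulation +2
print(winding(psi, x, y))                         # prints 2
\end{verbatim}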
\begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6f.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6g.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6h.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig6i.png} \caption{(Color online) Contour plot of component densities $n_j(\boldsymbol \rho)\equiv | \psi _j(\boldsymbol \rho)|^2$ of vortex-lattice states of a rotating Rashba SO-coupled anti-ferromagnetic spin-1 quasi-2D spinor BEC with hexagonal symmetry for angular frequencies $\Omega =0, 0.55,$ and $0.795$ in plots (a)-(c), (d)-(f), and (g)-(i), respectively. The angular momentum of rotation is parallel to the vorticity direction of the vortex in component $j=-1$ in (c). The non-linearity parameters $c_0=482, c_2=15$, and SO-coupling strength $\gamma =0.5$. } \label{fig6} \end{figure} Apart from the anti-vortex lattice with hexagonal symmetry of figure \ref{fig4}, one can also have the same with approximate square symmetry for the same angular frequencies $\Omega= -0.39, -0.62$ and $-0.82$ as shown in figures \ref{fig5}(a)-(c), (d)-(f) and (g)-(i), respectively. The central part in figure \ref{fig5} has 4 and 3 anti-vortices of unit circulation in components $j=+1$ and $0$, respectively, and an anti-vortex of circulation $-2$ in component $j=-1$. The net circulation of the anti-vortex at the center of component $j =-1$ was obtained from a consideration of the phase plot of the wave function for $\Omega= -0.39$ as displayed in figures \ref{fig2}(j)-(l). The phase drop upon a clockwise rotation of $2\pi$ in figure \ref{fig2}(l) is $-4\pi$ indicating a circulation of $-2$ in component $j=-1$. The other anti-vortices of unit circulation in figures \ref{fig5}(a)-(c) can be identified in the phase plots in figures \ref{fig2}(j)-(l). This central part is a superposition of the non-rotating state $(0,+1,+2)$ and the state $(-4,-4,-4)$ generated by rotation. The energies of the anti-vortex-lattice states with hexagonal and square symmetries of figures \ref{fig4} and \ref{fig5} are shown in table \ref{tab1} for angular frequencies $\Omega = -0.39, -0.62$ and $-0.82$. \begin{figure}[!t] \centering \includegraphics[width=.32\linewidth]{fig7a.png} \includegraphics[width=.32\linewidth]{fig7b.png} \includegraphics[width=.32\linewidth]{fig7c.png} \includegraphics[width=.32\linewidth]{fig7d.png} \includegraphics[width=.32\linewidth]{fig7e.png} \includegraphics[width=.32\linewidth]{fig7f.png} \includegraphics[width=.32\linewidth]{fig7g.png} \includegraphics[width=.32\linewidth]{fig7h.png} \includegraphics[width=.32\linewidth]{fig7i.png} \caption{(Color online) (a)-(c) Contour plot of the phase $\delta({\boldsymbol \rho})$ of the wave function of the rotating anti-ferromagnetic spinor BEC, with angular frequency $\Omega=0.55$, of figures \ref{fig6}(d)-(f). (d)-(f) The same of the rotating anti-ferromagnetic spinor BEC, with angular frequency $\Omega=0.55$, of figures \ref{fig8}(a)-(c).
(g)-(i) The same of the rotating anti-ferromagnetic spinor BEC, with angular frequency $\Omega=-0.55$, of figures \ref{fig10}(a)-(c). } \label{fig7} \end{figure} \subsection{Anti-ferromagnetic condensate} The vortex-lattice formation with hexagonal symmetry in an anti-ferromagnetic rotating Rashba SO-coupled quasi-2D $^{23}$Na spin-1 BEC with SO-coupling strength $\gamma=0.5$, and non-linearities $c_0=482$ and $c_2 =15$ is demonstrated in figure \ref{fig6}. The non-rotating state ($\Omega=0$), in this case, is of the $(-1,0,+1)$ type as shown in figures \ref{fig6}(a)-(c) through a contour plot of densities. The vortex and anti-vortex nature of the two states is confirmed from the corresponding phase plot of the wave function (not shown here). Upon rotation, the $(-1,0,+1)$-type state, with the appearance of a vortex of circulation $+1$ in all components, transforms into a state of the $(0,+1,+2)$ type. These vortices of circulation $+1$ and $+2$ at the centers of components $j=0$ and $-1$ are maintained in the vortex lattice with hexagonal symmetry of a rapidly rotating anti-ferromagnetic quasi-2D spin-1 spinor BEC as in the case of a rapidly rotating ferromagnetic spin-1 spinor BEC considered in figure \ref{fig1}. We display the formation of the vortex lattice with hexagonal symmetry in the anti-ferromagnetic spinor BEC for angular frequencies $\Omega = 0.55$ and $0.795$ in figures \ref{fig6}(d)-(f) and (g)-(i), respectively. We checked the vorticity and circulation of the components by analyzing the phase plot of the wave function of the BEC displayed in figures \ref{fig6}(d)-(f), viz. figures \ref{fig7}(a)-(c). The phase drop upon a clockwise rotation of $2\pi$ in figure \ref{fig7}(b) (c) is $2\pi$ ($4\pi$) indicating circulation $+1 (+2)$ at the center. The $j =-1$ component with circulation $+2$ has a larger vortex core than the $j = 0$ component with circulation $+1$. The hexagonal vortex lattices in figure \ref{fig6} for an anti-ferromagnetic quasi-2D spin-1 spinor BEC are quite similar to the vortex lattices in figure \ref{fig1} for a ferromagnetic spinor BEC. \begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig8a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig8b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig8c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig8d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig8e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig8f.png} \caption{(Color online) The same as in figure \ref{fig6} with approximate square symmetry with angular frequencies $\Omega = 0.55 $ and 0.795 in (a)-(c) and (d)-(f), respectively. The parameters of the anti-ferromagnetic BEC are the same as in figure \ref{fig6}.} \label{fig8} \end{figure} Next we consider the formation of a vortex lattice with approximate square symmetry in a Rashba SO-coupled anti-ferromagnetic quasi-2D spinor BEC upon rotation. The resultant vortex lattices, in this case, for angular frequencies $\Omega =0.55$ and 0.795 are displayed in figures \ref{fig8}(a)-(c) and (d)-(f), respectively. The central region of the vortex lattice, in this case, is different from that in figure \ref{fig6}. The central region is formed by a superposition of the $(-1,0,+1)$-type state of the non-rotating BEC, viz.
figures \ref{fig6}(a)-(c), with a $(+4,+4,+4)$-type state formed by rotation, thus resulting in a state with a vortex of circulation $+3$ ($+4$, $+5$) in component $j=+1$ ($j=0, j=-1$). The vortex of circulation $+4$ ($+5$) in component $j=0$ ($j=-1$) breaks into 4 (5) vortices of unit circulation, whereas the $j=+1$ component contains a complex vortex structure of circulation $+3$, as can be seen from a phase plot of the wave function of the condensate displayed in figures \ref{fig8} (a)-(c), viz. figures \ref{fig7}(d)-(f). In the outer region we have vortices arranged in concentric square orbits with 8 and 12 vortices, viz. figures \ref{fig8}(a)-(c) and (d)-(f). Hence, although the vortex lattices with hexagonal symmetry in figures \ref{fig1} and \ref{fig6} are quite similar, those with approximate square symmetry in figures \ref{fig3} and \ref{fig8} for ferromagnetic and anti-ferromagnetic spinor BECs are different in the central region while maintaining similarity in the outer region. \begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig9a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig9b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig9c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig9d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig9e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig9f.png} \caption{(Color online) The same as in figure \ref{fig6} for angular frequencies $\Omega =-0.55$ and $-0.79$ in (a)-(c) and (d)-(f), respectively. The angular momentum of rotation is anti-parallel to the vorticity direction of the vortex in component $j=-1$ in figure \ref{fig6} (c). The parameters of the anti-ferromagnetic BEC are the same as in figure \ref{fig6}. } \label{fig9} \end{figure} \begin{figure}[!t] \centering \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig10a.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig10b.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig10c.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig10d.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig10e.png} \includegraphics[trim = 6mm 0mm 11mm 0mm,clip,width=.32\linewidth]{fig10f.png} \caption{(Color online) The same as in figure \ref{fig6} with approximate square symmetry with angular frequencies $\Omega = -0.55$ and $-0.79$ in (a)-(c) and (d)-(f), respectively. The angular momentum of rotation is anti-parallel to the vorticity direction of the vortex in component $j=-1$ in figure \ref{fig6} (c). The parameters of the anti-ferromagnetic BEC are the same as in figure \ref{fig6}.} \label{fig10} \end{figure} Let us next consider the formation of an anti-vortex lattice with hexagonal symmetry in a rotating Rashba SO-coupled anti-ferromagnetic spinor BEC with the angular momentum of rotation opposite to the vorticity direction of the vortex in component $j=-1$ of figure \ref{fig6}(c). The non-rotating state in figures \ref{fig6}(a)-(c) is of the $(-1, 0, +1)$ type. For small angular frequency of rotation, an anti-vortex is generated in all three components in the form of a $(-1,-1,-1)$-type state, which, when superposed on the $(-1, 0, +1)$-type state, leads to a $(-2,-1,0)$-type state at the center.
The generated anti-vortex lattice in this case maintains this scenario in the central region, e.g., one anti-vortex (two anti-vortices) of circulation $-1$ in component $j=0$ ($j=+1$) and none in component $j=-1$, around which the hexagonal vortex lattice is formed. This is illustrated by a plot of contour density of components $j=+1,0,-1$ for angular frequencies $\Omega=-0.55$ and $-0.79$ in figures \ref{fig9}(a)-(c) and (d)-(f), respectively. The generated anti-vortex lattice states in figure \ref{fig9} for the anti-ferromagnetic phase are identical to those in figure \ref{fig4} for the ferromagnetic phase. Finally, we consider the formation of an anti-vortex lattice with square symmetry in a rotating SO-coupled anti-ferromagnetic spinor BEC with the angular momentum of rotation opposite to the vorticity direction of the vortex in component $j=-1$, viz. figure \ref{fig6}(c). In this case, the generated anti-vortex lattice displayed in figure \ref{fig10} for $\Omega =-0.55$ and $-0.79$ is quite similar to the anti-vortex lattice in the case of a ferromagnetic spinor BEC presented in figure \ref{fig5}. The vortices for $\Omega =-0.55$ can be identified from a phase plot of the wave function in figures \ref{fig7}(g)-(i). The anti-vortex lattice with square symmetry for $\Omega =-0.55$ and $-0.79$ is presented in figures \ref{fig10}(a)-(c) and (d)-(f), respectively. In table \ref{tab1} we also display the energies of anti-vortex-lattice states of hexagonal and square symmetries of figures \ref{fig9} and \ref{fig10}. The energies of the hexagonal anti-vortex-lattice {states are larger than those of} the corresponding states with square symmetry in all cases. \section{Discussion} { We now compare the present results of vortex-lattice formation in a Rashba SO-coupled spin-1 BEC with previous results \cite{yy,yy2,yy1} for vortex-lattice formation in a Rashba SO-coupled pseudo spin-1/2 BEC. A Rashba SO-coupled pseudo spin-1/2 BEC supports a half-quantum vortex state with a unit vortex in one component and no vortex in the other, which is a $(+1,0)$-type state with intrinsic vorticity. This state should be compared with the $(-1,0,+1)$- and $(0,+1,+2)$-type states in the present Rashba SO-coupled spin-1 BEC. The vortex lattice for the pseudo spin-1/2 BEC has a vortex of unit circulation at the center of one component, whereas the center of the other component has no vortex, in analogy with vortices of circulation 0, $+1$, and $+2$ at the centers of the three components, viz. figures \ref{fig1}(d)-(f), in the ferromagnetic case. The presence of the $(+1,0)$-type state with intrinsic vorticity in a pseudo spin-1/2 BEC breaks the symmetry between rotation with vorticity along the $z$ and $-z$ axes and thus might generate different vortex-lattice and anti-vortex-lattice states in a rotating SO-coupled quasi-2D pseudo spin-1/2 BEC for these two types of rotation. However, the previous studies \cite{yy,yy1} did not explore this possibility. The detailed numerical study \cite{yy2} for vortex-lattice formation in the pseudo spin-1/2 case confirmed the formation of lattices with hexagonal symmetry only. In this study on the spin-1 BEC, in addition, we also demonstrate the formation of lattices with square symmetry. } \begin{figure}[!t] \centering \includegraphics[width=.99\linewidth]{fig11.pdf} \caption{(Color online) The rotational energy in the rotating frame $[E(\Omega)-E(0)]$ versus angular frequency of rotation for the ferromagnetic and anti-ferromagnetic BEC.
The points are numerically calculated whereas the lines are to guide the eye.} \label{fig11} \end{figure} The rotational energy of a scalar BEC $[E(\Omega)-E(0)]$ in the rotating frame is the energy of rigid-body rotation $\sim -I\Omega^2/2$, where $I$ is the moment of inertia of the condensate and $E(\Omega)$ is given by (\ref{energy}); this energy is proportional to the square of the angular frequency \cite{fetter}. In the case of spin-1 spinor ferromagnetic and anti-ferromagnetic BECs, a similar relation also holds. We illustrate in figure \ref{fig11} the rotational energy of the vortex- and anti-vortex-lattice states of both ferromagnetic and anti-ferromagnetic BECs with square and hexagonal symmetry as a function of the angular frequency $\Omega$, where we plot the energy of the minimum-energy state versus $|\Omega|$. The energies of ferromagnetic and anti-ferromagnetic BECs lie on two distinct lines showing similar qualitative behavior. The energy decreases with increasing angular frequency of rotation as the contribution of the rotational energy $-\Omega L_z$ in the expression for energy (\ref{energy}) is negative for large $|\Omega|$. For small $\Omega$, in the perturbative limit, this contribution is linearly proportional to $\Omega$. Hence as $\Omega \to 0$ ($|\Omega| \lessapprox 0.1$), the rotational energy for positive (negative) values of $\Omega$ is positive (negative). For $|\Omega| \gtrapprox 0.1$ the rotational energies for both positive and negative $\Omega$ are negative. For $|\Omega| \gtrapprox 0.2$ these two energies are practically equal and negative. The difference between the energies for $\pm \Omega$ is small; for clarity of the plot, this detail is not displayed in figure \ref{fig11} and an average of the two energies is exhibited for small $|\Omega|$. But as $|\Omega|$ increases, the rotational energy behaves as $\sim -\Omega^2$ \cite{fetter}. \section{Summary} \label{IV} We studied the formation of a vortex lattice in a quasi-2D Rashba SO-coupled spin-1 spinor BEC in the $x-y$ plane, under rapid rotation, using the mean-field GP equation in the rotating frame, where the generated vortex-lattice state is a stationary state. In the case of a scalar BEC, the generated vortex lattices for rotation with vorticity along the $z$ and $-z$ axes are the same. The { lowest-energy circularly-symmetric} state of a non-rotating ferromagnetic Rashba SO-coupled spin-1 spinor BEC is of the $(0,+1,+2)$ type, whereas the same for an anti-ferromagnetic BEC is of the type $(-1,0,+1)$. The intrinsic vorticity of these two states makes the rotation with vorticity along the $z$ and $-z$ axes conceptually different for an SO-coupled spin-1 spinor BEC. Consequently, different from a scalar BEC, the generated vortex-lattice structures for a quasi-2D Rashba SO-coupled spin-1 spinor BEC for rotation with vorticity along the $z$ and $-z$ axes are different. For rotation with vorticity along the $z$ direction, a vortex lattice is formed and for rotation with vorticity along the $-z$ direction an anti-vortex lattice is formed. Two types of vortex and anti-vortex lattices were found to be formed predominantly: a hexagonal lattice and an approximate square lattice. For rotation with vorticity along the $z$ direction, the hexagonal lattice has vortices arranged in closed concentric orbits which accommodate the following maximum number of vortices: 6, 12, 18 etc., whereas the square lattice has vortices arranged in closed concentric orbits with the maximum numbers 8, 12, 16 etc.
We illustrated, for different angular frequencies, the formation of vortex lattices with closed concentric orbits of vortices, with all orbits accommodating the allowed maximum number of vortices. The central region in both cases is occupied by a complex structure of vortices, often accommodating vortices of circulation (angular momentum) greater than unity. In the case of a scalar BEC, all vortices in a vortex lattice are of unit circulation. For rotation with vorticity along the $-z$ axis, a similar lattice structure emerges but with anti-vortices replacing vortices, although the central region of the lattice may have a different distribution of vortices from the case of rotation with vorticity along the $z$ axis. Such a lattice structure is termed an anti-vortex lattice as opposed to a vortex lattice. If, for a fixed angular frequency of rotation, both a square and a hexagonal vortex or anti-vortex lattice can be formed, with closed concentric orbits, the square vortex-lattice structure is found to possess the smaller energy as shown in table \ref{tab1}. This strongly suggests that the square-lattice states are the { lowest-energy} states. For larger values of $\gamma$ (not considered in this paper), we could not find the hexagonal lattice states; only square lattice states were found. The rotational energy of the generated vortex or anti-vortex lattice for both ferromagnetic and anti-ferromagnetic BECs is found to be proportional to the square of the angular frequency $\Omega$ as displayed in figure \ref{fig11}, consistent with a theoretical suggestion by Fetter for a scalar BEC \cite{fetter}. \section*{Acknowledgments} \noindent S.K.A. acknowledges support by the CNPq (Brazil) grant 301324/2019-0, and by the ICTP-SAIFR FAPESP (Brazil) grant 2016/01343-7.
\section{Introduction} Let $(M,g)$ be a smooth, connected, compact Riemannian manifold without boundary of dimension $d$. Let $\Delta$ be the Laplace-Beltrami operator on $M$ for the metric $g$ and let $a$ be a smooth function from $M$ to $\mathscr{H}_n^+(\mathbf C)$, the space of positive-semidefinite Hermitian matrices of dimension $n$. We are interested in the following system of equations \begin{equation}\label{dampedwaveequation} \left\lbrace \begin{array}{l} (\partial_t^2 -\Delta +2a(x)\partial_t)u=0 \; \text{ in } \; \mathcal D'(\mathbf R\times M)^n \\ u_{|t=0}=u_0\in H^1(M)^n\; \text{ and } \; \partial_tu_{|t=0}=u_1\in L^2(M)^n\text{.} \end{array} \right. \end{equation} Let $H=H^1(M)^n\oplus L^2(M)^n$ and define on $H$ the unbounded operator \[ A_a=\begin{pmatrix} 0 & \mathrm{Id}_n \\ \Delta & -2a \end{pmatrix} \text{ of domain } D(A_a)=H^2(M)^n\oplus H^1(M)^n\text{.} \] By application of the Hille-Yosida theorem to $A_a$, the system \eqref{dampedwaveequation} has a unique solution in the space $C^0(\mathbf R,H^1(M)^n)\cap C^1(\mathbf R, L^2(M)^n)$; from now on we will identify $H$ with the space of solutions of \eqref{dampedwaveequation}. The Euclidean norm on $\mathbf R^n$ or $\mathbf C^n$ will be written $|\cdot|$, and we will write $\langle\cdot ,\cdot\rangle_{\mathcal H}$ for the inner product of a Hilbert space $\mathcal H$, or simply $\langle\cdot,\cdot\rangle$ when there is no possible confusion. Let us define $E(u,t)$, the energy of a solution $u$ at time $t$, by the formula \[ E(u,t)=\frac{1}{2}\int_{M}|\partial_t u(t,x)|^2 + |\nabla u(t,x)|^2 \mathrm{d} x \] where $|\nabla u(t,x)|^2=g_x(\nabla u(t,x),\nabla u(t,x))$. We then have the relation \begin{equation}\label{formuleenergie} E(u,T)=E(u,0)-\int_0^T\int_{M}\big\langle 2a(x)\partial_t u(t,x),\partial_t u(t,x)\big\rangle_{\mathbf C^n}\mathrm{d} x \mathrm{d} t \text{.} \end{equation} The energy is thus a non-increasing function of time. We are interested in the problem of stabilization of the wave equation; that is, determining the long time behavior of the energy. This has been well studied in the scalar setting ($n=1$) but not so much in the vectorial setting ($n>1$). Nevertheless, the stabilization of the vectorial wave equation is an interesting and naturally occurring problem. The aim of this article is to adapt and prove, in the vectorial case, some classical results of scalar stabilization; we will also highlight the main differences between the two settings. The most basic result about stabilization of the wave equation is probably the following. \begin{theo}\label{stabfaible} The following conditions are equivalent. \begin{description} \item[(i)] $\forall u \in H \; \; \displaystyle \lim_{t\to \infty}E(u,t)=0$ \item[(ii)] The only eigenvalue of $A_a$ on the imaginary axis is $0$. \end{description} Moreover, if $a$ is positive definite at one point (and thus on an open set) then the two conditions above are satisfied. \end{theo} The condition \textbf{(i)} is called weak stabilization of the damped wave equation. For a succinct proof of this result see the introduction of \cite{leb93}; for a more detailed proof in a simpler setting see Theorem 4.2 of \cite{buge01}. Note that when $n=1$ (\textit{i.e.} in the scalar case) there is a more satisfactory result stating that the condition \textbf{(i)} is equivalent to $a\neq 0$. \begin{theo}\label{stabforte} The following conditions are equivalent.
\textbf{\textup{(i)}} There is weak stabilization and for every maximal geodesic $s\in \mathbf R \mapsto x_s$ of $M$ we have \[ \bigcap_{s\in \mathbf R} \ker(a(x_s))=\{0\}.\label{GCC}\tag{GCC} \] \textbf{\textup{(ii)}} There exist two constants $C,\beta >0$ such that for all $u\in H$ and for every time $t$ \[ E(u,t)\leq C e^{-\beta t} E(u,0)\text{.} \] \end{theo} The condition on the intersections of the kernels of $a(x_t)$ is called the Geometric Control Condition (GCC) and the condition \textbf{(ii)} is called strong stabilization of the damped wave equation. For $n=1$ this theorem has been proved in the more general setting of a Riemannian manifold with boundary by Bardos, Lebeau, Rauch and Taylor (\cite{rata74} and \cite{blr92}). Note that, when $n=1$, the weak stabilization hypothesis is not needed because it is a consequence of the geometric control condition. However when $n>1$ the geometric condition alone does not imply strong or even weak stabilization as we shall see later, so this hypothesis is necessary. It is still an open problem to find a purely geometric condition equivalent to strong stabilization of the vectorial wave equation. To my knowledge Theorem \ref{stabforte} has not been proved in the existing literature, but it seems that it was already known by people well acquainted with the field. We will get a proof of Theorem \ref{stabforte} as a corollary of Theorem \ref{bigtheoreme}. \begin{definition} We denote the best exponential decay rate of the energy by $\alpha$, defined as follows: \[\alpha=\sup \{\beta \in \mathbf R : \exists C>0 , \forall u\in H, \forall T>0 , E(u,T)\leq C e^{-\beta T}E(u,0)\}\text{.}\] \end{definition} The main result of this article is Theorem \ref{bigtheoreme}; its aim is to express $\alpha$ as the minimum of two quantities. The first quantity depends on the spectrum of $A_a$ and the second one depends on a differential equation described by the values of $a$ along geodesics. However we still need to define a few things before being able to state Theorem \ref{bigtheoreme}. \paragraph*{} It is well known that $\mathrm{sp}(A_a)$, the spectrum of $A_a$, is discrete and solely contains eigenvalues $\lambda_j$ satisfying $\mathfrak{Re}(\lambda_j)\in [-2\sup_{x\in M}\|a(x)\|_2;0]$ and $|\lambda_j|\to \infty$. This comes from the fact that $D(A_a)$ is compactly embedded in $H$ and that, for $\mathfrak{Re }(\lambda)\notin [-2\sup_{x\in M}\|a(x)\|_2;0] $, the operator $(A_a-\lambda \mathrm{Id})$ is bijective from $D(A_a) $ to $H$ and has a continuous inverse. Moreover the spectrum of $A_a$ is invariant under complex conjugation. We will denote by $E_{\lambda_j}$ the generalized eigenspace of $A_a$ associated with $\lambda_j$; this subspace is defined as \[ E_{\lambda_j}=\left\{u\in D(A_a) : \exists k \in \mathbf{N} , (A_a-\lambda_j)^ku=0 \right\} \] and is of finite dimension. We next define the following quantities. \begin{equation} D(R)=\sup \{ \mathfrak{Re}(\lambda_j) : \lambda_j \in \mathrm{sp}(A_a), |\lambda_j| > R \}\text{,} \;\; D_0=\lim_{R\to 0^+} D(R) \; \text{ and } \; D_\infty=\lim_{R\to\infty}D(R)\text{.} \end{equation} These quantities are all non-positive and for every $R>0$ we have $D_0\geq D(R)\geq D_\infty$. The quantity $D_0$ is sometimes called the spectral abscissa of $A_a$. \paragraph*{}Since $M$ is a Riemannian manifold there is a natural isometry between $T_xM$ and $T^*_xM$ \textit{via} the scalar product $g_x$.
The scalar product defined on $T^*_xM$ by this isometry is called $g^x$ and if $\xi\in T_x^*M$ we will write $|\xi|_g$ for $\sqrt{g^x(\xi,\xi)}$. Let us call $S^*M$ the cotangent sphere bundle of $M$, that is, the subset $\{(x,\xi)\in T^*M : |\xi|_g=1/2\}$ of $T^*M$. We call $\phi$ the geodesic flow on $S^*M$ and recall that it corresponds to the Hamiltonian flow generated by $|\xi|_g^2$. In everything that follows $(x_0,\xi_0)$ will denote a point of $S^*M$ and we will write $(x_t,\xi_t)$ for $\phi_t(x_0,\xi_0)$. We now introduce the function $G^+_t : S^* M \to \mathscr M_n(\mathbf C)$, where $t$ is a real number. It is defined as the solution of the differential equation \begin{equation}\label{eq:equationG} \left\lbrace \begin{array}{l} G^+_0(x_0,\xi_0)=\mathrm{Id}_n \\ \partial_t G^+_t(x_0,\xi_0)=-a(x_t)G^+_t(x_0,\xi_0)\text{.} \end{array} \right. \end{equation} We shall see later that $G_t^+$ is a cocycle map; this means that it satisfies the relation $G_{s+t}^+(x,\xi)=G^+_t(\phi_s(x,\xi))G_s^+(x,\xi)$. In the scalar-like case where $a(x)$ is everywhere a diagonal matrix, the matrix $G_t^+$ is simply described by the formula \begin{equation}\label{scalarG} G^+_t(x_0,\xi_0)=\exp\left(-\int_0^t a(x_s) \mathrm{d} s \right)\text{.}\end{equation} As we will see, the fact that this formula is no longer true in the general setting is the main reason why new phenomena arise in comparison to the scalar case (see for example Proposition \ref{prop4}). Let us define for every $t>0$ the quantities \begin{equation} C(t)\overset{\mathrm{def}}{=}\frac{-1}{t}\sup_{(x_0,\xi_0)\in S^*M} \ln \left( \|G_t^+(x_0,\xi_0)\|_2 \right) \;\text{ and }\; C_\infty=\lim_{t\to \infty }C(t)\text{;} \end{equation} we will see later that this limit does exist. In the scalar case one also has the simpler formula \begin{equation}\label{eq:formuleGscalaire} C(t)=\frac{1}{t} \inf_{(x_0,\xi_0)\in S^*M} \int_0^t a(x_s)\mathrm{d} s\text{.} \end{equation} There is a similar but more complex formula in the general case. Denote by $y_t$ a vector of $\mathbf C^n$ of euclidean norm $1$ such that \begin{equation} G^+_{t}(x_0,\xi_0)G^+_{t}(x_0,\xi_0)^* y_t= \|G^+_{t}(x_0,\xi_0)\|_2^2y_t \text{.} \end{equation} The vector $y_t$ obviously depends on $(x_0,\xi_0)$ even though this is not explicitly written. We then have for every $t>0$ \begin{equation}\label{eq:formuleCinftyagainagain} C(t)=\frac{1}{t} \inf_{(x_0,\xi_0)\in S^*M} \int_0^t \langle a(x_s)y_s,y_s\rangle\mathrm{d} s \text{.} \end{equation} This formula is a direct consequence of Proposition \ref{prop:formulenormeG} and does not depend on the choice of $y_s$. Since $a$ is Hermitian positive semi-definite it follows from \eqref{eq:formuleCinftyagainagain} that $C(t)\geq 0$ and $C_\infty\geq 0$. Recall also that $D(0)\leq 0$; we can finally state the main result of this article: \begin{theo}\label{bigtheoreme} The best exponential decay rate is given by the formula \begin{equation} \alpha= 2\min\{-D_0; C_\infty\}\text{,} \end{equation} moreover we have the following properties. \begin{description} \item[(i)]$C_\infty\leq -D_\infty$ \item[(ii)]One can have $-D_0>0$ and $C_\infty=0$. \item[(iii)]One can have $C_\infty>0$ and $D_0=0$, but only if $n>1$. \end{description} \end{theo} This result has already been proved by G. Lebeau (\cite{leb93}) for $n=1$ on a Riemannian manifold \textit{with boundary}.
The novelty of this article thus comes from the fact that we are dealing with vectorial waves with a matrix damping term; this leads to the appearance of interesting new phenomena in comparison to the scalar setting (see for example Section \ref{section4}). The proof of Theorem \ref{bigtheoreme} stays close to the one of Lebeau, so it is quite likely that it would extend to the case where $\partial M \neq \emptyset$ if one were willing to adapt Corollary \ref{propagationmesure}. Let us also point out a similar result about the asymptotic behavior of the observability constant of the wave equation in Theorem 2 and Corollary 4 of \cite{hpt16}. \begin{remarque} We will show in the proof of Theorem \ref{stabforte} that the geometric control condition is in fact equivalent to $C_\infty >0$. Combining this with point \textbf{(iii)} of Theorem \ref{bigtheoreme} we already see that \eqref{GCC} is not equivalent to strong stabilization when $n>1$. Moreover, using point \textbf{(i)} of Theorem \ref{bigtheoreme}, we see that when $C_\infty>0$ and $D_0=0$ we have \eqref{GCC} but weak stabilization still fails. \end{remarque} \begin{remarque} Proposition \ref{lemmeinegaliteahauteenergie} and Proposition \ref{gaussianbeamenergylocalisation} show that $C_\infty$ accounts for the energy decay of the high frequency solutions of \eqref{dampedwaveequation}. On the other hand we have $D_0\geq D_\infty$ and $-C_\infty \geq D_\infty$, so if $-D_0<C_\infty$ there exists an eigenfunction $u$ of $A_a$ such that $E(u,t)=e^{2D_0 t}E(u,0)=e^{-\alpha t}E(u,0)$. This means that $D_0$ accounts for the energy decay of low frequency solutions of \eqref{dampedwaveequation}. \end{remarque} \paragraph*{High frequency overdamping} A natural question is how $\alpha$ behaves as a function of the damping term $a$. Let us respectively write $\alpha(a)$, $D_0(a)$ and $C_\infty(a)$ for the quantities $\alpha$, $D_0$ and $C_\infty$ associated with a damping term $a$. An interesting fact is that the function $a\mapsto \alpha(a)$ is not monotone, even in the simplest case. Indeed, in \cite{cozu93} S. Cox and E. Zuazua showed that\footnote{Provided that $a$ is of bounded variation.}, in the case of a scalar damped wave equation on a string of length one, the decay rate is given by $\alpha(a)=-2D_0(a)$. They also calculated the spectral abscissa $D_0(a)$ in the case of a constant damping term and found $D_0(a)=-a+\mathfrak{Re}(\sqrt{a^2-\pi^2})$. This shows that increasing the constant damping term above $\pi$ actually reduces $\alpha(a)$; such a phenomenon is called ``overdamping'' (see the short computation below). Theorem 2 of \cite{leb93} shows that for a scalar damped wave equation on a general manifold the decay rate $\alpha(a)$ is governed by $D_0(a)$ \textit{and} $C_\infty(a)$. However, in that case the overdamping can only come from $D_0$ since $a\mapsto C_\infty(a)$ is obviously monotone, super-additive and positively homogeneous by \eqref{eq:formuleGscalaire}. In view of the previous remark it makes sense to call this phenomenon ``low frequency overdamping''. On the other hand, with the \textit{vectorial} damped wave equation the situation is different. We will show that $a\mapsto C_\infty(a)$ is neither monotone, nor additive, nor homogeneous, and thus an overdamping phenomenon can also come from the $C_\infty$ term. Once again, in view of the previous remark we call this phenomenon ``high frequency overdamping''. Below, Figure \ref{fig:intro1} illustrates the nonlinear behavior of $\lambda\mapsto C_\infty(\lambda a)$ in a specific example.
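Let us spell out, for convenience, the two-line computation behind this low frequency overdamping; it uses nothing but the Cox--Zuazua formula quoted above. For a constant damping $a>\pi$ we can rationalize \[ D_0(a)=-a+\sqrt{a^2-\pi^2}=\frac{-\pi^2}{a+\sqrt{a^2-\pi^2}}\xrightarrow[a\to+\infty]{}0^-\text{,} \] so that $\alpha(a)=-2D_0(a)=2\pi^2/\big(a+\sqrt{a^2-\pi^2}\big)$ is decreasing on $(\pi;+\infty)$ and tends to $0$, whereas for $a\leq \pi$ one has $\mathfrak{Re}(\sqrt{a^2-\pi^2})=0$ and $\alpha(a)=2a$: past the threshold $a=\pi$, adding more damping slows the energy decay.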
To be more precise we will prove the following result. \begin{proposition}\label{prop4} The function $a\mapsto C_\infty(a)$ is neither homogeneous nor monotone; more precisely, it is possible to have $C_\infty(2a)<C_\infty(a)$ or $2C_\infty(a)<C_\infty(2a)$. It is also not additive: $C_\infty(a+b)$ can be strictly greater or strictly smaller than $C_\infty(a)+C_\infty(b)$. \end{proposition} \begin{figure}[ht] \centering \includegraphics[width=1\textwidth]{charta2aHD.pdf} \caption{Plot of the function $\lambda\mapsto C_\infty(\lambda a)$ for two different damping terms $a$ on $S^1$. } \label{fig:intro1} \end{figure} However, it seems that $C_\infty$ still has some kind of linear behavior. Namely, on $M=S^1$ and with a particular kind of damping term (see Section \ref{section4}) we are able to show that \[ \lim_{\lambda \to \infty} \frac{C_\infty(\lambda a)}{\lambda} \;\text{ and } \; \lim_{\lambda \to 0^+} \frac{C_\infty(\lambda a)}{\lambda} \] both exist and are finite. This result is proved in Section \ref{section4}, but it remains open whether this is still true for an arbitrary damping term on a general manifold $M$. \paragraph*{} The remainder of this article is organized as follows. Section \ref{section2} contains definitions and results about the propagation of the microlocal defect measures associated with a sequence of solutions of \eqref{dampedwaveequation}. These results will play an important role when bounding $\alpha$ from below. Section \ref{section3} is devoted to the proof of Theorem \ref{stabforte} and Theorem \ref{bigtheoreme}. Establishing the formula for $\alpha$ is the most difficult part: the upper bound makes use of Gaussian beams, while for the lower bound we will use the results of Section \ref{section2} together with a decomposition into high and low frequencies. Finally, in the last section we study the behavior of $C_\infty$ and prove Proposition \ref{prop4}. \section{Propagation of the microlocal defect measure}\label{section2} Let us work with the manifold $\mathbf R \times M$ endowed with the product metric induced by those of $\mathbf R$ and $M$. We will denote by $(t,\tau,x,\xi)$ the points of $T^*(\mathbf R \times M)$, where $(t,\tau)\in T^*\bf R$ and $(x,\xi)\in T^*M$. Given a point $(x,\xi)\in T^*M$ we will write $|\xi|_g^2=g^x(\xi,\xi)$ for the square of the norm of $\xi$. We moreover define $S^*(\mathbf R \times M)$ as the subset of points of $T^*(\mathbf R \times M)$ such that $\tau^2+|\xi|_g^2=1/2$ and recall that $S^*M=\{(x,\xi)\in T^*M : |\xi|^2_g=1/4\}$. We call $\phi$ the geodesic flow on $T^* M$, that is, the Hamiltonian flow generated by $|\xi|_g^2$, and $\varphi$ the Hamiltonian flow on $T^*(\mathbf R\times M)$ generated by $|\xi|_g^2-\tau^2$. In other words \[ \varphi_s(t,\tau,x,\xi)=(t-2s\tau,\tau, \phi_s(x,\xi))\text{.} \] In everything that follows $(x_0,\xi_0)$ will denote a point of $S^*M$ and we will write $(x_t,\xi_t)=\phi_t(x_0,\xi_0)$. \paragraph*{}Throughout this section we call $P$ the differential operator $\partial_t^2-\Delta $; we know that $P$ is self-adjoint on $L^2(\mathbf R \times M)^n$ and has $(|\xi|_g^2-\tau^2)\mathrm{Id}_n=p\cdot\mathrm{Id}_n$ as principal symbol. Note that $p$ is a scalar-valued function. If $b$ is a smooth function from $T^*(\mathbf R \times M)$ to $\mathscr M_n(\mathbf C)$ we write $\{p,b\}$ for the Poisson bracket of $p$ and $b$; it is defined as the matrix whose coefficients are the usual Poisson brackets $\{p,b_{ij}\}$. With this definition the basic properties of the Poisson bracket still hold.
Namely, we have a Leibniz rule $\{p,bc\}=\{p,b\}c+b\{p,c\}$ and it is linked to the Hamiltonian flow of $p$ in the usual way, that is $\partial_s( b\circ \varphi_s) (\rho) = \{p,b\}(\varphi_s(\rho))$. Moreover, if $B$ is a pseudo-differential operator of order $m$ and of principal symbol $\sigma_m(B)=b$ then $[P,B]$ is a pseudo-differential operator of order $\leq m+1$ and of principal symbol $-i\{p,b\}$. Note that this is only possible because $p\cdot\mathrm{Id}$ commutes with every matrix of $\mathscr M_n(\mathbf C)$. For more details about pseudo-differential operators see \cite{hor85}. \paragraph*{}We now recall some results about microlocal defect measures. For proofs and more details see the original article of P. Gérard \cite{ger91}. \begin{proposition} Let $(u_n)_n$ be a sequence of functions of $H^m_{\mathrm{loc}}(\mathbf R\times M)$ weakly converging to $0$. Then there exist \begin{description} \item[-]a sub-sequence $(u_{n_k})_k$, \item[-]a positive Radon measure $\nu$ on $S^*(\mathbf R \times M)$, \item[-]a matrix $M$ of $\nu$-integrable functions on $S^*(\mathbf R \times M)$ such that $M$ is Hermitian positive semi-definite $\nu$-a.e. and $\trace(M)=1$ $\nu$-a.e., \end{description} such that, for every compactly supported pseudo-differential operator $B$ with principal symbol $b$ of order $2m$ we have \begin{equation} \label{mdm} \lim_{k\rightarrow +\infty} \left\langle B u_{n_k}| u_{n_k} \right\rangle_{H^{-m},H^m}=\int_{S^*(\mathbf R \times M)}\trace(bM)\mathrm{d}\nu\text{.} \end{equation} \end{proposition} Note that here $b$ is a matrix of dimension $n$ depending on $(t,\tau,x,\xi)$. One crucial property is that $(u_{n_k})_k$ strongly converges to $0$ if and only if $\mu=M\nu=0$. \begin{definition} In the setting of the previous proposition we will call $\mu=M\nu$ the microlocal defect measure of the sub-sequence $(u_{n_k})_k$ and we will say that $(u_n)_n$ is ``pure'' if it has a microlocal defect measure without preliminary extraction of a sub-sequence. \end{definition} \begin{proposition}\label{supportP} Let $I\subset \mathbf R$ be a compact interval and $(u_n)$ be a pure sequence of $H^1(I\times M )$ weakly converging to $0$ with $M\mathrm{d}\nu$ as microlocal defect measure. Recall that $P=\partial_t^2-\Delta$ and that its principal symbol is $p\cdot\mathrm{Id}$; the following properties are equivalent: \begin{description} \item[(i)] $Pu_n \underset{n \rightarrow \infty}{\rightarrow} 0$ strongly in $H^{-1}(I\times M)$. \item[(ii)] $\nu$ is supported on the set $\{p=0\}$. \end{description} \end{proposition} \begin{proposition} Let $(u_k)_k$ be a bounded sequence of $H^1( I \times M)$ weakly converging to $0$. Assume that $u_k$ is a solution of the damped wave equation for every $k$, and let $b$ be a smooth function from $S^*( I \times M)$ to $\mathscr M_n(\mathbf C)$, $1$-homogeneous in the $(\tau,\xi)$ variables.
If $(u_k)$ is pure with microlocal defect measure $\mu=M\nu$ then \[ \int_{S^*( I \times M)} \trace \Big[(\{b,p\}-2\tau(ab+ba))M \Big] \mathrm{d} \nu =0 \text{.} \] \end{proposition} \begin{proof} Let $B$ be a pseudo-differential operator of order $1$ and with principal symbol $b$; we then have \[ \lim_{k\to \infty} \left\langle[B,P]u_k,u_k\right\rangle_{H^{-1},H^1}=\int \trace[\sigma_2([B,P])M]\mathrm{d} \nu=\frac{1}{i}\int\trace\left[\{b,p\}M\right]\mathrm{d} \nu\text{,} \] but we moreover know that $\langle [B,P]u_k,u_k\rangle=-2\langle(Ba\partial_t+a\partial_tB)u_k,u_k\rangle$, which tends to \[ -2i\int \trace[\tau(ab+ba)M]\mathrm{d} \nu\text{,} \] thus finishing the proof. \end{proof} In what follows $\mu=M\mathrm{d} \nu$ will denote the microlocal defect measure of a pure sequence $(u_k)_k$ of solutions of the damped wave equation on $\mathbf R\times M$. Here our aim is to give a relation between $\varphi_s^*\mu$ and $\mu$. The measure $\varphi_s^*\mu$ is the push-forward of $\mu$ by $\varphi_s$; it is defined by the following property: \[ \text{for every }\mu\text{-integrable function } b \text{ we have } \;\int \trace[(b\circ \varphi_s) \mathrm{d}\mu]=\int \trace[b\mathrm{d}\varphi_s^*\mu]\text{.} \] \begin{definition} For every $s\in \mathbf R$ we define the function $G_s:T^*(\mathbf R \times M) \to \mathscr{M}_n(\mathbf C) $ as the solution of the following differential equation. \[ \left\lbrace \begin{array}{l} G_0(t,\tau,x,\xi)=\mathrm{Id}_n \\ \partial_s G_s(t,\tau,x,\xi)=\{p,G_s\}(t,\tau,x,\xi)+2\tau G_s(t,\tau,x,\xi)a(x)\text{.} \end{array} \right. \] \end{definition} The matrix $G_s$ is a cocycle map, that is, it satisfies the relation $G_{s+t}(\rho)=G_t(\varphi_s(\rho))G_s(\rho)$. The proof of this fact is given for $G^+_t$ at the end of the section.
\begin{proposition} The propagation of the measure is given by the formula $\varphi_s^*\mu=G_{-s}\mu G_{-s}^*$; more precisely this means that for every continuous function $b$ compactly supported in the $(t,x)$ variables we have \[ \int_{S^*(\mathbf R \times M)} \trace[(b\circ\varphi_s)G_sMG_s^*]\mathrm{d} \nu =\int_{S^*(\mathbf R \times M)} \trace[bM]\mathrm{d} \nu \] or equivalently, for every continuous function $c$ compactly supported in the $(t,x)$ variables, \[ \int_{S^*(\mathbf R \times M)} \trace[c \,G_{-\sigma}MG_{-\sigma}^*]\mathrm{d} \nu=\int_{S^*(\mathbf R \times M)} \trace[c\circ \varphi_\sigma M]\mathrm{d} \nu\text{.} \] \end{proposition} \begin{proof} In order to show the first equality it suffices to verify that \begin{equation}\label{deriveeintegrale} \partial_s \int \trace[(b\circ\varphi_s)G_sMG_s^*]\mathrm{d} \nu =0\text{.} \end{equation} We know that we can differentiate under the integral sign, \[ \partial_s\int \trace\big[(b\circ\varphi_s)G_sMG_s^*\big]\mathrm{d} \nu =\int \trace\Big[\partial_s\big((b\circ\varphi_s)G_sMG_s^*\big)\Big]\mathrm{d} \nu=\int \trace\Big[\partial_s\big(G_s^*(b\circ\varphi_s)G_s\big)M\Big]\mathrm{d} \nu \text{.} \] Denoting by $'$ the differentiation with respect to $s$ we then get \[ \begin{array}{rcl} \displaystyle\partial_s \big(G_s^*(b\circ\varphi_s)G_s\big)&= &\displaystyle{G_s^*}'(b\circ\varphi_s)G_s + G_s^* \{p,b\circ \varphi_s\}G_s + G_s^*(b\circ\varphi_s)G_s'\\ \, &=&\displaystyle \{p,G_s^*(b\circ\varphi_s)G_s\} - \{p,G_s^*\}(b\circ\varphi_s)G_s- G^*_s(b\circ\varphi_s)\{p,G_s\}\\ \, &\,&\displaystyle +{G_s^*}' (b\circ \varphi_s)G_s + G_s^*(b\circ\varphi_s)G_s'\text{,} \end{array} \] and by application of the previous proposition \[ \int \trace[\{p,G_s^*(b\circ\varphi_s)G_s\}M]\mathrm{d} \nu = -\int\trace[(2\tau aG_s^*(b\circ\varphi_s)G_s+2\tau G_s^*(b\circ\varphi_s)G_s a)M ]\mathrm{d}\nu\text{.} \] By gathering all these terms we see that in order to have \eqref{deriveeintegrale} it suffices that \[ \partial_s G_s=\{p,G_s\}+2\tau G_s a \;\text{ and }\; \partial_s{G_s^*}=\{p,{G_s^*}\}+2\tau a {G_s^*}\text{,} \] which coincides with the definition of $G$ and proves the first formula. The last formula is obtained by simply writing $c=b\circ\varphi_s$ and $\sigma=-s$. \end{proof} \begin{proposition} The measure $\nu$ is supported on the set $\{\tau=\pm 1/2\}$. \end{proposition} \begin{proof} It is a consequence of Proposition \ref{supportP}: $\nu$ is a measure on $S^*(\mathbf R \times M)$ so $\tau^2+|\xi|^2_g=1/2$, and it is supported on the set $\{p=0\}$ because $(\partial_t^2-\Delta) u_k=-2a\partial_t u_k$ strongly converges to $0$ in $H^{-1}$. \end{proof} \begin{definition} This leads us to consider the two connected components \[ SZ^+=S^*(\mathbf R \times M)\cap\{\tau=-1/2\} \text{ and } SZ^-=S^*(\mathbf R \times M)\cap \{\tau=1/2\}, \] as well as $\mu^+=M^+\nu^+$ and $\mu^-=M^-\nu^-$ the restrictions of $\mu$ to $SZ^+$ and $SZ^-$. Moreover we will write $G_s^+$ and $G_s^-$ for the restrictions of $G_s$ to $SZ^+$ and $SZ^-$. \end{definition} With this notation we get \[ \partial_s G_s^+=\{p,G_s^+\}-G_s^+ a\text{.} \] \begin{remarque} Since the function $a$ only depends on $x$ and since the $\tau$ variable is constant on $SZ^+$ and $SZ^-$, the functions $G_s^+$ and $G_s^-$ only depend on $(x,\xi)$, so we can also consider them as functions on $S^*M$.
\end{remarque} \begin{corollaire}\label{propagationmesure} Let $B$ be a Borel set of $SZ^+$; then $\displaystyle \nu^+(\varphi_s(B))=\int_B \trace[G_{s}^+ M{G_{s}^+}^*]\mathrm{d} \nu^+$. \end{corollaire} \begin{proof} \[ \nu^+(\varphi_s(B))=\int_{SZ^+}\mathbf 1_{\varphi_s(B)}\mathrm{d} \nu^+ = \int_{SZ^+}\mathbf{1}_B\circ \varphi_{-s}\mathrm{d} \nu^+=\int_{SZ^+}\mathbf{1}_B\trace[G_s^+M{G_s^+}^*]\mathrm{d} \nu^+ \] \end{proof} The cocycle $G^+$ thus plays an important role here since it completely describes the evolution of the microlocal defect measure. We finish this section with a few useful remarks about $G^+$. \paragraph*{} A direct calculation shows that the matrix $G^+$ satisfies the following cocycle formula: \begin{equation}\label{eq:cocycleformula} \forall \rho\in S^*M, \; \forall s,t\in \mathbf R,\; \; G_{s+t}^+(\rho)=G^+_{t}(\phi_s(\rho))G^+_{s}(\rho)\text{.} \end{equation} Indeed, if we differentiate the right side with respect to $s$ we get \[ \begin{array}{rcl} \partial_s G^+_{t}(\phi_s(\rho))G^+_{s}(\rho) &=&\displaystyle G_t^+(\phi_s(\rho))\big[\{p,G^+_s\}(\rho)-G_s^+(\rho)a(\rho)\big]+\{p,G_t^+\circ\phi_s\}(\rho)G_s^+(\rho)\\ \, &=&\displaystyle \{p, (G^+_{t}\circ\phi_s)G^+_{s}\}(\rho)-G^+_{t}(\phi_s(\rho))G^+_{s}(\rho)a(\rho)\text{.} \end{array} \] The matrices $(G^+_{t}\circ\phi_s)G^+_{s}$ and $G_{s+t}^+$ thus satisfy the same differential equation with the same initial condition and are consequently equal. This cocycle formula gives us a second differential equation satisfied by $G^+$. For every $(x_0,\xi_0)\in S^*M$ \[ \partial_t G_t^+(x_0,\xi_0)=\lim_{h\to 0} \frac{G_{t+h}^+(x_0,\xi_0)-G_t^+(x_0,\xi_0)}{h}\;\; \text{ and } \;\; G_{t+h}^+(x_0,\xi_0)=G_h^+(\phi_t(x_0,\xi_0))G_t^+(x_0,\xi_0) \] \[\text{ hence } \; \partial_t G_t^+(x_0,\xi_0)=\left.\partial_s G_s^+(\phi_t(x_0,\xi_0))\right|_{s=0} \cdot G_t^+(x_0,\xi_0) =-a(x_t)G_t^+(x_0,\xi_0)\text{,} \] where $(x_t,\xi_t)=\phi_t(x_0,\xi_0)$. In accordance with the definition of $G^+$ given in the introduction, we see that it is the solution of the differential equation \begin{equation}\label{equationG} \left\lbrace \begin{array}{l} G^+_0(x_0,\xi_0)=\mathrm{Id}_n \\ \partial_t G^+_t(x_0,\xi_0)=-a(x_t)G^+_t(x_0,\xi_0)\text{.} \end{array} \right. \end{equation} Let us add a last formula which will be useful later. If we define $j:(x,\xi)\mapsto (x,-\xi)$ we have $\phi_s(j(\rho))=j(\phi_{-s}(\rho))$ and we deduce that $\partial_s (G_s^- \circ j) = -\{p,G_s^-\circ j\} + (G_s^-\circ j)a$. \section{Estimation of the best decay rate}\label{section3} \paragraph*{} Recall some definitions from the introduction. The following quantities are non-positive: \begin{equation} D(R)=\sup \{ \mathfrak{Re}(\lambda_j) : \lambda_j \in \mathrm{sp}(A_a), |\lambda_j|> R \}\text{,} \;\; D_0=\lim_{R\to 0^+} D(R) \; \text{ and } \; D_\infty=\lim_{R\to\infty}D(R)\text{.} \end{equation} For every $t\geq 0$ we choose $y_t$ a vector of $\mathbf C^n$ of euclidean norm $1$ such that \begin{equation} G^+_{t}(x_0,\xi_0)G^+_{t}(x_0,\xi_0)^* y_t= \|G^+_{t}(x_0,\xi_0)\|^2_2y_t \text{.} \end{equation} The vector $y_t$ depends on $(x_0,\xi_0)$, even though this is not written explicitly.
We then define for every $t>0$ the quantities \begin{equation} C(t)=\frac{1}{t} \inf_{(x_0,\xi_0)\in S^*M} \int_0^t \langle a(x_s)y_s,y_s\rangle\mathrm{d} s=\frac{-1}{t}\sup_{(x_0,\xi_0)\in S^*M} \ln \left( \|G_t^+(x_0,\xi_0)\|_2 \right)\, \text{ and }\, C_\infty=\lim_{t\to \infty} C(t) \text{.} \end{equation} We will see later that these definitions make sense and that they do not depend on the choice of $y_s$. Recall that $C(t)$ is non-negative. The remainder of this section is mainly dedicated to the proof of the formula for $\alpha$. Before starting, let us indicate the main steps of the proof. We first give an upper bound of $\alpha$ using Gaussian beams (also called coherent states). These are particular approximate solutions of the damped wave equation that are concentrated near a geodesic. In order to prove the lower bound of $\alpha$ we will use a high frequency inequality (Proposition \ref{lemmeinegaliteahauteenergie}) together with a decomposition of solutions of \eqref{dampedwaveequation} into high and low frequencies. \subsection{Upper bound for $\alpha$} \paragraph*{}Let $\lambda_j\in \mathrm{sp}(A_a)\backslash \{0\}$ and $u=(u_0,u_1)\in E_{\lambda_j}\backslash\{0\}$ be such that $A_a u=\lambda_j u$. The corresponding solution of \eqref{dampedwaveequation} then is $u(t,x)=e^{t\lambda_j}u_0(x)$ and we have $E(u,t)=e^{2t\mathfrak{Re}(\lambda_j)}E(u,0)$. Since $E(u,0)\neq 0$ we know that $\alpha\leq -2D(0)$. \paragraph*{} Showing that $\alpha\leq 2C_\infty$ is a bit more difficult as it requires us to construct Gaussian beams. We will start by constructing them on $\mathbf R^d$ endowed with a Riemannian metric $g$. Gaussian beams are approximate solutions of the wave equation (in a sense made precise by \eqref{eq:solapprochee}) whose energy may be arbitrarily concentrated along a geodesic up to a fixed time $T>0$ (see \eqref{eq:concentrationenergiesolutionapproche}). They will allow us to construct exact solutions to the damped wave equation whose energy is also arbitrarily concentrated along a geodesic up to some time $T$. As always, we will call $(x_t,\xi_t)=\phi_t(x_0,\xi_0)$ the points of the geodesic. We will follow and adapt the construction given in \cite{ral82} or \cite{mazu02} to suit our needs. \paragraph*{}We consider for every integer $k$ a function $u_k : \mathbf R\times\mathbf R^d \to \mathbf C^n$ given by the formula \[ u_k(t,x)=k^{-1+d/4}b(t,x)\exp(ik \psi(t,x)) \omega \] where $\psi(t,x)=\langle\xi_t,x-x_t\rangle+\frac{1}{2}\langle M_t(x-x_t),x-x_t\rangle$, $M_t$ is a $d\times d$ symmetric matrix with positive definite imaginary part, $b$ is a continuous bounded function and $\omega$ is a vector of $\mathbf C^n$. In what follows $C$ represents a positive constant that can vary from one line to another and does not depend on $k$; however, $C$ can depend on $T$. \begin{theo}[\cite{ral82}] It is possible to choose $M_t$ and $b$ such that \begin{equation}\label{eq:solapprochee} \sup_{t\in [0;T]}\|\partial_t^2 u_k(t,\cdot)-\Delta_gu_k(t,\cdot)\|_{L^2(\mathbf R^d)}\leq Ck^{-1/2}\text{,} \end{equation} \begin{equation}\label{energiesolapprochee} \forall t\in [0;T] \; \lim_{k\to \infty}E(u_k,t) \;\text{ is positive, finite and does not depend on }t\text{,} \end{equation} \begin{equation}\label{eq:concentrationenergiesolutionapproche} \sup_{t\in [0;T]} \int_{\mathbf R^d\backslash B(x_t,k^{-1/4})} |\partial_t u_k(t,\cdot)|^2+|\nabla u_k(t,\cdot)|_g^2 \mathrm{d} x \leq C\exp(-\beta \sqrt k) \end{equation} for some constant $\beta>0$. \end{theo} Under these conditions we say that $u_k$ is a Gaussian beam.
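Let us briefly indicate, as a side computation, why the prefactor $k^{-1+d/4}$ is the natural normalization. To leading order in $k$ we have $\partial_t u_k \approx ik\,\partial_t\psi \,u_k$ and $\nabla u_k \approx ik\,\nabla\psi\, u_k$, so that $|\partial_t u_k|^2+|\nabla u_k|_g^2\approx k^{d/2}\big(|\partial_t\psi|^2+|\nabla \psi|^2_g\big)|b|^2|\omega|^2e^{-2k\,\mathfrak{Im}\,\psi}$, while \[ \int_{\mathbf R^d}e^{-2k \,\mathfrak{Im}\, \psi(t,x)}\mathrm{d} x=\pi^{d/2}\det\big(k\, \mathfrak{Im}\, M_t\big)^{-1/2}=O(k^{-d/2}) \] because $2\,\mathfrak{Im}\,\psi=\langle (\mathfrak{Im}\, M_t)(x-x_t),x-x_t\rangle$ with $\mathfrak{Im}\, M_t$ positive definite. Hence $E(u_k,t)=O(1)$, consistently with \eqref{energiesolapprochee}.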
We also need a lemma of \cite{ral82}. \begin{lemme}[\cite{ral82}]\label{lem:lemme 2} Let $c\in L^\infty(\mathbf R^d)$ be a function satisfying $|x-x_0|^{-\alpha}c(x) \in L^\infty(\mathbf R^d)$ for some $\alpha \geq 0$ and some $x_0\in \mathbf R^d$, and let $A$ be a symmetric, positive definite, real $d\times d $ matrix. Then \begin{equation} \int_{\mathbf R^d}\left|c(x)\exp\big(-k\langle A(x-x_0),x-x_0\rangle\big)\right|^2\mathrm{d} x \leq C k^{-d/2-\alpha} \end{equation} for some $C>0$ that does not depend on $k$. \end{lemme} Using Lemma \ref{lem:lemme 2} with $c=|b(t,\cdot)|$ and $\alpha=0$ we see that $\|u_k(t,\cdot)\|_{L^2(\mathbf R^d)}\leq C k^{-1/2}$. Let us now define the function $v_k(t,x)=G_t^+(x_0,\xi_0)u_k(t,x)$; as we shall see, it is an approximate solution of the damped wave equation. Indeed we have \[ (\partial_t^2-\Delta_g+2a\partial_t)v_k(t,x)=G_t^+(x_0,\xi_0)\left(\partial_t^2- \Delta_g \right)u_k(t,x)+2(a(x)-a(x_t))G_t^+(x_0,\xi_0)\partial_t u_k(t,x) \] \[ +\left(a(x_t)^2-\partial_t a(x_t)-2a(x)a(x_t)\right)G_t^+(x_0,\xi_0)u_k(t,x)\overset{\mathrm{def}}{=}f_k(t,x) \] and we need to show that $\|f_k(t,\cdot)\|_{L^2}\leq C k^{-1/2}$. In order to do that we only need to prove $ \|2(a(\cdot)-a(x_t))G_t^+(x_0,\xi_0)\partial_t u_k(t,\cdot)\|_{L^2}\leq Ck^{-1/2}$ because the other terms obviously satisfy the bound. Now, since the function $x\mapsto|x-x_t|^{-1}\|a(x)-a(x_t)\|_2$ is in $L^\infty$, we can use Lemma \ref{lem:lemme 2} on $2(a(\cdot)-a(x_t))G_t^+(x_0,\xi_0)\partial_t u_k(t,\cdot)$ and we finally get \[ \sup_{t\in[0;T]}\|(\partial_t^2-\Delta_g+2a\partial_t)v_k(t,\cdot)\|_{L^2(\mathbf R^d)}\leq C k^{-1/2}\text{.} \] Moreover, we see that $v_k$ still satisfies the properties \eqref{energiesolapprochee} and \eqref{eq:concentrationenergiesolutionapproche}, although now the limit of the energy of $v_k$ may vary with $t$ because $G_t^+(x_0,\xi_0)$ does. We finally define $w_k$ as the solution of \eqref{dampedwaveequation} with initial conditions $w_k(0,\cdot)=v_k(0,\cdot)$ and $\partial_t w_k(0,\cdot)=\partial_t v_k(0,\cdot)$. By definition of $w_k$ we have $(\partial_t^2-\Delta_g+2a\partial_t)v_k=(\partial_t^2-\Delta_g+2a\partial_t)(v_k-w_k)=f_k$ and thus \[ \frac{\mathrm{d}}{\mathrm{d} t}E(v_k-w_k,t)=-2\int_{\mathbf R^d}\langle a\partial_t (v_k-w_k),\partial_t (v_k-w_k)\rangle\mathrm{d} x +\int_{\mathbf{R}^d}\mathfrak{Re}\langle f_k,\partial_t (v_k-w_k)\rangle\mathrm{d} x \text{.} \] The first term of the right hand side is non-positive and, using Cauchy-Schwarz, we can bound the second term by $Ck^{-1/2}$. Indeed, we already know that $\|f_k\|_{L^2}\leq Ck^{-1/2}$ and $\|\partial_t(v_k-w_k)\|_{L^2}$ is uniformly bounded in $k\in \mathbf N$ and $t\in [0;T]$. Since $E(w_k-v_k,0)=0$, by integrating we get \[ \sup_{t\in [0;T]} E(v_k-w_k,t)\leq C Tk^{-1/2}\text{.} \] In combination with the estimate \eqref{eq:concentrationenergiesolutionapproche} for $u_k$ we see that $w_k(t,\cdot)$ is concentrated around $x_t$; more precisely, we have \begin{equation}\label{gaussianbeamenergylocalisation} \sup_{t\in [0;T]} \int_{\mathbf R^d\backslash B(x_t,k^{-1/4})} |\partial_t w_k(t,\cdot)|^2+|\nabla w_k(t,\cdot)|_g^2 \mathrm{d} x \leq C T k^{-1/2}\text{.} \end{equation} Then we choose $\omega$ such that $\lim_{k\to\infty}E(v_k,0)=1$ and $|G^+_T(x_0,\xi_0)\omega|=\|G^+_T(x_0,\xi_0)\|_2|\omega|$, \textit{i.e.} $\omega$ is a maximal right singular vector of $G^+_T(x_0,\xi_0)$.
According to the definition of $v_k$ we have \[ E(v_k,T)=\frac{1}{2}\int_{\mathbf R^d} |G_T^+(x_0,\xi_0)\partial_t u_k(T,\cdot)-a(x_T)G_T^+(x_0,\xi_0)u_k(T,\cdot)|^2+|G_T^+(x_0,\xi_0)\nabla u_k(T,\cdot)|^2 \mathrm{d} x \] but $\|u_k(T,\cdot)\|_{L^2}\leq C k^{-1/2}$ so the term $a(x_T)G_T^+(x_0,\xi_0)u_k(T,\cdot)$ vanishes in the limit and we get \[\lim_{k\to \infty }E(v_k,T)= \|G^+_T(x_0,\xi_0)\|_2^2 \text{.}\] This in turn implies that $(w_k)_k$ is a sequence of solutions of \eqref{dampedwaveequation} which satisfies $\lim_{k\to \infty}E(w_k,0)=1$ and $\lim_{k\to \infty }E(w_k,T)= \|G^+_T(x_0,\xi_0)\|_2^2$. Summing up the discussion so far, we have \begin{proposition} For any time $T>0$, any $\varepsilon >0$ and any $(x_0,\xi_0)\in S^*\mathbf R^d$ there exists a solution $u$ of the damped wave equation such that $E(u,0)=1$ and $\left|E(u,T)-\|G^+_T(x_0,\xi_0)\|_2^2\right|<\varepsilon$. \end{proposition} Using charts, this result extends to the case of a compact Riemannian manifold $(M,g)$ and we finally get \begin{proposition}\label{gaussianbeamprop} For any time $T>0$, any $\varepsilon >0$ and any $(x_0,\xi_0)\in S^*M$ there exists a solution $u$ of the damped wave equation such that $E(u,0)=1$ and $\left|E(u,T)-\|G^+_T(x_0,\xi_0)\|_2^2\right|<\varepsilon$. \end{proposition} Define $\Gamma_t=G^+_t(x_0,\xi_0)G^+_t(x_0,\xi_0)^*$ and, for every time $t$, choose $y_t$ a vector of euclidean norm $1$ such that $\displaystyle \Gamma_ty_t=\|\Gamma_t\|_2y_t$. Let us stress again that $y_t$ and $\Gamma_t$ both implicitly depend on $(x_0,\xi_0)$. \begin{proposition}\label{prop:formulenormeG} \[\|G_t^+(x_0,\xi_0)\|_2^2=\|\Gamma_t\|_2=\exp\left(-2\int_0^t \big\langle a(x_s)y_s,y_s\big\rangle\mathrm{d} s\right)\] \end{proposition} \begin{proof} The only thing to prove is the second equality. The map $t\mapsto \Gamma_t$ is the solution of the differential equation \begin{equation}\label{defmatB} \left\lbrace \begin{array}{l} \Gamma_0=\mathrm{Id}_n \\ \partial_t \Gamma_t=-a(x_t)\Gamma_t-\Gamma_ta(x_t)\text{,} \end{array} \right. \end{equation} it is hence $C^\infty$ and \textit{a fortiori} locally Lipschitz. Consequently, the map $t\mapsto \|\Gamma_t\|_2$ is also locally Lipschitz\footnote{We cannot really do better than that in terms of regularity.}; this implies that it is differentiable at almost every $t$. Since $\Gamma_t$ is hermitian positive definite, $\|\Gamma_t\|_2=\langle\Gamma_ty_t,y_t\rangle$ and if $z$ is any other vector of norm $1$ then $\|\Gamma_t\|_2\geq \langle\Gamma_tz,z\rangle$. Fix a time $t_0$; we then have \[\begin{array}{rcl} \left.\partial_t \langle\Gamma_ty_{t_0},y_{t_0}\rangle\right|_{t=t_0} &=& -\big\langle[a(x_{t_0})\Gamma_{t_0}+\Gamma_{t_0}a(x_{t_0})] y_{t_0},y_{t_0}\big\rangle\\ \, &=& -2\|\Gamma_{t_0}\|_2\langle a(x_{t_0})y_{t_0},y_{t_0}\rangle\text{.} \end{array} \] We know that $\langle\Gamma_ty_t,y_t\rangle\geq \langle\Gamma_t y_{t_0},y_{t_0}\rangle$ for every $t$ and there is equality when $t=t_0$. If $\|\Gamma_t\|_2$ is differentiable at $t_0$ we deduce that at this point the derivatives of the two functions $t\mapsto \langle\Gamma_ty_t,y_t\rangle$ and $t\mapsto\langle\Gamma_t y_{t_0},y_{t_0}\rangle$ must be the same.
Hence for almost every time $t$ \[ \partial_t \|\Gamma_t\|_2= \partial_t \langle\Gamma_t y_t,y_t\rangle = -2\|\Gamma_t\|_2\langle a(x_t)y_t,y_t\rangle \text{.} \] To finish the proof we just need to see that the function \[ \Phi:t\mapsto \frac{\|\Gamma_t\|_2}{\displaystyle\exp\left(-2\int_0^t \big\langle a(x_s)y_s,y_s\big\rangle\mathrm{d} s\right)} \] is Lipschitz on every bounded interval $[0;T]$ and \textit{a fortiori} absolutely continuous. From $\Phi'=0$ a.e. we deduce that $\Phi$ is constant, and since $\Phi(0)=1$ this finishes the proof. \end{proof} Notice that the choice of $y_t$ is not unique and that $t\mapsto y_t$ is not continuous in general. On the other hand, the derivative of $\|\Gamma_{t}\|_2$ is uniquely defined almost everywhere, so that the choice of $y_t$ does not matter. Therefore we have \[ C(t)\overset{\mathrm{def}}{=}\frac{-1}{t}\sup_{(x_0,\xi_0)\in S^*M} \ln \left( \|G_t^+(x_0,\xi_0)\|_2 \right)=\frac{1}{t} \inf_{(x_0,\xi_0)\in S^*M} \int_0^t \langle a(x_s)y_s,y_s\rangle\mathrm{d} s\text{.} \] This function is obviously non-negative but in order to prove other properties it is easier to work with $\exp(-tC(t))=\sup_{\rho\in S^*M}\|G_t^+(\rho)\|_2$. The function $a$ is continuous on $M$ and the geodesic flow $\phi$ is continuous on $\mathbf R\times S^*M$; since $G^+$ is defined as the solution of \eqref{equationG}, the function $\|G^+\|_2$ is in turn continuous on $\mathbf R\times S^*M$. As $S^*M$ is compact, $t\mapsto \exp(-tC(t))$ is continuous and so is $t\mapsto C(t)$. We now show that $t\mapsto tC(t)$ is super-additive: let $t$ and $s$ be two non-negative reals; we have the following equivalences: \[ (t+s)C(t+s)\geq tC(t)+sC(s) \Longleftrightarrow \exp(-2(t+s)C(t+s))\leq \exp(-2tC(t))\exp(-2sC(s)) \] \begin{equation}\label{sousadditiviteinegalite} \Longleftrightarrow \sup_{(x,\xi)\in S^*M} \|G_{t+s}^+{G_{t+s}^+}^*\|_2 \leq \big(\sup_{(x,\xi)\in S^*M} \|G_{t}^+{G_{t}^+}^*\|_2\big)\cdot \big(\sup_{(x,\xi)\in S^*M} \|G_{s}^+{G_{s}^+}^*\|_2\big)\text{.} \end{equation} Recall the cocycle formula $G_{s+t}^+(\rho)=G^+_{t}(\phi_s(\rho))G^+_{s}(\rho)$; it follows that \[ G_{t+s}^+(\rho){G_{t+s}^+(\rho)}^*=G_t^+(\phi_s(\rho))G_s^+(\rho){G_s^+(\rho)}^*{G_t^+(\phi_s(\rho))}^* \] and since for any two matrices $R$ and $S$ we have $\|S^*R^*RS\|_2\leq \|S^*S\|_2\|R^*R\|_2$, the inequality \eqref{sousadditiviteinegalite} is satisfied and $t\mapsto tC(t)$ is indeed super-additive. By application of Fekete's lemma (to the sub-additive function $t\mapsto -tC(t)$) we deduce that $C(t)$ admits a limit when $t\to \infty$ and that $C(t)\leq C_\infty$ for every positive $t$. By combining the results of this section it is now easy to prove that $\alpha\leq 2C_\infty$. Assume by contradiction that $\alpha>2C_\infty$ and write $\alpha=2C_\infty+4\eta$ with $\eta >0$; this means that there exists some constant $C>0$ such that \begin{equation}\label{jesaispasquoimettrecommelabel} \forall t\geq0,\; \forall u \in H,\;E(u,t)\leq CE(u,0)\exp(-2t(C_\infty+\eta)). \end{equation} Now pick some $T$ such that $C\exp(-2T(C_\infty+\eta))<\exp(-T(2C_\infty+\eta))$. Since $C_\infty\geq C(T)$ we have $\exp(-T(2C_\infty+\eta))\leq\exp(-T(2C(T)+\eta))$, but using Proposition \ref{gaussianbeamprop} there exists some $u\in H$ such that \[ E(u,T)>E(u,0)\exp(-T(2C(T)+\eta))>CE(u,0)\exp(-2T(C_\infty+\eta)) \] which contradicts \eqref{jesaispasquoimettrecommelabel} and concludes the proof of $\alpha\leq 2 C_\infty$. \subsection{Lower bound for $\alpha$} We are now going to use the results of Section \ref{section2} in order to prove the following energy inequality for the high frequencies.
\begin{proposition}\label{lemmeinegaliteahauteenergie} For every time $T>0$ and every $\varepsilon >0$ there exists a constant $C(\varepsilon,T)$ such that for every $u=(u_0,u_1)$ in $H$ we have \begin{equation}\label{inegaliteahauteenergie} E(u,T)\leq (1+\varepsilon)\displaystyle e^{-2TC(T)} E(u,0)+C(\varepsilon,T)\|u_0,u_1\|^2_{L^2\oplus H^{-1}}\text{.} \end{equation} \end{proposition} \begin{proof} Assume that \eqref{inegaliteahauteenergie} is false; in this case, for some $T$, some $\varepsilon$ and every integer $k\geq 1$ there is a solution $u^k=(u_0^k,u_1^k)$ of \eqref{dampedwaveequation} satisfying \begin{equation}\label{3.3} E(u^k,T)\geq (1+\varepsilon)\displaystyle e^{-2TC(T)}E(u^k,0)+ k\|u_0^k,u_1^k\|^2_{L^2\oplus H^{-1}} \; \text{ and }\; E(u^k,0)=1\text{.} \end{equation} First we show that the sequence $(u^k)$ is bounded in $H^1(I\times M)$, where $I=[-2T;2T]$. Indeed, $E(u^k,0)=1$ and \eqref{formuleenergie} implies that $E(u^k,-2T)$ is bounded uniformly in $k$. Since the energy is non-increasing, the sequence $(u^k)$ must be bounded in $H^1(I\times M)$. Moreover $\|u_0^k,u_1^k\|^2_{L^2\oplus H^{-1}}\leq E(u^k,T)/k\leq 1/k$, so $(u^k)$ converges to $0$ in $L^2(I\times M)$ and thus weakly converges to $0$ in $H^1(I\times M)$. Extracting a sub-sequence if necessary, we may assume that $(u^k)$ admits $\mu=M\nu$ (with $\trace(M)=1$) as microlocal defect measure. As the energy is non-increasing, it follows from \eqref{3.3} that for every $\eta\in]0;T[$ and every non-negative function $\psi\in C^\infty_0(]0;\eta[)$, \[ \int_{T-\eta}^T \psi(T-t)E(u^k,t)\mathrm{d} t \geq (1+\varepsilon)e^{-2TC(T)}\int_0^\eta \psi(t)E(u^k,t) \mathrm{d} t\text{.} \] Since this is true for every function $\psi$, taking the limit $k\to\infty$ in the previous inequality gives \begin{equation}\label{3.5} \nu(S^*(\mathbf R \times M)\cap t\in]T-\eta,T[)\geq (1+\varepsilon)e^{-2TC(T)}\nu(S^*(\mathbf R \times M)\cap t\in]0;\eta[)\text{.} \end{equation} On the other hand, Corollary \ref{propagationmesure} gives us \[ \begin{array}{rcl} \nu^+(SZ^+\cap t\in ]T-\eta;T[)&=& \nu^+(\varphi_{T-\eta}(SZ^+\cap t\in ]0;\eta[))\\ \, &=& \displaystyle \int_{SZ^+\cap t\in ]0;\eta[} \trace[G_{T-\eta}^+M{G_{T-\eta}^+}^*]\mathrm{d}\nu \\ \; & \leq &\displaystyle \sup_{(x,\xi)\in S^*(M)} \|G_{T-\eta}^+(x,\xi)\|_{2}^2 \,\nu^+(SZ^+\cap t\in ]0;\eta[)\\ \; & = & e^{-2(T-\eta)C(T-\eta)}\nu^+(SZ^+\cap t\in ]0;\eta[)\text{.} \end{array} \] To get this upper bound we used the following properties. \[ \trace[G_{T-\eta}^+M{G_{T-\eta}^+}^*]=\trace[{G_{T-\eta}^+}^*G_{T-\eta}^+M]\leq \|G_{T-\eta}^+\|_2^2\trace(M) =\|G_{T-\eta}^+\|_2^2 \] We then use the same argument on $SZ^-$. With the relation $\partial_s (G_s^- \circ j) = -\{p,G_s^-\circ j\} + (G_s^-\circ j)a$ given at the end of Section \ref{section2} we find \[ \begin{array}{rcl} \nu^-(SZ^-\cap t\in ]T-\eta;T[)\leq e^{-2(T-\eta)C(T-\eta)} \nu^-(SZ^-\cap t\in ]0;\eta[)\text{.} \end{array} \] By combining $\nu^+$ and $\nu^-$ we get \begin{equation}\label{3.6} \nu(S^*(\mathbf R \times M)\cap t\in]T-\eta,T[)\leq e^{-2(T-\eta)C(T-\eta)}\nu(S^*(\mathbf R \times M)\cap t\in]0;\eta[)\text{.} \end{equation} Recall that $\displaystyle s\mapsto e^{-2sC(s)}$ is continuous, so for $\eta$ sufficiently small the inequalities \eqref{3.5} and \eqref{3.6} imply that $\nu(S^*(\mathbf R \times M)\cap t\in]0;\eta[)=0$. Consequently, the sequence $(u^k)$ strongly converges to $0$ in $H^1(]0;\eta[\times M)$ and thus it also strongly converges to $0$ in $H^1(I\times M)$.
This contradicts the hypothesis $E(u^k,0)=1$ and finishes the proof. \end{proof} The remainder of the proof of the formula for $\alpha$ is completely borrowed from the article of Lebeau (\cite{leb93}); indeed it works verbatim\footnote{Although the article of Lebeau is in French, so any translation error that may occur is my mistake.}. Let $A_a^*$ be the adjoint of $A_a$; we have $-A_a^*=\begin{pmatrix} 0 & \mathrm{Id} \\ \Delta & +2a \end{pmatrix} $ and the spectrum of $A^*_a$ is the complex conjugate of the spectrum of $A_a$. Let us call $E_{\lambda_j}^*$ the generalized eigenspace of $A_a^*$ associated with the spectral value $\overline{\lambda_j}$. For $N\geq 1$ we set \[ H_N=\left\lbrace x\in H : (x|y)_H=0, \; \forall y \in \bigoplus_{|\lambda_j|\leq N}E_{\lambda_j}^*\right\rbrace \text{.} \] The space $H_N$ is invariant under the evolution operator $e^{tA_a}$. To see that, take $x\in H_N$ and $\{y_l\}$ a basis of the finite-dimensional vector space $\displaystyle \bigoplus_{|\lambda_j|\leq N} E_{\lambda_j}^*\subset D(A_a^*)$; we have \[ \partial_t (e^{tA_a}x|y_l)=(e^{tA_a}x|A_a^*y_l)=\sum c_{l,k}(e^{tA_a}x|y_k) \text{ and so } (e^{tA_a}x|y_l)=0 \text{.} \] Set $H'=L^2\oplus H^{-1}$ and let $\theta_N$ be the norm of the embedding of $H_N$ in $H'$. The operator $-A^*_a$ is a compact perturbation of the skew-adjoint operator $A_0$; this implies that the family $\{E^*_{\lambda_j}\}_j$ is total in $H$ (see \cite{gokr69}, Chapter 5, Theorem 10.1) and thus that $\lim_{N\to\infty} \theta_N =0$. Let us assume that $2\min\{-D_0,C_\infty\}>0$, as otherwise there is nothing to prove. Fix $\eta>0$ small enough so that $\beta=2\min\{-D_0,C_\infty\}-\eta$ is positive. Now take $T$ such that $4|C_\infty-C(T)|<\eta$ and $2\log(3)<\eta T$, and finally $N$ such that $C(1,T)\theta^2_N\leq e^{-2TC(T)}$. It follows from the previous proposition that \[ \forall u\in H_N, \; E(u,T)\leq 3e^{-2TC(T)}E(u,0), \] and since $H_N$ is stable under the evolution \[ \forall k \in \mathbf N ,\;\forall u\in H_N,\; E(u,kT)\leq 3^ke^{-2kTC(T)}E(u,0)\text{.} \] The energy is non-increasing, so there exists a real $B>0$ such that \begin{equation}\label{decroissanceenergiehautefrequence} \forall t\geq 0,\; \forall u\in H_N,\; E(u,t)\leq Be^{-\beta t } E(u,0). \end{equation} Let $\gamma$ be a path circling counterclockwise around $\{\lambda_j : |\lambda_j|\leq N\}$ and $\Pi=\frac{1}{2i\pi}\int_\gamma \frac{\mathrm{d} \lambda}{\lambda-A_a}$ be the spectral projector on $W_N=\bigoplus_{|\lambda_j|\leq N} E_{\lambda_j}$. In this case $\Pi^*$ is the spectral projector of $A_a^*$ on $\bigoplus_{|\lambda_j|\leq N} E_{\lambda_j}^*$ and so for every $u\in H$, one has \begin{equation}\label{decompositionhautefrequencebassefrequences} v=\Pi u \in W_N,\; w=(1-\Pi)u\in H_N\;\text{ and }\; u=v+w\text{.} \end{equation} Now $W_N$ is of finite dimension and since $\beta\leq -2D(0)$ there exists some $C$ such that \begin{equation}\label{decroissanceenergiebassefrequence} \forall u\in W_N,\; \forall t\geq0 ,\; E(u,t)\leq Ce^{-\beta t} E(u,0)\text{.} \end{equation} Finally, since the decomposition \eqref{decompositionhautefrequencebassefrequences} is continuous, there exists some $C_0$ such that $E(v,0)+E(w,0)\leq C_0 E(u,0)$. Combining \eqref{decroissanceenergiebassefrequence} and \eqref{decroissanceenergiehautefrequence} we get $\alpha\geq \beta$, thus finishing the proof of the formula for $\alpha$.
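For completeness, let us spell out this last combination, which is a routine step: since the energy is a quadratic functional, $E(v+w,t)\leq 2E(v,t)+2E(w,t)$, so that \[ E(u,t)\leq 2E(v,t)+2E(w,t)\leq 2(B+C)e^{-\beta t}\big(E(v,0)+E(w,0)\big)\leq 2(B+C)C_0\,e^{-\beta t}E(u,0)\text{,} \] and, $\eta>0$ being arbitrary, this indeed gives $\alpha\geq 2\min\{-D_0;C_\infty\}$.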
\subsection{End of the proof of Theorem \ref{bigtheoreme} and proof of Theorem \ref{stabforte}} \paragraph*{} We still need to prove properties \textbf{(i)}, \textbf{(ii)} and \textbf{(iii)} of Theorem \ref{bigtheoreme}. For \textbf{(ii)} there is nothing to do since it is already done in \cite{leb93} in the case $n=1$, which is sufficient. For \textbf{(i)} we can assume $C_\infty >0$, as otherwise there is nothing to prove. Notice that $E_{\lambda_j}\subset H_N$ as soon as $|\lambda_j |>N$; together with \eqref{decroissanceenergiehautefrequence} this means that, for every $\beta<2C_\infty$ and for $N$ large enough, \[ |\lambda_j|>N \Rightarrow 2\mathfrak{Re}(\lambda_j)\leq -\beta\text{.} \] This implies $D_\infty \leq -C_\infty$ and proves \textbf{(i)}. Before we get to the last point of Theorem \ref{bigtheoreme} we are going to prove Theorem \ref{stabforte}. \begin{proof}[Proof of Theorem \ref{stabforte}] We start by proving \textbf{(ii)}$\Rightarrow$\textbf{(i)} by contraposition. Assume that \textbf{(i)} is not satisfied. If there is no weak stabilization then obviously \textbf{(ii)} is false. We can thus assume that there exists a point $(x_0,\xi_0)\in S^*M$ and a vector $y\in \mathbf C^n$ of euclidean norm $1$ such that $a(x_s)y=0$ for every time $s$. This means we have \[ \partial_t {G^+_t}^*(x_0,\xi_0)y=-{G^+_t}^*(x_0,\xi_0)a(x_t)y=0\text{,} \] hence ${G^+_t}^*(x_0,\xi_0)y=y$ for every $t$ and \[\|{G^+_t}(x_0,\xi_0){G^+_t}^*(x_0,\xi_0)\|_2=\|{G^+_t}^*(x_0,\xi_0)\|_2^2=\sup_{|v|=1}({G^+_t}^*(x_0,\xi_0)v,{G^+_t}^*(x_0,\xi_0)v)=1\text{,}\] where the supremum is $\geq 1$ because ${G^+_t}^*y=y$ and $\leq 1$ because $t\mapsto\|\Gamma_t\|_2$ is non-increasing with $\Gamma_0=\mathrm{Id}$. This implies that for every positive $t$ one has $C(t)=C_\infty=0$ and thus, by Theorem \ref{bigtheoreme}, $\alpha=0$. This in turn shows that there is no strong stabilization and proves \textbf{(ii)}$\Rightarrow$\textbf{(i)}. Reciprocally, assume that condition \textbf{(i)} is satisfied. Then by a compactness argument there exists $T>0$ such that for all $(x_0,\xi_0)\in S^*M$ and all $y\in \mathbf C^n$ of euclidean norm $1$ \[ \int_0^T\langle a(x_t)y,y \rangle\mathrm{d} t >0 \text{.} \] We begin by proving $C_\infty >0$; since $C_\infty\geq C(t)$ for every $t$ it suffices to show that $C(T)$ is positive. Let us assume that $C(T)=0$. Then, by compactness, there exist $(x_0,\xi_0)\in S^*M$ and $y\in \mathbf C^n$ of norm $1$ such that $|G_T^+(x_0,\xi_0)y|=\|G_T^+(x_0,\xi_0)\|_2=1$. We recall that $\Gamma_t=G_t^+G_t^+{}^*$ and that, according to Proposition \ref{prop:formulenormeG}, $\|\Gamma_t\|_2=\|G_t^+\|_2^2$ is non-increasing. As $\|\Gamma_0(x_0,\xi_0)\|_2=\|\Gamma_T(x_0,\xi_0)\|_2=1$ we know that $\|\Gamma_t(x_0,\xi_0)\|_2=1$ for every $t\in[0;T]$. Using Gaussian beams as in Section 3.1 we have proved that, for every $\varepsilon >0$, there exists a solution $u$ of the damped wave equation with $E(u,0)=1$ such that $\big|E(u,t)-|G_t^+(x_0,\xi_0)y|^2\big|<\varepsilon$ for every $t\in[0;T]$. Since the energy is non-increasing and $|G_0^+(x_0,\xi_0)y|=|G_T^+(x_0,\xi_0)y|=1$, letting $\varepsilon\to 0$ we deduce that $|G_t^+(x_0,\xi_0)y|=1$ for every $t\in[0;T]$. Differentiating, $\partial_t |G_t^+y|^2=-2\langle a(x_t)G_t^+y,G_t^+y\rangle=0$ and, as $a$ is positive semi-definite, this forces $a(x_t)G_t^+y=0$; hence $\partial_t G_t^+y=0$, that is $G_t^+y=y$ and $a(x_t)y=0$ for every $t\in[0;T]$. In particular \[ \int_0^T \langle a(x_t)y,y\rangle \mathrm{d} t=0\text{,} \] which is absurd, so we must have $C_\infty >0$. The weak stabilization assumption implies that $A_a$ has no eigenvalue (except $0$) on the line $\{\mathfrak{Re}(z)=0\}$. It follows that the only possibility for $D_0$ to be zero is that $D_\infty $ is also zero.
However we showed that $C_\infty >0$ and $C_\infty \leq -D_\infty$, so we have $-D_0>0$ and, by Theorem \ref{bigtheoreme}, strong stabilization. \end{proof} With this proof we see why $C_\infty >0 $ is equivalent to \eqref{GCC}, the geometric control condition. When $n=1$ the geometric control condition is equivalent to strong stabilization (\cite{blr92}), which is in turn equivalent to $\alpha>0$. This means that the situation \textbf{(iii)} of Theorem \ref{bigtheoreme} cannot happen when $n=1$. To show that the situation $C_\infty >0$ and $D_0=0$ does happen we will work on the circle $M=\mathbf R/ 2\pi \mathbf Z$. Let $k>0$ be a fixed integer and set $u_1(t,x)=e^{ikt}\sin(k x)$ and $u_2(t,x)=e^{ikt}\sin(kx+1)$. The function $u$ defined by \[ u(t,x)=\begin{pmatrix} u_1(t,x)\\ u_2(t,x) \end{pmatrix} \;\text{ is a solution of } \partial_t^2 u - \Delta u =0\text{.} \] We now define $a(x)$ as the orthogonal projector on $u(0,x)^\perp$; this way we get \[\forall (t,x)\in \mathbf R \times M,\;\;a(x)\partial_t u(t,x)=ike^{ikt}a(x)u(0,x)=0 \text{.}\] The function $u$ is thus a solution of the damped wave equation and we see that $ik$ is an eigenvalue of $A_a$. By construction $D_0=0$; however, $\mathrm{ker}(a(x))$ is of dimension $1$ and varies with $x$, so the geometric control condition is satisfied. This forces $C_\infty$ to be positive and finishes the proof of Theorem \ref{bigtheoreme}. \begin{remarque} Let us emphasize once again that, in the scalar case ($n=1$), the geometric control condition implies $a> 0$ on an open set and thus it also implies weak stabilization. On the other hand, when $n>1$ we can have the geometric control condition and no weak stabilization. This means that when $n=1$ Theorem \ref{stabforte} can be stated without the weak stabilization condition, but this condition is necessary whenever $n>1$. \end{remarque} \section{Behavior of $C_\infty$}\label{section4} \paragraph*{}In this section we are interested in the behavior of $C_\infty $ as a function of the damping term $a$. For this reason we will denote by $C_\infty(a)$ the constant $C_\infty$ associated with the damping term $a$ when needed. In the scalar case, things are pretty simple. If $a$ and $b$ are two damping terms and $\lambda\geq 0$ a real number, we have $C_\infty(\lambda a)=\lambda C_\infty(a)$ and $C_\infty(a+b)\geq C_\infty(a)+C_\infty(b)$; this is a direct consequence of \eqref{eq:formuleGscalaire}. Moreover, if $a$ and $b$ are such that $a\geq b$ pointwise then $C_\infty(a)\geq C_\infty(b)$. The vector case is more complicated since there is no simple expression for the matrix $G^+_t$. We will thus restrict ourselves to the study of a one-dimensional example. \paragraph*{}We will work on the circle $M=\mathbf R/ 2\pi \mathbf Z$. Using the cocycle formula for $G^+$ it is easy to see that $\lim_{t\to\infty} \frac{-1}{t}\ln (\|G_{t}^+(x,\pm1)\|_2^2)$ does not depend on $x$, which will be taken equal to $0$ from now on.
Still using this cocycle formula we see that if $p$ and $q$ are integers then \[ C_\infty(a)=\lim_{t\to \infty} \frac{-1}{t}\ln (\|G_{t}^+(0,\pm1)\|_2^2)=\lim_{p\to \infty} \frac{-1}{2p\pi}\ln (\|G_{2p\pi}^+(0,\pm1)\|_2^2) \] and also \[G_{2(p+q)\pi}^+(0,\pm1)=G_{2p\pi}^+(0,\pm1)G_{2q\pi}^+(0,\pm1)\text{.}\] Combining all that, we finally find \[ \begin{array}{rcl} \displaystyle\lim_{t\to \infty} \frac{-1}{t}\ln \left(\left\|G_{t}^+(x,\pm1)\right\|_2^2\right)&=&\displaystyle\lim_{p\to \infty} \frac{-1}{2p\pi}\ln \left(\left\|\left[G_{2\pi}^+(0,\pm1)\right]^p\right\|_2^2\right)\\ \,&=& \displaystyle \frac{-1}{\pi} \ln\left(\rho\left(G^+_{2\pi}(0,\pm1)\right)\right) \end{array} \] where $\rho(A)$ denotes the spectral radius of the matrix $A$. This equality also shows that the limit does exist and that \[ C_\infty(a)=\frac{-1}{\pi}\max\left\{\ln\left(\rho\left(G^+_{2\pi}(0,1)\right)\right);\ln\left(\rho\left(G^+_{2\pi}(0,-1)\right)\right)\right\}\text{.} \] In other words, the problem of finding $C_\infty$ is simply reduced to the analysis of two spectral radii. In fact it can be proved that $G^+_{2\pi}(0,1)=G^+_{2\pi}(0,-1)^*$, so there is really only one spectral radius here. To prove this equality it suffices to remark that $G^+_s(x_0,\xi_0)$ and $G_s^+(x_s,-\xi_s)^*$ satisfy the same differential equation. Equivalently, it is easy to prove this equality when $a$ is piecewise constant, and by a density argument the result is also true for every smooth function $a$. Notice that when $n=1$ the two matrices $G_{2\pi}^+(0,1)$ and $G_{2\pi}^+(0,-1)$ are equal, but this is not true in the general case since $G^+$ need not be Hermitian. In conclusion we proved that \begin{equation}\label{Cinftytoymodel} C_\infty(a)=\frac{-1}{\pi}\ln\left(\rho\left(G^+_{2\pi}(0,1)\right)\right)\text{.} \end{equation} \paragraph*{}We are only going to deal with a particular case of damping terms, but it will be general enough to exhibit all the behaviors we want. Take $A_1$, $A_2$ and $A_3$ three positive definite hermitian matrices with their eigenvalues in $(0;1]$; there exist three positive semi-definite hermitian matrices $a_1$, $a_2$ and $a_3$ such that $\exp(-a_j)=A_j$. Now take $\psi$ a smooth, non-negative cut-off function such that $\int_{S^1}\psi \mathrm{d} \lambda =1$ and $\supp \psi \subset (0;2\pi/3)$. The damping terms we are interested in are of the form \begin{equation}\label{eq:apiecewiseconstant} a(x)=a_1\psi(x)+a_2\psi(x+2\pi/3)+a_3\psi(x+4\pi/3) \end{equation} and with this condition we simply have $G_{2\pi}^+(0,1)=A_1A_2A_3$ and $G_{2\pi}^+(0,-1)=A_3A_2A_1=G_{2\pi}^+(0,1)^*$.
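For the interested reader, here is a minimal numerical sketch of the random search mentioned below. It only implements formula \eqref{Cinftytoymodel} for damping terms of the form \eqref{eq:apiecewiseconstant}, using the fact that $\lambda a$ corresponds to the factors $A_j^\lambda$; it assumes NumPy, and the helper names as well as the random model for the $A_j$ are arbitrary choices. \begin{verbatim} import numpy as np rng = np.random.default_rng(0) def c_infty(factors, lam=1.0): # C_infty(lam * a) = -(1/pi) ln rho(A_1^lam ... A_j^lam) (circle formula) prod = np.eye(factors[0].shape[0], dtype=complex) for A in factors: w, V = np.linalg.eigh(A) # A is hermitian positive definite prod = prod @ (V * w**lam) @ V.conj().T # A^lam = V diag(w^lam) V^* return -np.log(max(abs(np.linalg.eigvals(prod)))) / np.pi def random_factor(n=2): # A_j = exp(-a_j) with a_j a random hermitian positive definite matrix, # so that the eigenvalues of A_j lie in (0;1) B = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n)) w, V = np.linalg.eigh(B @ B.conj().T + 0.1 * np.eye(n)) return (V * np.exp(-w)) @ V.conj().T for _ in range(10000): A = [random_factor() for _ in range(3)] if c_infty(A, 2.0) > 2 * c_infty(A, 1.0): # failure of homogeneity print(np.round(A, 2)) break \end{verbatim} Pairs of matrices such as the explicit examples displayed below can be produced by this kind of search; the eigendecomposition is used so that the powers $A_j^\lambda$ are computed exactly on the spectrum.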
Let us compare $C_\infty(a)$ and $C_\infty(2a)$; according to \eqref{Cinftytoymodel} we have \[ C_\infty(a)=\frac{-1}{\pi}\ln\left(\rho(A_1A_2A_3)\right) \;\text{ and }\; C_\infty(2a)=\frac{-1}{\pi}\ln\left(\rho(A_1^2A_2^2A_3^2)\right)\text{.} \] If we use a program to randomly generate the $A_j$, it is not hard to find some function $a$ such that $C_\infty(2a)>2C_\infty(a)$, for example with \[ A_1=\begin{pmatrix} 0.87 & 0.21 + 0.09i \\ 0.21 - 0.09i & 0.51 \end{pmatrix}\text{,} \; A_2=\begin{pmatrix} 0.35 & - 0.23 + 0.08i \\ - 0.23 - 0.08i & 0.61 \end{pmatrix}\]\[ \text{ and }\, A_3=\begin{pmatrix} 0.23 & 0.11 - 0.21i \\ 0.11 + 0.21i & 0.25 \end{pmatrix}\text{.} \] It is even possible to have $C_\infty(2a)<C_\infty(a)$, for example with \[ A_1=\begin{pmatrix} 0.49 & 0.46 - 0.11i \\ 0.46 + 0.11i & 0.52 \end{pmatrix}\text{,} \; A_2=\begin{pmatrix} 0.49 & - 0.02 + 0.3i \\ - 0.02 - 0.3i & 0.58 \end{pmatrix} \] \[\text{ and }\, A_3=\begin{pmatrix} 0.52 & - 0.3 - 0.33i \\ - 0.3 + 0.33i & 0.37 \end{pmatrix}\text{.}\] \begin{figure}[h!] \centering \includegraphics[width=0.91\textwidth]{charta2aHD.pdf} \caption{Graphs of $\lambda\mapsto C_\infty(\lambda a)$ for the first example (solid line) and the second one (dotted line). } \end{figure} This proves that $C_\infty$ is neither monotone nor positively homogeneous. Note that even with $A_i\in \mathscr M_n(\mathbf R)$ there are examples of damping terms $a$ such that $C_\infty(2a)<C_\infty(a)$ or $C_\infty(2a)>2C_\infty(a)$. Figure 2 shows the behavior of $\lambda\mapsto C_\infty (\lambda a)$ for the two previous examples. We are going to use the same method to study the additivity of $C_\infty$. Assume now that $\supp \psi \subset (0;\pi/2)$ and look at two damping terms defined by \[ a(x)=a_1\psi(x)+a_2\psi(x+\pi)\; \text{ and } \; b(x)=b_1\psi(x+\pi/2)+b_2\psi(x+3\pi/2)\text{.} \] By equality \eqref{Cinftytoymodel} we get \[ C_\infty(a+b)=\frac{-1}{\pi}\ln\left(\rho(A_1B_1A_2B_2)\right)\, \text{ and }\, C_\infty(a)+C_\infty(b)=\frac{-1}{\pi}\left(\ln\left(\rho(A_1A_2)\right)+\ln\left(\rho(B_1B_2)\right)\right)\text{.} \] Then again, using a program to randomly generate the $A_j$ and the $B_j$, it is not hard to find $a$ and $b$ such that $C_\infty(a+b)<C_\infty(a)+C_\infty(b)$; for example with \[ A_1=\begin{pmatrix} 0.27 & - 0.15 - 0.15i \\ - 0.15 + 0.15i & 0.18 \end{pmatrix}\text{,} \; A_2=\begin{pmatrix} 0.31 & 0.25 + 0.3i \\ 0.25 - 0.3i & 0.54 \end{pmatrix}\text{,} \] \[ B_1=\begin{pmatrix} 0.65 & 0.35 - 0.28i \\ 0.35 + 0.28i & 0.38 \end{pmatrix} \;\text{ and }\; B_2=\begin{pmatrix} 0.05 & - 0.04 + 0.05i \\ - 0.04 - 0.05i & 0.08 \end{pmatrix} \] we find $C_\infty (a+b) \approx 1.45$ and $C_\infty(a)+C_\infty(b)\approx 2.99$. Conversely, it is possible to have $C_\infty(a+b)>C_\infty(a)+C_\infty(b)$; for example with \[A_1=\begin{pmatrix} 0.17 & 0.07 - 0.11i \\ 0.07 + 0.11i & 0.12 \end{pmatrix}\text{,}\; A_2=\begin{pmatrix} 0.32 & - 0.09 - 0.35i \\ - 0.09 + 0.35i & 0.61 \end{pmatrix} \text{,}\] \[ B_1=\begin{pmatrix} 0.13 & - 0.19 + 0.04i \\ - 0.19 - 0.04i & 0.4 \end{pmatrix} \;\text{ and } \; B_2=\begin{pmatrix} 0.18 & 0.01 + 0.13i \\ 0.01 - 0.13i & 0.23 \end{pmatrix} \] we find $C_\infty (a+b) \approx 1.87$ and $C_\infty(a)+C_\infty(b)\approx 1.20$. \paragraph*{}However, $C_\infty$ still has some kind of homogeneous behavior as $\lambda$ tends to infinity.
Assume for example that $a$ is a piecewise constant function (not necessarily continuous), or that $a$ is of the form \eqref{eq:apiecewiseconstant} but with arbitrarily many $a_i$ instead of only three. In this case there exist some positive definite hermitian matrices $A_i$ with eigenvalues in $(0;1]$ such that \[ C_\infty(a)=\frac{-1}{\pi}\ln\left(\rho(A_1\ldots A_j)\right) \] and such that for every real $\lambda\geq 0 $ we have \[ C_\infty(\lambda a)=\frac{-1}{\pi}\ln\left(\rho(A_1^\lambda \ldots A_j^\lambda)\right)\text{.} \] We are going to prove that in this case $\lim_{\lambda\to\infty} C_\infty(\lambda a)/\lambda $ exists, is non-negative and finite. The first thing to note is that every $A_i^\lambda$ converges to some orthogonal projector $P_i$, so $A_1^\lambda \ldots A_j^\lambda$ converges to $P_1 \ldots P_j$, whose spectral radius is either zero or positive. If $\rho(P_1\ldots P_j)=r\neq 0$ then $\rho(A_1^\lambda \ldots A_j^\lambda)$ also converges to $r$ and thus $C_\infty(\lambda a)/\lambda$ converges to $0$. We may thus assume from now on that the spectral radius of $P_1\ldots P_j $ is $0$. Remark that each coefficient of $A_i^\lambda$ is a polynomial in the eigenvalues of $A_i^\lambda$. Let us call $P_\lambda=X^n+\sum_{i=0}^{n-1} b_i(\lambda)X^i $ the characteristic polynomial of $A_1^\lambda\ldots A_j^\lambda$; since the determinant is also a polynomial, we get that each coefficient of $P_\lambda$ is a polynomial in the eigenvalues of the matrices $A_i^\lambda$. If $\xi$ is an eigenvalue of $A_i$ then $\xi^\lambda$ is an eigenvalue of $A_i^\lambda$, and so each of the coefficients of $P_\lambda$ can be written as \[ b_i(\lambda)=\sum_{j=0}^{k_i} c_{i,j} \beta_{i,j}^\lambda \; \text{ with } c_{i,j}\in \mathbf C^* \text{ and } \beta_{i,0}>\beta_{i,1}>\cdots >\beta_{i,k_i} > 0\text{.} \] \begin{figure} \centering \includegraphics[width=0.91\textwidth]{chartCinftynullimit.pdf} \caption{Graph of $\lambda\mapsto C_\infty(\lambda a)$ for some damping term $a$ with $\lim_\lambda C_\infty(\lambda a)/\lambda=0$. } \end{figure} Since $\rho(A_1^\lambda \ldots A_j^\lambda)$ converges to $0$ we know that $P_\lambda$ converges to $X^n$, and so every $\beta_{i,j}$ must be in $(0;1)$. Now look at the polynomial $\widehat{P}_\lambda(X)=\gamma^{\lambda n}P_\lambda(X/\gamma^\lambda)$; we have \[ \widehat{P}_{\lambda}(X)=X^n+\sum_{i=0}^{n-1}\gamma^{\lambda(n-i)}b_i(\lambda)X^i\; \text{ and} \] \[ \gamma^{\lambda(n-i)}b_i(\lambda)=\left(\gamma^{n-i}\beta_{i,0}\right)^\lambda \left(c_{i,0}+\sum_{j=1}^{k_i} c_{i,j}\left( \frac{\beta_{i,j}}{\beta_{i,0}}\right)^\lambda\right)\text{.} \] For this reason there exists a unique\footnote{$\gamma=\min_i \beta_{i,0}^{1/(i-n)}$} real number $\gamma>1$ such that $\widehat{P}_\lambda(X)=\gamma^{\lambda n}P_\lambda(X/\gamma^\lambda)$ converges to some monic polynomial $Q\neq X^n$. This means that the roots of $\widehat{P}_\lambda$ converge to the roots of $Q$. Let $\xi$ be a root of $Q$ with maximal modulus; recall that $\xi\neq 0$ because $Q\neq X^n$. A complex number $z$ is a root of $P_\lambda$ if and only if $\gamma^\lambda z$ is a root of $\widehat{P}_\lambda$, and these roots converge to the ones of $Q$. We deduce from this that $\gamma^\lambda\rho(A_1^\lambda \ldots A_j^\lambda)$ converges to $|\xi|$ and we finally have \begin{equation}\label{eq:limitecinftyagain} \lim_{\lambda\to\infty}\frac{C_\infty(\lambda a)}{\lambda}=\frac{-1}{\pi}\ln(\gamma^{-1}) \end{equation} which is exactly what we wanted.
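Property \eqref{eq:limitecinftyagain} is also easy to observe numerically. The following sketch, again assuming NumPy and an arbitrary random choice of the $a_i$ (rescaled so that the products $A_1^\lambda\cdots A_j^\lambda$ stay away from numerical underflow for large $\lambda$), prints $C_\infty(\lambda a)/\lambda$ for increasing $\lambda$; the values level off at $\frac{1}{\pi}\ln(\gamma)$. \begin{verbatim} import numpy as np rng = np.random.default_rng(1) def random_eigendecomposition(n=2, scale=0.2): # a_i = V diag(w) V^* is a random hermitian positive semi-definite matrix; # we keep (w, V) so that A_i^lam = V diag(exp(-lam * w)) V^* B = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n)) w, V = np.linalg.eigh(B @ B.conj().T) return scale * w / w.max(), V # keep a_i small to avoid underflow factors = [random_eigendecomposition() for _ in range(5)] def slope(lam): # C_infty(lam * a) / lam, through the product of the A_i^lam prod = np.eye(2, dtype=complex) for w, V in factors: prod = prod @ (V * np.exp(-lam * w)) @ V.conj().T return -np.log(max(abs(np.linalg.eigvals(prod)))) / (np.pi * lam) for lam in (0.1, 1.0, 5.0, 20.0, 80.0): print(lam, slope(lam)) # levels off at (1/pi) ln(gamma) as lam grows \end{verbatim} Note that rescaling the $a_i$ only changes $\lambda$ by a multiplicative constant and therefore does not affect the existence of the limit.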
The very same kind of argument also shows that \[\lim_{\lambda \to 0^+} \frac{C_\infty(\lambda a)}{\lambda}\] exists and is finite. Numerical simulations seem to indicate that we always have \[ \lim_{\lambda\to \infty} \frac{C_\infty(\lambda a)}{\lambda} \leq \lim_{\lambda\to 0^+} \frac{C_\infty(\lambda a)}{\lambda} \] but the function $\lambda \mapsto C_\infty(\lambda a)/\lambda$ need not be monotone, as shown in Figure~\ref{fig:Cinfty}. \begin{figure} \centering \includegraphics[width=1\textwidth]{chartbestCinftyHD.pdf} \caption{Graph of $\lambda\mapsto C_\infty (\lambda a)/\lambda$ for some damping term $a$ of the form \eqref{eq:apiecewiseconstant} but with five $a_i$ instead of three. Here $\lim_\lambda C_\infty(\lambda a)/\lambda$ is positive.} \label{fig:Cinfty} \end{figure} \paragraph*{} It is natural to ask whether property \eqref{eq:limitecinftyagain} remains true in a more general setting, that is, for any smooth $a$ on a general manifold. Unfortunately, several difficulties prevent us from answering this question. For example, on a general manifold there is no analogue of the formula \eqref{Cinftytoymodel}, and it is not even clear that $\|a_k-a\|_\infty\to 0$ implies $C_\infty(a_k)\to C_\infty(a)$ there. Even on the circle, where this is true, it does not follow that \[ \lim_{k\to \infty} \lim_{\lambda\to \infty} \frac{C_\infty(\lambda a_k)}{\lambda}=\lim_{\lambda\to \infty} \frac{C_\infty(\lambda a)}{\lambda} \] and so it is not clear that $\lim_{\lambda\to\infty} C_\infty(\lambda a)/\lambda $ exists for a smooth $a$, even in the simple case of the circle.
\section{Introduction} Bestvina-Handel defined irreducible automorphisms of free groups in \cite{BH92} as the base case of their proof of the Scott conjecture. Irreducible automorphisms are the dynamical free group analogues of pseudo-Anosov diffeomorphisms of surfaces and, as such, the study of their dynamics has been an active area of research since their introduction. For instance, a leaf of the attracting lamination of a pseudo-Anosov diffeomorphism cannot have compact support in an infinite cover of the surface, and analogously, Bestvina-Feighn-Handel constructed a lamination for fully irreducible automorphisms and showed that a leaf of the lamination cannot be supported by a finitely generated infinite index subgroup \cite[Proposition~2.4]{BFH97}. Another analogy: pseudo-Anosov mapping classes act with north-south dynamics on the Thurston compactification of Teichm\"uller space \cite{Thu88} and Levitt-Lustig proved that fully irreducible automorphisms act with north-south dynamics on the compactification of Culler-Vogtmann outer space \cite{LL03}. We would like to extend these results to irreducible endomorphisms. We begin by defining the (semi)action of an injective endomorphism on outer space and the first result is that irreducible nonsurjective endomorphisms act with sink dynamics on outer space. There is a correspondence between having a fixed point of the action and being able to represent the endomorphism with a graph immersion. Under this correspondence, we prove: \begin{restate}{Theorem}{part2} If $\phi:F\to F$ is an irreducible nonsurjective endomorphism, then it can be represented by an irreducible immersion with connected Whitehead graphs. Moreover, the immersion is unique amongst irreducible immersions. \end{restate} All the terms in the theorem will be defined in Section \ref{defs}. This is an unpublished result of Patrick Reynolds \cite[Corollary~5.5]{Rey} but the proof given here is original. We also note that Reynolds' proof assumes the endomorphism is fully irreducible but, as we shall see shortly, this is equivalent to being irreducible. Not much is currently known about the action; for instance, \begin{prob}Does the action of an irreducible nonsurjective endomorphism on outer~space always have bounded image? See Example~\ref{sapir2}. \end{prob} Using the sink dynamics on outer space, we prove a partial converse of Theorem~\ref{part2}: \begin{restate}{Theorem}{part3} If $\phi:F \to F$ is represented by an irreducible immersion whose Whitehead graphs are connected and have no cut vertices, then $\phi$ is nonsurjective and fully irreducible. \end{restate} Example~\ref{countereg} below shows that this criterion is not necessary, which raises the question: \begin{prob}Is there an algorithmic characterization for fully irreducible nonsurjective endomorphisms? \end{prob} The next proposition essentially determines which finitely generated subgroups are invariant under irreducible endomorphisms; roughly speaking, these are the iterated images of the endomorphism, up to finite index. \begin{restate}{Proposition}{invSbgrp}Suppose $\phi : F \to F$ is injective and represented by an irreducible train track with connected Whitehead graphs. If $H \le F$ is a finitely generated subgroup such that $\phi(H) \le H$ and $H$ contains a $\phi$-expanding conjugacy class, then \[ [\phi^k(F) : \phi^k(F) \cap H] < \infty \quad \text{for some } k \ge 0.\] \end{restate} This follows from the technical Proposition~\ref{supSbgrp}. 
We then get a characterization for fully irreducible endomorphisms that applies to all injective endomorphisms. \begin{restate}{Theorem}{propKap}Let $\phi:F \to F$ be an injective endomorphism. Then $\phi$ is fully irreducible if and only if $\phi:F \to F$ has no periodic cyclic free factor, is represented by an irreducible train track with connected Whitehead graphs, and its image $\phi(F)$ is not contained in a proper free factor. \end{restate} Dowdall-Kapovich-Leininger had previously shown the equivalence between the irreducible and fully irreducible properties for atoroidal automorphisms \cite[Corollary~B.4]{DKL15}. Theorem~\ref{part2} and Theorem~\ref{propKap} give us this equivalence for nonsurjective endomorphisms. \begin{restate}{Corollary}{myequiv} If $\phi:F \to F$ is irreducible and nonsurjective, then it is fully irreducible. \end{restate} Another consequence of Proposition~\ref{invSbgrp} is the hyperbolicity of the mapping torus of an irreducible immersion with connected Whitehead graphs -- this was our original motivation. \begin{restate}{Theorem}{hypthm} If $\phi:F \to F$ is represented by an irreducible immersion with connected Whitehead graphs, then $F*_\phi$ is word-hyperbolic. In particular, if $\phi:F \to F$ is nonsurjective and irreducible, then $F*_\phi$ is word-hyperbolic. \end{restate} By our previous work \cite{JPM}, the proof boils down to showing that irreducible nonsurjective endomorphisms cannot have periodic laminations. As a quick application, we prove the hyperbolicity of the {\it Sapir group}: \begin{eg}\label{sapir}Let $F = F(a,b)$ be the free group on two generators and $\phi:F \to F$ be the nonsurjective endomorphism given by $\phi(a) = ab$ and $\phi(b) = ba$. The obvious map on the standard rose $f: R_2 \to R_2$ will be an immersion that induces $\phi$ on the fundamental group. It is easy to verify that $f$ is an irreducible immersion whose Whitehead graph is connected and has no cut vertices; a computational check is sketched after this example. By Theorem~\ref{part3}, $\phi$ is fully irreducible, and by Theorem \ref{hypthm}, the Sapir group $F*_\phi = \langle a, b, t ~|~ t^{-1}at = ab, t^{-1}bt = ba \rangle$ is word-hyperbolic. In contrast, the mapping torus of $\psi:F \to F$ given by $(a,b)\mapsto (aba, bab)$ is not word-hyperbolic: $\psi(ab) = (ab)^3$ and so $BS(1,3) \cong \langle ab, t \rangle \le F*_\psi$. As $H_1(F*_\psi) = \mathbb Z^2$ has rank $>1$, there are infinitely many isomorphisms $F*_\psi \cong F_n*_{\psi_n}$ \cite{BNS87}; all such endomorphisms $\psi_n:F_n \to F_n$ are nonsurjective, because $BS(1,3) \le F*_\psi$, and reducible by Theorem~\ref{hypthm}. \end{eg}
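The verification claimed in Example~\ref{sapir} is mechanical enough to automate. The following self-contained sketch (ours; plain Python with our own naming conventions, not code from any reference) computes the transition matrix of $f$ and the Whitehead graph at the unique vertex of $R_2$, writing inverse edges as capital letters, and then checks connectivity and the absence of cut vertices.

\begin{verbatim}
f = {'a': 'ab', 'b': 'ba'}

def fw(w):
    """Image of an edge path under f; f is an immersion, so no cancellation."""
    return ''.join(f[x] if x.islower() else f[x.lower()][::-1].swapcase()
                   for x in w)

# Transition matrix: entry (i, j) counts occurrences of edge i in f(e_j)
# (the images here are positive words, so plain counting suffices).
A = [[f[ej].count(ei) for ej in 'ab'] for ei in 'ab']
print('transition matrix:', A)   # [[1, 1], [1, 1]]: primitive, PF eigenvalue 2

# Whitehead graph: turns {x-bar, y} crossed by the paths f^k(e), then closed
# under the direction map d -> first letter of f(d).
turns = set()
for e in 'ab':
    w = e
    for _ in range(4):           # a few iterates suffice for this example
        w = fw(w)
        turns |= {frozenset((w[i].swapcase(), w[i + 1]))
                  for i in range(len(w) - 1)}
while True:
    new = {t for t in (frozenset(fw(d)[0] for d in s) for s in turns)
           if len(t) == 2} - turns
    if not new:
        break
    turns |= new

adj = {v: set() for v in 'aAbB'}
for s in turns:
    u, v = tuple(s)
    adj[u].add(v); adj[v].add(u)

def connected(vs):
    """Is the Whitehead graph induced on the vertex set vs connected?"""
    vs, seen, stack = set(vs), set(), [sorted(vs)[0]]
    while stack:
        v = stack.pop()
        if v not in seen:
            seen.add(v)
            stack += [u for u in adj[v] if u in vs]
    return seen == vs

print('edges:', sorted(tuple(sorted(s)) for s in turns))
print('connected:', connected('aAbB'))                         # True
print('cut vertices:', [v for v in 'aAbB'
                        if not connected(set('aAbB') - {v})])  # []
\end{verbatim}

Running it, one finds that the Whitehead graph is the $4$-cycle with edges $\{a,\bar a\}$, $\{\bar a, b\}$, $\{b,\bar b\}$, $\{\bar b, a\}$, which is connected and has no cut vertices.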
In future work, we use Proposition~\ref{invSbgrp} to show that the irreducibility of an endomorphism is a group-invariant of its mapping torus, i.e., suppose $\phi:F_n \to F_n$ and $\psi:F_m \to F_m$ are endomorphisms whose images are not contained in proper free factors and $F_n*_\phi \cong F_m *_\psi$, then $\phi$ is irreducible if and only if $\psi$ is irreducible~\cite{JPM2}. This answers Question 1.4 posed by Dowdall-Kapovich-Leininger in \cite{DKL17}; see also \cite[Theorem~C]{DKL15}. {~} \noindent \textbf{Outline:} Section~\ref{defs} contains the standard definitions and preliminary results that will be used throughout the paper. We define outer space and the action of injective endomorphisms on it in Section~\ref{cvf}. Section~\ref{immersions} contains the proof of Theorem~\ref{part2} and a partial converse, Theorem~\ref{part3}. In Section~\ref{iwip}, we prove the technical result classifying subgroups that support a leaf of a lamination, Proposition~\ref{supSbgrp}. While the two previous sections are independent, we combine their main results in Section~\ref{hyperbolic} to prove Theorem~\ref{hypthm}. {~} \noindent \textbf{Acknowledgments:} I would like to thank Ilya Kapovich for getting me started on the proof of Theorem \ref{part2}. I also thank Derrick Wigglesworth and my advisor Matt Clay for the countless conversations on this material. The clarity of my exposition benefited from the referee's comments. \section{Definitions and Preliminary Results}\label{defs} In this paper, $F$ is a finitely generated free group on at least two generators. \begin{defn} An endomorphism $\phi: F \to F$ is {\bf reducible} if there exists a free factorization $A_1 * \cdots * A_k * B $ of $F$, where $B$ is nontrivial if $k = 1$, and a sequence of elements, $(g_i)_{i=1}^k$, in $F$ such that $\phi(A_i) \le g_i A_{i+1} g_i^{-1}$, where the indices are considered modulo $k$. An endomorphism $\phi$ is {\bf irreducible} if it is not reducible and it is {\bf fully irreducible} if all its iterates are irreducible; equivalently, $\phi$ is fully irreducible if $\phi$ has no invariant proper free factor, i.e., there does not exist a proper free factor $A \le F$, an element $g \in F$, and an integer $n \ge 1$ such that $\phi^n(A) \le gAg^{-1}$. \end{defn} An application of Stallings folds implies the endomorphisms studied in this paper will be injective even though it may not be explicitly stated again. \begin{lem}\label{inj} If $\phi:F \to F$ is irreducible, then it is injective.\end{lem} \begin{proof} Suppose $\phi$ is not injective. Stallings \cite{St83} showed that any endomorphism of $F$ factors through folds whose nontrivial kernels correspond to collapsed proper subgraphs and hence are normal closures of proper free factors. Thus $\ker \phi$ contains an invariant proper free factor, which contradicts the irreducibility assumption. \end{proof} \begin{defn} Fix an isomorphism $F \cong \pi_1(\Gamma)$ for some connected finite core graph $\Gamma$, i.e., a finite 1-dimensional CW-complex with no univalent vertices. For any subgroup $H \le F$, the {\bf Stallings subgroup graph} $S(H)$ is the core of the cover of $\Gamma$ corresponding to $H$, i.e., it is the smallest subgraph of the cover containing all immersed loops of the cover. When $H$ is nontrivial and finitely generated, $S(H)$ is a finite core graph with rank $\ge 1$. A Stallings subgroup graph comes with an immersion $v: S(H) \to \Gamma$, which is a restriction of the covering map. We shall also assume that $S(H)$ is subdivided so that $v$ is {\bf simplicial}: $v$ maps edges to edges. The immersion $S(H) \to \Gamma$ uniquely determines $[H]$, the conjugacy class of $H$: if there is a subgroup $H' \le F$ with immersion $v':S(H') \to \Gamma$ and homeomorphism $h:S(H) \to S(H')$ such that $v = v'\circ h$, then $[H] = [H']$. \end{defn} The last statement makes Stallings subgroup graphs particularly useful for studying a nonsurjective injective endomorphism $\phi:F \to F$. For $i \ge 1$, let $S_i = S(\phi^i(F))$. When $\phi$ is an automorphism, the maps $v_i: S_i \to \Gamma$ are all graph isomorphisms. Conversely, we get: \begin{lem}\label{nonsurjSt} If $\phi:F \to F$ is injective and nonsurjective, then the number of edges in $S_i$ is unbounded as $i \to \infty$. \end{lem} \begin{proof} Suppose there is a homeomorphism $h: S_i \to S_j$ for some $i < j$ such that $v_i = v_j \circ h$.
Then, by the last statement of the definition, $[\phi^i(F)] = [\phi^j(F)]$. This means there is some element $g \in F$ such that $g\phi^i(F) g^{-1} = \phi^j(F) \le \phi^i(F) $. But free groups are subgroup separable, so a finitely generated subgroup cannot be conjugate to a proper subgroup of itself. Hence $\phi^j(F) = \phi^i(F)$. But $\left.\phi^{j-i}\right|_{\phi^i(F)}$ is conjugate to $\phi^{j-i}$ as $\phi$ is injective. In particular, $\phi^{j-i}$ is an automorphism and so is $\phi$ -- a contradiction. Therefore, the maps $v_i$ are all distinct and, since there are only finitely many simplicial immersions to $\Gamma$ from core graphs with a bounded number of edges, the number of edges in $S_i$ is unbounded as $ i \to \infty$. \end{proof} We shall use this lemma in Section~\ref{immersions} to show that, for an irreducible nonsurjective endomorphism, the sequence of maps $S_i \to S_i$ that induce $\phi$ on $\pi_1(S_i) \cong F$ converges to an immersion on some graph $\Gamma'$ that induces $\phi$ on $\pi_1(\Gamma') \cong F$. \begin{defn} Let $\Gamma$ be a connected finite core graph. A map $f: \Gamma \to \Gamma$ is a {\bf train track} if it maps vertices to vertices and all of its iterates are locally injective in the interior of edges and at bivalent vertices. A train track is {\bf irreducible} if for any pair of edges $e_i$, $e_j$ in $\Gamma$, $e_i$ is in the image of some $f$-iterate of $e_j$. Given a train track $f: \Gamma \to \Gamma$, we fix an ordering of the edges of $\Gamma$ and construct the {\bf transition matrix} $A(f)$ as follows: it is a nonnegative square matrix whose size is given by the number of edges in $\Gamma$; the $(i, j)$-th entry of $A(f)$ is the number of times the edge $e_i$ appears in the image of $e_j$. An irreducible train track is {\bf expanding} if it is not a homeomorphism; equivalently, the transition matrix is {\it irreducible} and has a real eigenvalue $\lambda_f > 1$ -- this is the {\it Perron-Frobenius eigenvalue}. Let $f:\Gamma \to \Gamma$ be a train track map and $v$ be a vertex of $\Gamma$. The {\bf Whitehead graph} at $v$ is a simple graph whose vertices are the half-edges of $\Gamma$ attached to $v$, denoted by $T_v(\Gamma)$. A pair of elements of $T_v(\Gamma)$ is called a {\bf turn} at $v$, and it is {\bf nondegenerate} if the pair consists of distinct elements. The train track $f$ induces a map on $T_v(\Gamma)$. A nondegenerate turn is {\bf $f$-legal} if it remains nondegenerate under iteration of $f$. A turn at $v$ is an edge of the Whitehead graph if it appears in the image of an $f$-iterate of some edge of $\Gamma$. Note that the edges in the Whitehead graphs are $f$-legal. An irreducible train track $f: \Gamma \to \Gamma$ is {\bf weakly clean} if the Whitehead graph at each vertex is connected. A weakly clean map is {\bf clean} if some iterate maps every edge onto the whole graph; equivalently, the transition matrix is {\it primitive}. It follows from the definition that if a map $f$ is clean then so are all its iterates $f^i~(i \ge 1)$. \end{defn} The following proposition due to Dowdall-Kapovich-Leininger \cite[Proposition~B.2]{DKL15} allows us to use clean and weakly clean interchangeably. We give a proof that does not assume homotopy equivalence. \begin{prop}\label{clean} Suppose $f:\Gamma \to \Gamma$ is a train track with an irreducible but not primitive transition matrix. Then $f$ has a vertex with a disconnected Whitehead graph. In particular, if $f$ is weakly clean, then $f$ is clean.\end{prop} \begin{proof}As $A(f)$ is irreducible but not primitive, it is permutation-similar to a transitive block permutation matrix \cite[Theorem 1.8.3]{BR97}.
This block permutation form gives a partition of $E(\Gamma)$ into $d \ge 2$ proper subgraphs $\Gamma_0, \ldots, \Gamma_{d-1}$ such that $f(\Gamma_i) \subset \Gamma_{i+1}$, where the indices are considered modulo $d$. Thus, any vertex adjacent to two or more of these subgraphs has a disconnected Whitehead graph. Such a vertex exists since $\Gamma$ is connected.\end{proof} \begin{defn}Two homomorphisms $\phi_1, \phi_2:A \to B$ are {\it equivalent} if there is an inner automorphism of $B$, $i_g$, such that $\phi_1 = i_g \circ \phi_2$. An {\it outermorphism} is an equivalence class in $\operatorname{Hom}(A,B)$, denoted by $[\phi]$, and $\Out(F)$ is the group of outer automorphisms of $F$. A graph map $f:\Gamma \to \Gamma$ is a {\bf (topological) representative} for an injective (outer) endomorphism $\phi: F \to F$ if: $\Gamma$ is a connected core graph; the map $f$ maps vertices to vertices and is locally injective in the interior of edges and at bivalent vertices; and there is an isomorphism $\alpha: F \to \pi_1(\Gamma)$, known as a {\bf marking}, such that $[f_*\alpha] = [\alpha\phi]$. \end{defn} Bestvina-Handel defined train tracks in \cite{BH92} and one of the main results was the algorithmic construction of train track representatives for irreducible endomorphisms. We note that their result was stated for irreducible automorphisms but the proof itself never used nor needed the fact that the endomorphisms were surjective. See also Dicks-Ventura \cite{DV96}. \begin{thm}[{\cite[Theorem~1.7]{BH92}}]\label{tt} If $\phi: F \to F$ is an irreducible endomorphism, then $\phi$ can be represented by an irreducible train track map. The irreducible train track is expanding if and only if $\phi$ has infinite order.\end{thm} The following argument is now a standard technique in the field and has its roots in Bestvina-Handel's paper \cite[Proposition~4.5]{BH92}. \begin{cor}\label{clean2} If $\phi:F \to F$ is irreducible and has infinite order, then $\phi$ has a clean representative. Moreover, any irreducible train track representative of $\phi$ will be clean.\end{cor} \begin{proof}[Sketch proof] By Theorem \ref{tt}, the endomorphism $\phi$ has an expanding irreducible train track representative. If the Whitehead graph of some vertex were disconnected, then blowing up the vertex and the appropriate preimages to separate the components of the Whitehead graph would give a reduction of $\phi$. So all Whitehead graphs are connected and the representative is (weakly) clean. For more details, see \cite[Proposition 4.1]{Kap14}.\end{proof} \begin{rmk}We take a moment to discuss a bit of history. As we have already noted, Bestvina-Handel constructed train track representatives for irreducible automorphisms but the construction only used Stallings folds and Perron-Frobenius theory; thus it applies to irreducible endomorphisms. In the subsequent {\it laminations paper} \cite{BFH97}, Bestvina-Feighn-Handel show that irreducible automorphisms have weakly clean representatives. The argument again applies to irreducible endomorphisms as shown in the previous corollary, and Proposition~\ref{clean} allows us to strengthen the conclusion to get clean representatives. Proposition~\ref{clean} is important as it renders the erratum \cite{BFH97e} to the laminations paper unnecessary: full irreducibility is not needed for any of the results in that paper, only the existence of a clean representative.
In fact, besides the results involving {\it repelling laminations/trees}, the rest also apply to nonsurjective endomorphisms with clean representatives; most importantly for our paper, Lemma~\ref{sink} holds in this greater generality even though it was originally stated only for (fully) irreducible automorphisms. \end{rmk} For any irreducible train track map $f:\Gamma \to \Gamma$, there is the associated Perron-Frobenius eigenvalue of the transition matrix, $\lambda_f \ge 1$, and a metric on the graph $\Gamma$ given by the Perron-Frobenius left eigenvector. With this metric, $f$ is homotopic rel. vertices to a (unique) map whose restrictions to edges are local $\lambda_f$-homotheties. For the rest of the paper, we shall assume an irreducible train track map has the latter linear property. The following lemmas combined with the train track linear structure allow us to carefully study the dynamics of $f$. The first lemma is known as the {\bf Bounded Cancellation Lemma}. {~} {\noindent \bf Notation.} We denote by $[p]$ (resp. $[\rho]$) the immersed path (resp. loop) homotopic rel. endpoints to a path $p$ (resp. freely homotopic to a loop $\rho$). We denote the length of an immersed path $p$ in $\Gamma$ by $l(p)$. When no metric on $\Gamma$ is specified, the length is assumed to be the combinatorial length. \begin{lem}[{\cite[Lemma II.2.4]{DV96}}]Suppose $f: \Gamma \to \Gamma$ is a topological representative for an injective endomorphism $\phi:F \to F$. Then there exists a constant $C = C(f)$ such that for any immersed path that can be written as a concatenation $a \cdot b$ in $\Gamma$, there exist path decompositions \( [f(a)] = x \cdot u, [f(b)] = \bar u \cdot y, [f(a \cdot b)] = x \cdot y,\) where $l(u) < C$. \end{lem} \begin{lem}\label{critLem}Let $f:\Gamma \to \Gamma$ be an expanding irreducible train track and $C = C(f)$ the cancellation constant. If $b$ is an $f$-legal path and $l(b) > \frac{2C}{\lambda_f - 1}$, then there is a nontrivial subpath $s$ of $b$ such that $f^k(s)$ is a subpath of $[f^k(a \cdot b \cdot c)]$ for any $k \ge 1$ and immersed path $a\cdot b \cdot c$. \end{lem} \begin{proof} Let $a\cdot b \cdot c$ be an immersed path. By hypothesis and bounded cancellation, $[f(a)] = x \cdot u, f(b) = \bar u \cdot y \cdot v , [f(c)] = \bar v \cdot z$ and $[f(a \cdot b \cdot c)] = x \cdot y \cdot z$, where $l(u), l(v) < C$ and $y$ is nontrivial and $f$-legal. Furthermore, $l(y) = l(f(b)) - l(u) - l(v) > \lambda_f \cdot l( b) - 2C > \frac{2C}{\lambda_f - 1}$. The subpath of $b$ corresponding to $y$ has length $> \frac{2C}{\lambda_f - 1} - \frac{2C}{\lambda_f}$. By induction, there is a nontrivial subpath $s$ of $b$ such that $f^k(s)$ is a subpath of $[f^k(a \cdot b \cdot c)]$ for any $k \ge 1$. By removing subpaths of length $\frac{C}{\lambda_f - 1}$ from the start and end of $b$, $s$ can be chosen independent of $a$ and $c$. \end{proof} \begin{defn}\label{crit}The number $\frac{2C}{\lambda_f - 1}$ is known as the {\bf critical constant}. \end{defn} \section{The Action of Injective Endomorphisms on Outer Space}\label{cvf} Culler-Vogtmann introduced outer space in \cite{CV86} as a topological space with a nice $\Out(F)$-action. We will give two equivalent definitions and then describe the $\Out(F)$-action and, more generally, the semiaction of an injective endomorphism with respect to the two descriptions of outer space. See Karen Vogtmann's survey paper \cite{Vogt02} for details.
\begin{defn}In general, {\bf trees} refers to {\it real trees} ($0$-hyperbolic geodesic spaces) and a tree is {\bf simplicial} if it is homeomorphic to a locally finite CW-complex. The {\bf Culler-Vogtmann outer space} of $F$, denoted by $CV(F)$, is the set of connected simplicial trees, $T$, with a free minimal (left) action of $F$, $\alpha: F \to \mathrm{Isom}(T)$, up to the equivalence: $(T_1, \alpha_1) \sim (T_2, \alpha_2)$ if there is an $F$-equivariant homothety $f:T_1 \to T_2$, i.e., $f \circ \alpha_1(g) = \alpha_2(g) \circ f$ for all $g \in F$. Alternatively, $CV(F)$ is the set of connected finite metric core graphs, $\Gamma$, with a marking $\alpha: F \to \pi_1(\Gamma)$, up to the equivalence: $(\Gamma_1, \alpha_1) \sim (\Gamma_2, \alpha_2)$ if there is a homothety $f: \Gamma_1 \to \Gamma_2$ such that $[f_* \alpha_1] = [\alpha_2]$. Identify $\pi_1(\Gamma)$ with the group of deck transformations of $\tilde \Gamma$ to get the correspondence between the two descriptions. \end{defn} There are several equivalent ways of defining a topology on $CV(F)$. We need two of them for this paper; the first comes from length functions: for any representative $(T, \alpha)$ of an equivalence class in $CV(F)$, let $l_{(T,\alpha)}: F \to \mathbb R$ be the length function that maps $g$ to its translation distance in $(T, \alpha)$. The equivalence class $[T, \alpha]$ determines a projective length function $l_{[T,\alpha]} \in \mathbb P \mathbb R^F$. The function $\iota: CV(F) \to \mathbb P \mathbb R^F$ given by $[T,\alpha] \mapsto l_{[T,\alpha]}$ is injective and the closure of its image is compact \cite{CV86}. Pull back the topology on $\mathbb{PR}^F$ via $\iota$ to get a topology on $CV(F)$. Length functions will also be useful in establishing the existence of limit trees in the next section. The second definition of the topology is more concrete: for an equivalence class $[\Gamma, \alpha] \in CV(F)$, choose a representative such that $\Gamma$ has no bivalent vertices and $\mathrm{vol}(\Gamma) = 1$; the volume of a metric graph $\mathrm{vol}(\Gamma)$ is the sum of the lengths of all edges in the graph. Let $n$ be the number of edges in $\Gamma$ and identify $\sigma(\Gamma,\alpha)$ with the $(n-1)$-simplex one gets by varying the lengths of the edges of $\Gamma$ to get a homeomorphic graph $\Gamma'$ while still maintaining the equality $\mathrm{vol}(\Gamma') = 1$. In the tree description of $CV(F)$, this variation corresponds to equivariantly varying the metric on $(\tilde \Gamma, \alpha)$ to get $(\tilde \Gamma', \alpha')$. This gives us a decomposition of $CV(F)$ into a disjoint union of open simplices $\sigma(\Gamma, \alpha)$. Attaching maps for these simplices are given by decreasing the volume of some forest of $\Gamma$ to $0$. This decomposition of $CV(F)$ and description of attaching maps makes $CV(F)$ a locally finite {\it open simplicial complex}, i.e., a simplicial complex with some missing faces corresponding to collapsing noncontractible subgraphs. The set of open simplices of $CV(F)$, denoted by $SCV(F)$, can be made into a locally finite simplicial complex known as the {\it spine of outer space}, but, for the most part, we will treat it as a set with no added structure. {~} It is a theorem of Culler-Vogtmann that $CV(F)$ (with either of the equivalent topologies) is contractible \cite{CV86, Vogt17}. There is a natural right action of $\Out(F)$ on $CV(F)$ given by $[T, \alpha] \cdot [\phi] = [T, \alpha\phi]$ for $[T,\alpha]\in CV(F)$ and $[\phi] \in \Out(F)$.
Furthermore, this action induces an action on $SCV(F)$: $\sigma(T, \alpha) \cdot [\phi] = \sigma(T, \alpha\phi)$. For an injective (outer) endomorphism $\phi:F \to F$, there is a right (semi)action given by $[T, \alpha] \cdot [\phi] = [T', \alpha \phi]$ where $T'$ is the minimal tree of the {\it twisted} action $\alpha \phi: F \to \mathrm{Isom}(T)$, i.e., $T'$ is the minimal tree for $\phi(F)$. This induces an action on $SCV(F)$ by forgetting the metrics: $\sigma(T, \alpha) \cdot [\phi] = \sigma(T', \alpha\phi)$. For an $F$-tree $(T, \alpha)$ and a marked metric graph $(\Gamma, \alpha)$, we shall abuse notation and write only $T$ and $\Gamma$ when the actions and markings are clear. The goal of the next section is to describe the dynamics of the action of irreducible nonsurjective endomorphisms on $CV(F)$. We will show that the action has a unique attracting point and no other fixed points. We now give the second description of the action of an injective endomorphism $\phi:F \to F$ on $CV(F)$ and $SCV(F)$. Let $\Gamma$ be a marked metric graph, i.e., $[\Gamma, \alpha] \in CV(F)$. Fix any topological representative $f:\Gamma \to \Gamma$ for $\phi$. For any $i \ge 1$, the map $f^i:\Gamma \to \Gamma$ factors as $f^i = v_i h_i$ where $h_i: \Gamma \to S_i'$ is a composition of folds and $v_i: S_i' \to \Gamma$ is an immersion. The graph $S_i'$ is subdivided so that the immersion is simplicial. If we let $S_i \subset S_i'$ be the core subgraph, then this is the Stallings subgroup graph for $\phi^i(F)$ and the graphs fit into the following commutative diagram: \[ \xymatrix{ & S_1 \ar@{^{(}->}[d] & & S_i \ar@{^{(}->}[d] & S_{i+1} \ar@{^{(}->}[d] & \\ \Gamma \ar@{=}[d] \ar[r] & S_1' \ar[d]^{v_1} \ar[r] & \ar[r] \cdots & S_i' \ar[d]^{v_i} \ar[r] & S_{i+1}' \ar[d]^{v_{i+1}} \ar[r] & \cdots\\ \Gamma \ar[r]^f & \Gamma \ar[r]^f & \cdots \ar[r]^f & \Gamma \ar[r]^f & \Gamma \ar[r]^f & \cdots }\] Since $\phi$ is injective, the composition of folds $h_i: \Gamma \to S_i'$ is a homotopy equivalence which induces a marking $h_{i*} \alpha: F \to \pi_1(S_i') = \pi_1(S_i)$. Pull back the metric on $\Gamma$ via the immersion $v_i:S_i \to \Gamma$ to get a metric on $S_i$. By construction, $\tilde v_i(\tilde S_i) \subset \tilde \Gamma$ is the minimal tree for $\phi^i(F)$ and $[\tilde \Gamma] \cdot [\phi]^i = [\tilde S_i]$ as $\tilde v_i$ is an isometric embedding. Therefore, in terms of marked metric graphs, the action is given by $[\Gamma, \alpha] \cdot [\phi]^i = [S_i, h_{i*} \alpha]$. As the immersion $v_i$ and folds $h_i$ do not depend on the metric on $\Gamma$, we see that the action of $\phi$ on $CV(F)$ is piecewise-linear with respect to the open simplicial structure and it induces the action on $SCV(F)$ by forgetting the metrics. The following lemma tells us precisely when $\phi$ fixes an element of $SCV(F)$ or $CV(F)$. \begin{lem}\label{fix}Let $\phi:F \to F$ be an injective endomorphism. Then: \begin{enumerate} \item $\sigma(\Gamma) \in SCV(F)$ is fixed by $\phi$ if and only if $\phi$ is represented by an immersion on $\Gamma$. \item $[\Gamma] \in CV(F)$ is fixed by $\phi$ if and only if $\phi$ is represented by a local homothety on $\Gamma$. \end{enumerate} \end{lem} \begin{proof}Suppose $\sigma(\Gamma, \alpha) \cdot [\phi] = \sigma(\Gamma, \alpha)$, i.e., $\sigma(S_1, h_{1*} \alpha) = \sigma(\Gamma, \alpha)$. Then the composition of folds and core retraction $h_1: \Gamma \to S_1$ is homotopic to a homeomorphism. As $f = v_1 h_1$ and $v_1$ is an immersion, we get that $f$ is homotopic to an immersion.
Suppose $[\Gamma, \alpha] \cdot [\phi] = [\Gamma, \alpha]$. By the first part, we may assume $f$ is an immersion, $S_1 = S_1'$, and $h_1: \Gamma \to S_1$ is a homeomorphism. Finally, for the point $[\Gamma, \alpha]$ to be fixed, the immersion $v_1:S_1 \to \Gamma$ and homeomorphism $h_1: \Gamma \to S_1$ must induce the same projective metric on $S_1$. Thus $f$ is (homotopic to) a local homothety. \end{proof} We end the section with a description of the action of the endomorphism in Example~\ref{sapir}. \begin{eg}\label{sapir2} Recall $F=F(a,b)$ and $\phi:F \to F$ is given by $(a,b)\mapsto (ab, ba)$. In rank~2, the spine of outer space has the structure of a regular trivalent tree with a spike at the midpoint of edges. Set $F = \pi_1(R_2)$; then the standard rose is the marked graph $R_* = (R_2, id_F)$ and all other roses are given by $(R_2, \varphi)$ for some $[\varphi] \in \Out(F)$. Let $(B, \beta)$ be the barbell graph attached to $R_*$ and let $(T, \theta)$ be the theta graph between $R_*,~(R_2, \varphi_a)$, and $(R_2, \varphi_b)$, where $\varphi_a:(a,b)\mapsto(a,ab)$ and $\varphi_b:(a,b)\mapsto(ba,b)$ are the generators of $\Out(F)$. To compute $\sigma(R_2, \varphi) \cdot [\phi]$, first represent $\varphi \phi$ on $R_*$ and then fold. The composition of folds gives the resulting marked graph. For instance, $\sigma(R_2, \varphi_a)\cdot[\phi] = \sigma(T, \theta) = \sigma(R_2, \varphi_b) \cdot [\phi]$, $\sigma(R_2, \varphi_a^{-1}) \cdot [\phi] = \sigma(B, \beta)$, and $\sigma(R_2, \varphi_a^{-1}) \cdot [\phi]^2 = \sigma(T, \theta \varphi_a^{-1})$. Figure \ref{fig1} below illustrates one of these computations. Along these lines, we can show that \[ S = \left\{ \sigma(R_*),~\sigma(T, \theta),~\sigma(T, \theta \varphi_a^{-1}),~\sigma(B, \beta) \right\}\] is the set of $\phi$-periodic elements in $SCV(F)$; the first two are $\phi$-fixed while the latter two have $\phi$-period 2. By inducting on the roses of the spine, we can verify $SCV(F) \cdot [\phi] = S$. \end{eg} \begin{figure}[h] \centering \includegraphics[scale=0.36]{example} \caption{Computation showing $\sigma(R_2, \varphi_a^{-1}) \cdot [\phi] = \sigma(B, \beta)$.} \label{fig1} \end{figure} \section{Irreducible Nonsurjective Endomorphisms are Immersions}\label{immersions} Given an irreducible train track map $f:\Gamma \to \Gamma$ representing an injective endomorphism $\phi:F \to F$, the iterates of $\phi$ act on the $F$-tree $\tilde \Gamma$ by taking minimal trees of their twisted actions. If $l_{\tilde \Gamma}:F \to \mathbb R$ is the length function for $\tilde \Gamma$, then $l_{\tilde \Gamma \cdot \phi^n} = l_{\tilde \Gamma}\phi^n$. We define the limit tree $T_f$ to be the tree whose length function $l:F \to \mathbb R$ is given by \[ l = \lim_{n \to \infty} \lambda_f^{-n} \cdot \left( l_{\tilde \Gamma} \phi^n\right). \] As $f$ is an irreducible train track map, this limit exists by bounded cancellation. The $F$-action on $T_f$ is free if and only if $\phi$ is atoroidal, i.e., it has no periodic nontrivial conjugacy class/cyclic subgroup. The class $[T_f]$ need not be in $CV(F)$ and, in fact, it is not when $\phi$ is an infinite-order automorphism. \begin{lem}[{\cite[Lemma~3.4]{BFH97}}]\label{sink}If $f:\Gamma \to \Gamma$ is a clean representative for $\phi:F \to F$, then $[T] \cdot [\phi]^n \to [T_f]$ for all $[T] \in CV(F)$. \end{lem} In the lemma, the limit is taken in the space of projective length functions, $\mathbb{PR}^F$.
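For the Sapir map of Example~\ref{sapir}, the limit length function is easy to observe numerically. The sketch below (ours; all names are our own) approximates translation lengths in $T_f$ by the scaled cyclically reduced lengths $\lambda_\phi^{-n}\, l([\phi^n(g)])$ with $\lambda_\phi = 2$; here the combinatorial metric on the rose agrees with the Perron-Frobenius metric, since the transition matrix is $\left(\begin{smallmatrix}1&1\\1&1\end{smallmatrix}\right)$.

\begin{verbatim}
f = {'a': 'ab', 'b': 'ba'}

def reduce_word(w):
    """Free reduction: cancel adjacent inverse pairs like 'aA' or 'Bb'."""
    out = []
    for x in w:
        if out and out[-1] == x.swapcase():
            out.pop()
        else:
            out.append(x)
    return ''.join(out)

def cyclic_length(w):
    """Length of the cyclically reduced representative of w."""
    w = reduce_word(w)
    while len(w) > 1 and w[0] == w[-1].swapcase():
        w = w[1:-1]
    return len(w)

def phi(w):
    """Apply the Sapir endomorphism letter by letter, then reduce."""
    return reduce_word(''.join(
        f[x] if x.islower() else f[x.lower()][::-1].swapcase() for x in w))

for g in ['a', 'aB', 'abAB']:
    w = g
    for n in range(12):
        w = phi(w)
    print(g, '->', cyclic_length(w) / 2.0 ** 12)  # approx. translation length
\end{verbatim}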
We use $[T_\phi] = [T_f]$ and $\lambda_\phi = \lambda_f$ to emphasize independence from the choice of train track representative $f$. When $\phi$ is represented by an irreducible immersion $f:\Gamma \to \Gamma$ and $\Gamma$ has the train track linear structure, then $f$ is a local $\lambda_\phi$-homothety, $T_\phi = \tilde \Gamma$, and $[T_\phi] \in CV(F)$. In a certain sense, the immersion is unique since $[\Gamma]$ is the unique $\phi$-fixed point in $CV(F)$. {~} In the general setting considered in the previous section, not much can be said about the graphs $S_i$ and immersions $v_i: S_i' \to \Gamma$. However, if $f:\Gamma \to \Gamma$ is assumed to be a clean representative, we gain more control; for example, we will show in this case that $S_i'$ is a core graph, hence $S_i = S_i'$. To do this, we need to introduce a new structure: \begin{defn}Let $f:\Gamma \to \Gamma$ be an irreducible train track map and, for some $i \ge 1$, $f^i = hg$ where $g:\Gamma \to X$ and $h:X \to \Gamma$ are surjective graph maps and $X$ is a finite core graph. A turn at a vertex $v$ of $X$ is {\bf legal} if its image under $h$ is an $f$-legal turn. The {\bf relative Whitehead graph} of $(X, g, h)$ at $v$ is a simple graph whose vertex set is the set of half-edges at $v$, $T_v(X)$, and a turn at $v$ is an edge of the relative Whitehead graph if it is in the edge-path $g(f^j(e))$ for some $j \ge 0$ and edge $e$ in $\Gamma$. Note that edges of the relative Whitehead graphs are legal. \end{defn} The next lemma tells us how subdivision and folding affect the relative Whitehead~graphs. \begin{lem}\label{rel1}Suppose $f^i: \Gamma \xrightarrow{g} X \xrightarrow{h} \Gamma $ has connected relative Whitehead graphs and $h=h_2 h_1$ where $h_1: X \to Y$ is a subdivision or a single fold. Then $Y$ is a core graph and the relative Whitehead graphs of $(Y, h_1 g, h_2)$ are connected. \end{lem} \begin{proof}Suppose $h_1$ is a subdivision. If $v$ is a vertex of $Y$ that was added by the subdivision, then $T_v(Y)$ consists of the two half-edges created by subdividing some edge $e'$ of $X$ at $v$. In particular, the two elements of $T_v(Y)$ are joined by an edge since $g$ is surjective. If $v$ is a vertex of both $Y$ and $X$, then $T_v(Y) = T_v(X)$ and subdivision makes no changes to the relative Whitehead graph at $v$. So the relative Whitehead graph of $(Y, h_1g, h_2)$ is connected since we assumed $(X, g, h)$ has connected relative Whitehead graphs. Suppose $h_1$ is a single fold. The only way to produce a univalent vertex in $Y$ with a fold is by folding a bivalent vertex of $X$. But bivalent vertices only have one turn, which is legal since the relative Whitehead graphs of $(X, g, h)$ are connected. At the origin vertex, the fold identifies two distinct vertices of the relative Whitehead graphs and this preserves connectedness. At the two terminal vertices, the fold identifies two distinct vertices, one from each relative Whitehead graph, and this gluing of two connected graphs produces a connected graph. If a vertex in $X$ is not part of the fold, then its image in $Y$ will have the same connected relative Whitehead graph. \end{proof} Recall that to construct $S_i'$, we subdivide and fold the map $f^i:\Gamma \to \Gamma$ to get the immersion $v_i: S_i' \to \Gamma$. Suppose $f$ is a clean map. For the base case, the relative Whitehead graphs of $(\Gamma, id_\Gamma, f^i)$ are connected as they are isomorphic to the Whitehead graphs of $f^i$.
By Lemma \ref{rel1} and induction on folds, $S_i'$ is a core graph and the relative Whitehead graphs of $(S_i, h_i, v_i)$ are connected. So the immersion $v_i$ maps edge-paths connecting branch points ({\it natural edges}) of $S_i$ to $f$-legal edge-paths in $\Gamma$. The commutative diagram in the previous section becomes: \[\xymatrix{ \Gamma \ar[r] \ar@{=}[d] & S_1 \ar[r] \ar[d]^{v_1} & S_2 \ar[r] \ar[d]^{v_2} & \cdots \ar[r] & S_i \ar[r] \ar[d]^{v_i} & S_{i+1} \ar[r] \ar[d]^{v_{i+1}} & \cdots \\ \Gamma \ar[r]^f & \Gamma \ar[r]^f & \Gamma \ar[r]^f & \cdots \ar[r]^f & \Gamma \ar[r]^f & \Gamma \ar[r] & \cdots }\] Aside from the construction of this diagram and the next lemma, this section and the next are independent of each other. This lemma will only be used in the proof of Proposition~\ref{invSbgrp}, the main result of the next section. \begin{lem}\label{rel2}Let $f, v_i, h_i$ be as in the previous paragraph. The map $\hat f^i: S_i \to S_i$ given by $\hat f^i = h_i v_i$ is a clean representative for $\phi^i$. \end{lem} \begin{proof} For all $n \ge 1$, $(\hat f^i)^{n+1} = (h_i v_i)^{n+1} = h_i ( v_i h_i )^{n} v_i = h_i f^{in} v_i $. Since $f$ is a train track, $v_i$ is a simplicial immersion, and $h_i$ folds illegal turns only, it follows that $\hat f^i$ is a train track too. Since $f$ is clean, there is an $n \ge 1$ such that, for any edge $e$ in $\Gamma$, the edge-path $f^{in}(e)$ covers $\Gamma$ and contains all the edges of the Whitehead graphs of $f^i$. As $h_i$ is surjective, $(\hat f^i)^{n+1}$ is surjective when restricted to any edge and $\hat f^i$ has a primitive transition matrix. Since folding only identifies vertices of relative Whitehead graphs, $h_i f^{in}(e)$ contains all the edges of the relative Whitehead graphs of $(S_i, h_i, v_i )$. As $v_i$ maps edges to edges, we have that the Whitehead graphs of $\hat f^i$ contain the relative Whitehead graphs of $(S_i, h_i, v_i)$ as subgraphs. Since $v_i$ is surjective, we have the reverse containment too. So the Whitehead graphs of $\hat f^i$ are connected and the map is clean. \end{proof} With this set-up, we are now ready to present a new proof of an unpublished theorem due to Patrick Reynolds \cite[Corollary~5.5]{Rey}. The first half of this proof is based on an argument due to Ilya~Kapovich showing that irreducible nonsurjective endomorphisms are expansive; cf.\ \cite[Proposition~3.11]{Rey}. \begin{thm}\label{part2}If $\phi:F\to F$ is an irreducible nonsurjective endomorphism, then it can be represented by a clean immersion, unique amongst irreducible immersions.\end{thm} \begin{proof} Let $f:\Gamma \to \Gamma$ be a clean representative for $\phi: F \to F$ given by Corollary~\ref{clean2}. By the discussion following Lemma~\ref{rel1}, we now construct core graphs $S_i$, folds $h_i:\Gamma \to S_i$, and immersions $v_i: S_i \to \Gamma$ such that $f^i = v_i h_i$ and the natural edges of $S_i$ are legal, i.e., they consist of legal turns. By Lemma~\ref{nonsurjSt}, nonsurjectivity implies that the number of edges in $S_i$ is unbounded as $i \to \infty$. So for some $i \gg 0$, $S_i$ has a natural edge longer than the critical constant. By Lemma~\ref{critLem}, there is a nontrivial subpath $s$ of the long natural edge such that, for all $ j > i$, the folds in $S_i \to S_j$ are supported in the complement of $s$.
Fix an index $i \gg 0$ such that $S_i$ has the maximal number of natural edges longer than $\frac{2C\cdot L \cdot\lambda_\phi}{\lambda_\phi - 1}$, where $L = L( \mathrm{rank}(F))$ is the maximum number of natural edges of an embedded loop of a marked graph. Denote by $K_i$ the proper subgraph consisting of the remaining {\it short} natural edges of $S_i$ with length at most the critical constant. Let $\hat f: S_i \to S_i$ be a map representing $\phi$, i.e., $[\hat f_* h_{i*}] = [h_{i*} f_*]$. If $K_i$ has an embedded loop $\rho$, then by construction $f v_i(\rho)$ decomposes into at most $L$ legal paths, each of length at most $\frac{2C \lambda_\phi}{\lambda_\phi - 1}$. So $[f v_i(\rho)]$ lifts to the loop $[\hat f(\rho)]$ in $K_i$. Thus if $K_i$ is not a forest, then $K_i$ is an $\hat f$-invariant proper subgraph up to homotopy. Since $\phi$ is irreducible, $K_i$ must be a forest. Therefore, the loops in $S_i$ grow exponentially, $\phi$ is atoroidal, and the action of $F$ on $T_\phi$ is free. We showed in the previous paragraph that folds in the map $S_i \to S_j~(j > i)$ are supported in some forest $K \subset S_i$. Since there are finitely many combinatorially distinct ways to fold a forest, there are fixed $j > i$ and $k \ge 1$ such that the composition of folds $S_j \to S_{j+k}$ is homotopic to a homeomorphism. This means $[\tilde S_j], [\tilde S_{j+k}]$ lie in the simplex $\sigma(\tilde S_j) \subset CV(F)$. In particular, $\sigma(\tilde S_j) \cdot [\phi]^k = \sigma(\tilde S_j)$ and the sequence $[\tilde S_{j + kn}] ~ (n \ge 0)$ lies in $\sigma(\tilde S_j)$. By definition of the limit tree $T_f$ and the action $\tilde \Gamma \cdot \phi^n = \tilde S_n$, we get: \[ T_f = \lim_{n \to \infty} \lambda_f^{-n} \tilde \Gamma \phi^n = \lim_{n \to \infty} \lambda_f^{-j-kn} \tilde \Gamma \phi^{j+kn} = \lim_{n \to \infty} \lambda_f^{-j-kn} \tilde S_{j+kn}.\] Therefore, the limit tree is simplicial since $[T_f]$ is in the (simplicial) closure of $\sigma(\tilde S_j)$, i.e., the $F$-quotient of $T_f$, call it $\Gamma_0$, is a graph obtained by collapsing a subgraph of $S_j$ and rescaling the metric. As $T_f$ is free and simplicial, $[T_f] \in CV(F)$. By definition and Lemma~\ref{fix}, $[T_f]$ is fixed by $\phi$ and $\phi$ is represented by a local $\lambda_\phi$-homothety $f_0:\Gamma_0 \to \Gamma_0$. Furthermore, the local homothety $f_0$ is expanding since $\lambda_\phi > 1$; so $f_0$ has no invariant forests and the irreducibility of $\phi$ implies $f_0$ is clean by Corollary~\ref{clean2}. Uniqueness follows from Lemma~\ref{fix} and uniqueness of the fixed point $[T_\phi]$ due to Lemma~\ref{sink}. \end{proof} This theorem tells us that the action of an irreducible nonsurjective endomorphism on $CV(F)$ has a unique fixed point that is also a global attracting point. Reynolds studied this action further; for instance, the action converges to the fixed point uniformly on compact sets and, for {\it admissible} endomorphisms, the action can be extended to the compactification of outer space $\overline{CV(F)}$, where the convergence is uniform. We now use the action on outer space again to prove a partial converse to Theorem~\ref{part2}, which can also be thought of as a criterion for fully irreducible nonsurjective endomorphisms.
\begin{thm}\label{part3}If $\phi:F \to F$ is represented by a clean immersion whose Whitehead graphs have no cut vertices, then $\phi$ is nonsurjective and fully irreducible.\end{thm} \begin{proof} Since $\phi$ is represented by a clean immersion $f:\Gamma \to \Gamma$, it is nonsurjective. Suppose $\phi^n(A) \le gAg^{-1}$ where $A \le F$ is a proper free factor, $g \in F$, and $n \ge 1$. Let $S(A)$ be the Stallings subgroup graph corresponding to $A$ with respect to $\Gamma$. Set $\psi = i_g \circ \phi^n$ so that $\psi(A) \le A$ and the immersion $f^n$ lifts to an immersion $g: S(A) \to S(A)$ representing $\left.\psi\right|_A$. Complete $\Delta_0 = S(A)$ to a graph $\Gamma'$ with a marking $\pi_1(\Gamma') \cong F$ and extend $g$ to the rest of $\Gamma'$ such that $g: \Gamma' \to \Gamma'$ is a topological representative for $\psi$ and $\Delta_0$ is a noncontractible $g$-invariant proper subgraph corresponding to $A$. Recall from Section \ref{cvf} that the Stallings subgroup graph $S_i = S(\psi^i(F))$ with respect to $\Gamma'$, along with the marking given by folding $g^i: \Gamma' \to \Gamma'$, determines the $i$-th iterate of $[\Gamma'] \in CV(F)$ under the $\psi$-action, i.e., $[\Gamma'] \cdot [\psi]^i = [S_i]$. Since $[S_i] \to [\Gamma]$ by Lemma~\ref{sink} and the spine of outer space is locally finite, we have that the sequence $\sigma(S_i) ~(i \ge 1)$ in $SCV(F)$ is eventually periodic. So for some fixed $i, j \ge 1$, we have $\sigma(S_i) = \sigma(S_{i+j})$ and, by Lemma \ref{fix}, $\psi^j$ can be represented by an immersion on $S_i$. Let $h_i:\Gamma' \to S_i'$ be the composition of folds given by the construction of $S_i$. Since $\left.g\right|_{\Delta_0}$ is an immersion, the corresponding restriction $\left.h_i\right|_{\Delta_0}$ is either a homeomorphism or an identification of vertices of $\Delta_0$ possibly followed by folds. Set $\Delta_i = h_i(\Delta_0)$ and let $g_i: S_i \to S_i$ be the induced map representing $\psi$, so that $g_i^j$ represents $\psi^j$. Then $\Delta_i$ is a noncontractible $g_i$-invariant subgraph and $\left.g_i\right|_{\Delta_i}$ is an immersion. By the previous paragraph, $g_i^j$ is homotopic to an immersion $\gamma: S_i \to S_i$. The homotopy will preserve the invariance of $\Delta_i$ so that $\Delta_i$ is a $\gamma$-invariant subgraph. As $\sigma(S_i)$ is fixed by $\psi^j$, the sequence $S_{i+jm}$ is constructed by pulling back the metric of $S_i$ via $\gamma^m$. Iteratively pulling back the metric via $\gamma$ and normalizing the metric has the effect of collapsing $\gamma$-invariant forests so that the induced map is a local homothety. By uniqueness of the limit $[S_{i+jm}] \to [\Gamma]$, the induced map must be $f^{nj}$ and $\Delta_\infty$, the image of $\Delta_i$ under the collapse map, is an $f^{nj}$-invariant subgraph. But $f^{nj}$ is a clean map, so $\Delta_\infty = \Gamma$. Let $h_\infty: \Delta_0 \to \Delta_\infty = \Gamma$ be the induced map. By construction, $ f^{nj} \circ h_\infty = \left.h_\infty \circ g^j \right|_{\Delta_0}$ and, as $h_\infty$ is an identification of some vertices possibly followed by a folding and/or a collapse of a forest, the Whitehead graphs of $f^{nj}$ are determined by where $g^j$ maps the edges of $\Delta_0$. So $f^{nj}$ will have disconnected Whitehead graphs or Whitehead graphs with cut vertices (depending on folds in $h_\infty$) at the identified vertices -- a contradiction. \end{proof} \begin{eg}\label{countereg} Let $F = F(a,b,c)$ and let $\phi:F \to F$ be given by $(a,b,c) \mapsto (aba, c^2, cabac)$.
The obvious topological representative on the standard marked rose is a clean map. Furthermore, the Stallings subgroup graphs $S_i = S(\phi^i(F))$, for $i \ge 1$, all determine the same marked graph (modulo metric), i.e., the same vertex in the spine. In particular, we can verify that $\phi$ is induced by a clean immersion on $S_1$. However, $\phi$ is reducible; in fact, $\phi(F) \le \langle aba, c \rangle = A$ and the latter is a proper free factor of $F$. Furthermore, the restriction $\phi_A$ is represented by a clean immersion on a rose whose Whitehead graph has a cut vertex. However, by Theorem~\ref{hypthm} below, the {\it mapping torus} of $\phi_A$ is word-hyperbolic and, as $A$ has rank 2, the endomorphism $\phi_A$ is fully irreducible. Thus, the lack of cut vertices in Theorem~\ref{part3} is not a necessary condition. \end{eg} It would be interesting to find an algorithmic characterization of fully irreducible nonsurjective endomorphisms. \section{Subgroups Invariant Under Irreducible Endomorphisms}\label{iwip} In this section, we generalize a result by Bestvina-Feighn-Handel \cite[Proposition~2.4]{BFH97} and I.~Kapovich~\cite[Proposition~4.2]{Kap14} that characterizes the finitely generated subgroups of $F$ that support a {\em leaf of the lamination} of an irreducible automorphism. \begin{defn}\label{leaf} Given an expanding irreducible train track map $f:\Gamma \to \Gamma$, the {\bf (attracting) lamination of $f$}, denoted by $\Lambda(f)$, is defined by iterated neighbourhoods of $f$-periodic non-vertex points. Precisely, suppose $x \in \mathrm{Int}(e)$ for some $e \in E(\Gamma)$ and $k \ge 1$ are such that $f^k(x) = x$. Then a {\bf leaf of the lamination of $f$} is the isometric immersion $\gamma_x:\mathbb R \to \Gamma$ such that $\gamma_x(0)=x$ and $f^k(\gamma_x(r)) = \gamma_x(\lambda_f^k \cdot r)$ for all $r \in \mathbb R$, unique up to orientation. \[ \Lambda(f) = \left\{\gamma_x \,:\, x \text{ is an $f$-periodic non-vertex point} \right\} \] For any integer $m \ge 1$, $f^m$-periodic points are $f$-periodic and vice-versa. So it follows that $\Lambda(f) = \Lambda(f^m)$ for any $m \ge 1$. We shall say a nontrivial subgroup $H \le F$ {\bf supports a leaf of } $\Lambda(f)$ if some leaf $\gamma_x \in \Lambda(f)$ has a lift $\hat \gamma_x: \mathbb R \to S(H)$ to the Stallings subgroup graph of $H$. \end{defn} We now address the difficulty that arises when generalizing Bestvina-Feighn-Handel and I.~Kapovich's results. Any automorphism $\phi: F \to F$ permutes the finite index subgroups with the same index; so given any finite index subgroup $H'$, there exists $i \ge 1$ such that $\phi^i(H') = H'$. This fact is used to lift a clean map representing $\phi$ to a clean map on the Stallings subgroup graph $S(H')$. However, this fails when dealing with nonsurjective injective endomorphisms, i.e., there is no reason why some $\phi$-iterate of $H'$ must be a subgroup of $H'$. The next lemma gives us a way of getting around this failure. The key observation is to look at backward iteration: when $\phi$ is injective, preimages of finite index subgroups are finite index subgroups of the same or smaller index. \begin{lem}\label{lemFI} Let $\phi : F \to F$ be an injective endomorphism of a free group $F$. If $H' \le F$ is a finite index subgroup, then there exist $j > k \ge 0$ such that $\phi^{-k}(H') = \phi^{-j}(H')$.
Furthermore, for $K = \phi^{-k}(H')$, there is an induced set bijection $\varphi: F/K \to F/K$ such that the following diagram commutes: \[ \xymatrix{ F \ar[d]^\pi \ar[r]^{\phi^{j-k}} &F\ar[d]^\pi\\ F/K \ar[r]^\varphi &F/K}\] \end{lem} \begin{proof} Set $H_k = \phi^{-k}(H')$ for $k \ge 0$. Then $\phi^k(H_k) = \phi^k(F)\cap H'$. Since $H'$ has finite index in $F$, $\phi^k(H_k) = \phi^k(F)\cap H'$ has finite index in $\phi^k(F)$. In fact, $[F:H'] \ge [\phi^k(F):\phi^k(H_k)]$. But $\phi^k: F \to \phi^k(F)$ is an isomorphism that maps $H_k \le F$ to $\phi^k(H_k) \le \phi^k(F)$. Thus, $[\phi^k(F):\phi^k(H_k)] = [F:H_k]$ and $H_k$ has finite index in $F$ bounded by $[F:H']$ for all $k \ge 0$. As there are only finitely many subgroups with index bounded by $[F:H']$, there must exist $j > k \ge 0$ such that $H_k = H_j$, i.e., $\phi^{-k}(H') = \phi^{-j}(H')$. Let $K = \phi^{-k}(H')$ and $\pi:F \to F/K$ be the (left) coset projection map. The function $\pi \circ \phi^{j-k}$ factors through $\pi$ if and only if $\phi^{j-k}(K) \le K$, or equivalently, $K \le \phi^{k-j}(K)$, and the induced function $\varphi:F/K \to F/K$ is an injection if and only if $\phi^{k-j}(K) \le K$. By construction, $\phi^{k-j}(K)=K$, i.e., both conditions are satisfied, so $\pi \circ \phi^{j-k} = \varphi \circ \pi$ where $\varphi$ is a bijection since it is an injection of the finite set $F/K$ into itself. \end{proof} The main result of the section follows: \begin{prop}\label{supSbgrp} Suppose $\phi : F \to F$ is injective and represented by a clean map $f$. If $H \le F$ is a nontrivial finitely generated subgroup that supports a leaf of $\Lambda(f)$, then \[ [\phi^k(F) : \phi^k(F) \cap H] < \infty \quad \text{for some } k \ge 0. \] \end{prop} \begin{proof} As $H$ is nontrivial and finitely generated, $S(H)$ is a nontrivial finite graph and we can add edges to $S(H)$ if necessary to extend the immersion $S(H) \to \Gamma$ to a finite cover $S(H') \to \Gamma$ corresponding to $H \le H' \le F$. Thus $[F:H'] < \infty$ and we can apply Lemma \ref{lemFI} to get $j > k \ge 0$ such that $\phi^{-k}(H') = \phi^{-j}(H')$. Set $K = \phi^{-k}(H')$. Then $\phi^k(K) = \phi^k(F) \cap H'$ has finite index in $\phi^k(F)$. Recalling the diagram preceding Lemma~\ref{rel2}, the graph $S(\phi^k(F)) = S_k$ and the cover corresponding to $\phi^k(K) \le \phi^k(F)$ lie in the following commutative diagram: \[\xymatrix{ S(\phi^k(K)) \ar[d]^p & S(\phi^k(K)) \ar[d]^p \\ S(\phi^k(F)) \ar[d]^{v_k} \ar@{-->}[r]^{g} & S(\phi^k(F)) \ar[d]^{v_k} \\ \Gamma \ar[ru]_{h_k } \ar[r]_{f^k} & \Gamma }\] where $g: S(\phi^k(F)) \to S(\phi^k(F))$ is clean by Lemma \ref{rel2} and $p: S(\phi^k(K)) \to S(\phi^k(F))$ is a finite cover. The map $h_k$ maps $f^k$-periodic points to $g$-periodic points and $v_k$ maps $g$-periodic points to $f^k$-periodic points. This allows us to identify $\Lambda(f)$ with $\Lambda(g)$. Let $\Delta_H = S(\phi^k(F) \cap H)$. This is a nontrivial graph since $\phi(H) \le H$. Note that $ S(\phi^k(K))$ is the pullback of $ S(\phi^k(F)) \to \Gamma$ and $S(H') \to \Gamma$ and $\Delta_H$ is the pullback of $ S(\phi^k(F)) \to \Gamma$ and $S(H) \to \Gamma$. As $S(H)$ is a subgraph of $S(H')$, we have that $\Delta_H$ is a subgraph of $ S(\phi^k(K))$.
The graphs fit in this commutative diagram: \[\xymatrix{ \Delta_H \ar[d] \ar@{^{(}->}[r] & S(\phi^k(K)) \ar[d] \ar[r]^p & S(\phi^k(F)) \ar[d]^{v_k} \\ S(H) \ar@{^{(}->}[r] & S(H') \ar[r] &\Gamma }\] Since $ S(\phi^k(F))$ and $S(H)$ support a leaf of $\Lambda(f)$, it follows that their pullback $\Delta_H$ supports a leaf of $\Lambda(f)$. Following the identification $\Lambda(f) \cong \Lambda(g)$, we have that $\Delta_H$ supports a leaf of $\Lambda(g)$. Lemma~\ref{lemFI} says $\phi^{j-k}$ induces a permutation of $F/K \cong \phi^k(F)/\phi^k(K)$. Therefore, the map $g^{j-k}:S(\phi^k(F))\to S(\phi^k(F))$ lifts to a map $\hat g: S(\phi^k(K))\to S(\phi^k(K))$ with the property: if $g^{j-k}(x) = x$, then $\hat g$ permutes the elements of $p^{-1}(x)$. The rest of the argument follows that of Bestvina-Feighn-Handel \cite[Lemma~2.1]{BFH97}. We include the details for completeness; see also \cite[Proposition~4.2]{Kap14}. \begin{claim}The map $\hat g: S(\phi^k(K)) \to S(\phi^k(K))$ is clean. \end{claim} \begin{proof}Let $\{a', b'\}$ be a turn at a vertex $v \in S(\phi^k(K))$ such that its projection under $p$, $\{a, b\}$, is an edge of the Whitehead graph of $g^{j-k}$ at $p(v)$. Since $g^{j-k}$ is a clean map, we can replace it with an iterate and assume $g^{j-k}(a) = \ldots ab \ldots$. So $a$ contains a $g^{j-k}$-fixed point $x$ and, consequently, $\hat g$ permutes the lifts $p^{-1}(x)$. Replace $g^{j-k}$ with an iterate if necessary and assume $\hat g$ fixes $p^{-1}(x)$ pointwise, and let $x' \in p^{-1}(x)$ be the lift of $x$ in $a'$. Then $x'$ is a $\hat g$-fixed point and $\hat g(a') = \ldots a'b' \ldots$. An identical argument shows that $\hat g(b') = \ldots a'b' \ldots$ after passing to a power if necessary. Thus the Whitehead graph at $v$ with respect to $\hat g$ is isomorphic to the Whitehead graph at $p(v)$ with respect to $g^{j-k}$. So the Whitehead graphs of $ S(\phi^k(K))$ with respect to $\hat g$ are connected and $\hat g$ is a train track as the turns in $\hat g(e')$ are lifts of the turns in $g^{j-k}(p(e'))$ for any edge $e'$ in $ S(\phi^k(K))$. It remains to show that $\hat g$ is irreducible. Since the Whitehead graphs of $ S(\phi^k(K))$ are connected, for any edges $a, b$ in $ S(\phi^k(K))$, there is a sequence of turns $\{\epsilon_1, \epsilon_2\}, \{\epsilon_3, \epsilon_4\}, \ldots, \{\epsilon_{2l-1}, \epsilon_{2l}\}$ such that each turn is an edge of the corresponding Whitehead graph, $\epsilon_{2m}, \epsilon_{2m+1}$ are half-edges of the same edge for $1 \le m < l$, and $\epsilon_1, \epsilon_{2l}$ are half-edges of $a, b$ respectively. Let $a=e_0, e_1, \ldots, e_{l-1}, e_l = b$ be the corresponding sequence of edges. By the previous paragraph, some iterate of $\hat g$ maps $e_m$ to $e_{m+1}$ for $0 \le m < l$. By induction, some iterate of $\hat g$ maps $a$ to $b$. As $a$ and $b$ were arbitrary, $\hat g$ is an irreducible train track. \end{proof} \begin{claim}The subgraph $\Delta_H \subset S(\phi^k(K))$ is not proper, i.e., $\Delta_H = S(\phi^k(K))$. \end{claim} \begin{proof}Recall that a leaf of $\Lambda(g) = \Lambda(g^{j-k})$ is constructed by iterating neighbourhoods of some $g^{j-k}$-periodic non-vertex point. Suppose $x \in \mathrm{Int}(e)$ for some edge $e$ in $S(\phi^k(F))$ and $l \ge 1$ are such that $g^{(j-k)l}(x) = x$ and $g^{(j-k)l}$ lifts to a map $\hat g^l: S(\phi^k(K)) \to S(\phi^k(K))$ that fixes $p^{-1}(x)$ pointwise. Then the lift of $\gamma_x \in \Lambda(g^{j-k})$ to $\Delta_H$ can be constructed by using $\hat g^l$ to iterate a neighbourhood of some $x' \in p^{-1}(x) \cap \Delta_H$.
Let $e'$ be the edge containing $x'$. Since $\hat g$ is clean, $\hat g^m(e')$ surjects onto the whole graph $S(\phi^k(K))$ for some $m \ge 1$ and any iterated neighbourhood of $x'$ will eventually cover $ S(\phi^k(K))$. Therefore, $S(\phi^k(K)) \subset \Delta_H$. \end{proof} So the natural map $\Delta_H \to S(\phi^k(F))$ is a finite cover and $[\phi^k(F) : \phi^k(F) \cap H] < \infty$. \end{proof} In this paper, we will need the conclusion of the previous proposition to hold for {\it invariant} subgroups: \begin{prop}\label{invSbgrp} Suppose $\phi : F \to F$ is injective and represented by a clean map. If $H \le F$ is a finitely generated subgroup such that $\phi(H) \le H$ and $H$ contains a $\phi$-expanding conjugacy class, then \[ [\phi^k(F) : \phi^k(F) \cap H] < \infty \quad \text{for some } k \ge 0. \] \end{prop} \begin{proof} Suppose $\phi(H) \le H$ for some finitely generated subgroup $H \le F$. Let $f: \Gamma \to \Gamma$ be the given clean map and $\rho$ be an $f$-expanding immersed loop in $\Gamma$ that lifts to a loop in $S(H)$. The invariance $\phi(H) \le H$ implies the loops $[f^{k}(\rho)]$ lift to loops in $S(H)$ for all $k \ge 1$. As $\rho$ is $f$-expanding and $f$ is a clean map, the length of $[f^{k}(\rho)]$ grows without bound as $k \to \infty$, while the number of $f$-illegal turns in $[f^{k}(\rho)]$ remains bounded. Thus, for some $k \ge 1$, the loop $[f^{k}(\rho)]$ will contain an $f$-legal subpath longer than the critical constant. By Lemma~\ref{critLem}, there is a nontrivial subpath $s$ of $\rho$ such that $f^{k}(s)$ is a subpath of $[f^{k}(\rho)]$ for all $k \ge 1$. As $f$ is clean, some $f$-iterate of $s$ maps onto $\Gamma$. Let $x$ be an $f$-periodic non-vertex point of $\rho$ and $\gamma_x: \mathbb R \to \Gamma$ be the corresponding leaf of $\Lambda(f)$. For all real $r > 0$, the path $\gamma_x([-r,r])$ is a subpath of $[f^{k}(\rho)]$ for some $k \ge 1$ since $x$ is contained in some $f$-iterate of $s$. Thus $\left.\gamma_x\right|_{[-r, r]}$ has a lift $\gamma_{x,r} : [-r, r] \to S(H)$. There are only finitely many preimages of $x$ in $S(H)$, so after passing to an unbounded increasing subsequence $r_m$, we can assume the sequence $(\gamma_{x,r_m}(0))_{m=1}^\infty$ is constant. By uniqueness of lifts, $\gamma_{x,r_{m+1}}$ is an extension of $\gamma_{x,r_{m}}$ for $m \ge 1$. The limit immersion $\gamma_{x, \infty}: \mathbb R \to S(H)$ is a lift of $\gamma_x$ and so $H$ supports a leaf of $\Lambda(f)$. The conclusion follows from Proposition~\ref{supSbgrp}. \end{proof} We shall now give necessary and sufficient conditions for an endomorphism to be fully irreducible. The reader only interested in our hyperbolicity result can skip to the next section. \begin{thm}\label{propKap} Let $\phi:F \to F$ be an injective endomorphism. Then $\phi$ is fully irreducible if and only if $\phi:F \to F$ has no periodic cyclic free factor, is represented by a clean map, and its image $\phi(F)$ is not contained in a proper free factor. \end{thm} \begin{proof} The forward direction follows from the definition of fully irreducible and Corollary~\ref{clean2}. We now prove the reverse direction. Let $A \le F$ be a $\phi^n$-invariant free factor for some $n \ge 1$ and assume $A$ has minimal rank (for all $n$). Then $i_g \circ \phi^n(A) \le A$ for some inner automorphism $i_g$. If $A$ is cyclic then it is generated by an element in a $\phi$-expanding conjugacy class since it cannot be periodic. Otherwise, the minimality assumption implies $\left.i_g \circ \phi^n\right|_A$ is fully irreducible.
In both cases, $A$ contains a $\phi$-expanding conjugacy class. As $\phi$ is represented by a clean map, so is $i_g \circ \phi^n$. By Proposition~\ref{invSbgrp}, there exists $k \ge 0$ such that $ (i_g\circ\phi^n)^k(F) \cap A$ has finite index in $(i_g \circ \phi^n)^k(F)$. It remains to show $A = F$, from which it follows that $\phi$ is fully irreducible. Since $A$ is a free factor of $F$, the intersection $ (i_g\circ\phi^n)^k(F) \cap A$ is a free factor of $(i_g \circ \phi^n)^k(F)$. The only finite index free factor of a finitely generated free group is the free group itself. Thus $ (i_g \circ \phi^n)^k(F) \cap A = (i_g \circ \phi^n)^k(F)$ and $(i_g \circ \phi^n)^k(F)\le A$. Recall that $\phi(F)$ is not contained in a proper free factor. Suppose $\phi^{l-1}(F)$ is not contained in a proper free factor of $F$ for some $l \ge 2$. Let $K \le F$ be a free factor such that $\phi^l(F) \le K$. Then $\phi(\phi^{l-1}(F))=\phi^l(F) \le K \cap \phi^{l-1}(F)$ and the intersection is a free factor of $\phi^{l-1}(F)$. As $\phi^{l-1}$ is injective, we have that $\phi|_{\phi^{l-1}(F)}$ is conjugate to $\phi$. Therefore, $\phi^l(F)$ is not contained in a proper free factor of $\phi^{l-1}(F)$, hence $K \cap \phi^{l-1}(F) = \phi^{l-1}(F)$, i.e., $\phi^{l-1}(F) \le K$. By the induction hypothesis, $\phi^{l-1}(F)$ is not contained in a proper free factor of $F$, hence $K= F$. So $\phi^l(F)$ is not contained in a proper free factor of $F$. By induction, $\phi^n(F)$ is not contained in a proper free factor of $F$. We also have that $i_g \circ \phi^n(F)$ is not contained in a proper free factor of $F$ since $i_g$ is an automorphism. By the same induction argument, $(i_g \circ \phi^n)^k(F)$ is not contained in a proper free factor of $F$. Therefore, $(i_g \circ \phi^n)^k(F) \le A$ implies $A=F$ and $\phi$ is fully irreducible. \end{proof} Example~\ref{countereg} shows that the condition on the image of the endomorphism is not redundant. This theorem extends the characterization of fully irreducible automorphisms due to I.~Kapovich \cite[Theorem~1.2]{Kap19}, which in turn was motivated by Catherine Pfaff's criterion for irreducibility \cite[Theorem~4.1]{Pfaff}. A result due to Dowdall-Kapovich-Leininger is that atoroidal irreducible automorphisms are fully irreducible \cite[Corollary~B.4]{DKL15}. As a corollary of the characterization, we get the equivalence for irreducible nonsurjective endomorphisms. \begin{cor}\label{myequiv} If $\phi:F \to F$ is irreducible and nonsurjective, then it is fully irreducible. \end{cor} \begin{proof} By Corollary~\ref{clean2}, it is represented by a clean map. By the first half of the proof of Theorem~\ref{part2}, $\phi$ is atoroidal. In particular, it has no periodic cyclic free factor. Finally, irreducibility implies its image $\phi(F)$ is not contained in a proper free factor. The conclusion follows from Theorem~\ref{propKap}. \end{proof} \section{Irreducible Nonsurjective Endomorphisms are Hyperbolic}\label{hyperbolic} The goal of this final section is to prove that the mapping tori of irreducible nonsurjective endomorphisms are word-hyperbolic. \begin{defn}Let $\phi: F \to F$ be an injective endomorphism.
Then the {\bf ascending HNN extension/mapping torus} of $\phi$ is given by the presentation: \[ F*_\phi = \langle F, t~ | ~ t^{-1}x t = \phi(x), \forall x \in F \rangle \] \end{defn} Thurston's hyperbolization theorem gives the correspondence between the geometry of $3$-manifolds that fiber over a circle and the dynamics of their monodromies \cite{Thu82}, and Brinkmann generalized this to free-by-cyclic groups $F \rtimes \mathbb Z$ \cite{Bri00}. The following theorem, the main result of \cite{JPM}, is a partial generalization to ascending HNN extensions $F*_\phi$. \begin{thm}[{\cite[Theorem~6.3]{JPM}}]\label{mythm}Suppose $\phi:F \to F$ is represented by an immersion. Then $F*_\phi$ is word-hyperbolic if and only if there are no $d,n \ge 1$ and $1 \neq a \in F$ such that $\phi^n(a)$ is conjugate to $a^d$ in $F$.\end{thm} Remarkably, this theorem combined with Theorem~\ref{part2} and Proposition~\ref{invSbgrp} implies that irreducible nonsurjective endomorphisms have word-hyperbolic mapping tori. \begin{thm}\label{hypthm} If $\phi:F \to F$ is represented by a clean immersion, then $F*_\phi$ is word-hyperbolic. In particular, if $\phi$ is nonsurjective and irreducible, then $F*_\phi$ is word-hyperbolic. \end{thm} \begin{proof} As $\phi$ is represented by a clean immersion, every nontrivial conjugacy class is $\phi$-expanding. Suppose $F*_\phi$ were not word-hyperbolic. By Theorem~\ref{mythm}, there exists a nontrivial element $a \in F$, an element $g \in F$, and integers $d, n \ge 1$ such that $\phi^n(a) = g a^d g^{-1}$. If we let $H = \langle a \rangle$, then $i_g \circ \phi^n(H) \le H$. By Proposition~\ref{invSbgrp}, there is a $k \ge 0$ such that $ (i_g\circ\phi^n)^k(F) \cap H$ has finite index in $(i_g \circ \phi^n)^k(F)$. But this is a contradiction, as the intersection is contained in the cyclic group $H$ while a finite index subgroup of the noncyclic free group $(i_g \circ \phi^n)^k(F)$ cannot be cyclic. Therefore, $F*_\phi$ must be word-hyperbolic. The second statement of the theorem follows from Theorem~\ref{part2}.\end{proof} On the other hand, there are fully irreducible automorphisms whose corresponding free-by-cyclic groups are not word-hyperbolic. In this case, Bestvina-Handel showed that the automorphisms are induced by pseudo-Anosov homeomorphisms on once-punctured surfaces \cite[Proposition~4.5]{BH92}. By Thurston's hyperbolization theorem, the corresponding free-by-cyclic groups are fundamental groups of hyperbolic $3$-manifolds that fiber over a circle.
\section{Introduction} In this paper we give two gradient estimates of Hamilton type \cite{H} for heat equations, which have been studied recently by many researchers (see \cite{P02}, \cite{Chow}, \cite{Ni}, \cite{BCP}, \cite{HM}, \cite{LX}, \cite{CZ}, \cite{CH}, \cite{Sun}, \cite{CC}, etc.). The equations under consideration have deep background in the fundamental gap of the Schr\"odinger equation and in the Ricci flow (see \cite{Hs}, \cite{ML}, \cite{Yst}, etc.). Interesting gradient estimates of Hamilton type for the heat equation associated to the Ricci flow have been obtained in \cite{CH}, \cite{BCP}, and \cite{Z}. We derive the Hamilton type gradient estimate for the drifting heat equation and the simple nonlinear heat equation from the viewpoint of Bernstein type estimates. This is a new observation. Our argument is shorter than previous ones. We first derive the Hamilton type gradient estimate for the drifting heat equation \begin{equation}\label{ht} u_t-\Delta u=-\nabla\phi\cdot\nabla u, \ \ u>0 \end{equation} on the compact Riemannian manifold $(M,g)$ of dimension $n$. Here $\phi$ is a smooth function on $M$ and $\nabla \phi$ is the gradient of $\phi$ in the metric $g$. We shall denote by $D^2\phi$ the Hessian matrix of $\phi$. We have the following gradient estimate. \begin{Thm}\label{thm1} Assume that the compact Riemannian manifold $(M,g)$ has Bakry-Emery-Ricci curvature bounded from below in the sense that $$Rc+D^2\phi\geq -K $$ on $M$ for some constant $K\geq 0$. Let $u>0$ be a positive smooth solution to (\ref{ht}). Assume that $\sup_Mu=1$. Let $f=-\log u$. Then we have, for all $t>0$, $$ t|\nabla f|^2\leq (2Kt+1)f. $$ The same estimate is true for (\ref{ht}) on complete Riemannian manifolds when the maximum principle can be applied. \end{Thm} We also have the following result for (\ref{ht}) on manifolds with smooth boundary. \begin{Thm}\label{thm2} Assume that the compact Riemannian manifold $(M,g)$ with convex boundary satisfies the lower bound on the Bakry-Emery-Ricci tensor $$Rc+D^2\phi\geq -K$$ on $M$ for some constant $K\geq 0$. Let $u>0$ be a positive smooth solution to (\ref{ht}) with Neumann boundary condition $u_\nu=0$, where $\nu$ is the outward unit normal to the boundary. Assume that $\sup_Mu=1$. Let $f=-\log u$. Then we have, for all $t>0$, \begin{equation}\label{est-1} t|\nabla f|^2\leq (2Kt+1)f. \end{equation} \end{Thm} Assume that $K=0$ and $u$ is any bounded smooth solution to (\ref{ht}). Assume that $A=\sup_Mu>0$. Let $v=(A-u)/A$. Then $v$ is a positive solution to (\ref{ht}) and the above gradient estimate becomes \begin{equation}\label{eq1-1} t|\nabla u|^2\leq (A-u)^2\log \frac{A}{A-u}, \end{equation} which is the usual form of the Hamilton type gradient estimate. The drifting heat equation is closely related to the fundamental gap of the Schr\"odinger operator on convex domains (so $K=0$). Namely, let $\lambda =\lambda_2-\lambda_1$ be the fundamental gap of the Laplacian operator $-\Delta$ and let $f_j$ be the eigenfunction corresponding to $\lambda_j$, $j=1,2$. Let $u:=u(x):=f_2/f_1$. Then we have (\cite{Yst}) $$ \Delta u=-\lambda u-2(\nabla u\cdot \nabla \log f_1). $$ Set $$ \phi=-2\log f_1, $$ which is convex by the well-known result of Brascamp-Lieb \cite{BL}. Let $$ v(x,t)=\exp(-\lambda t)u(x). $$ Then $v$ satisfies (\ref{ht}) with the Neumann boundary condition.
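For the reader's convenience, here is a short verification (a routine computation supplied here, not taken from the cited sources) that this $v$ indeed solves (\ref{ht}) with $\phi=-2\log f_1$: since $v_t=-\lambda v$ and $\Delta v=\exp(-\lambda t)\Delta u$, we have $$ v_t-\Delta v=-\lambda v-\exp(-\lambda t)\left(-\lambda u-2\nabla u\cdot \nabla \log f_1\right)=2\nabla v\cdot\nabla\log f_1=-\nabla\phi\cdot\nabla v. $$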
The other problem of interest to us is to derive the Hamilton type gradient estimate for the following nonlinear heat equation \begin{equation}\label{n-ht1} u_t-\Delta u=au\log u, \ \ u>0 \end{equation} on the compact Riemannian manifold $(M,g)$ of dimension $n$. Here $a\in \mathbf{R}$ is some constant. This heat equation can be considered as the negative gradient flow of the $W$-functional \cite{P02}, which is closely related to the Log-Sobolev inequalities on the Riemannian manifold. In \cite{M}, we proposed the study of local gradient estimates for solutions to (\ref{n-ht1}) based on its relation with Ricci solitons. Soon after, Y.~Yang gave a nice answer in \cite{Y}; his result is of Li-Yau type \cite{LY}. We have the following result for (\ref{n-ht1}). \begin{Thm}\label{thm3} Assume that the compact Riemannian manifold $(M,g)$ has non-negative Ricci curvature, i.e., $Rc\geq 0$. Let $u>0$ be a positive smooth solution to (\ref{n-ht1}). Assume that $\sup_Mu<1$ at the initial time and $a\leq 0$. Let $f=-\log {u}$. Then we have, for all $t>0$, $\sup_Mu<1$ and $$ t|\nabla f|^2\leq f. $$ The same estimate is true for (\ref{n-ht1}) on complete Riemannian manifolds when the maximum principle can be applied. \end{Thm} Similar to Theorem \ref{thm2}, we also have the following result for (\ref{n-ht1}) on manifolds with smooth boundary. \begin{Thm}\label{thm4} Assume that the compact Riemannian manifold $(M,g)$ with convex boundary has non-negative Ricci curvature. Let $u>0$ be a positive smooth solution to (\ref{n-ht1}) with Neumann boundary condition $u_\nu=0$, where $\nu$ is the outward unit normal to the boundary. Assume that $\sup_Mu<1$ at the initial time and $a\leq 0$. Let $f=-\log u$. Then we have, for all $t>0$, $\sup_Mu<1$ and \begin{equation}\label{est-2} t|\nabla f|^2\leq f. \end{equation} \end{Thm} Our results for (\ref{n-ht1}) are not fully satisfactory because of the assumption $\sup_Mu<1$ at the initial time. So we leave it as an open problem to derive the Hamilton type gradient estimate for general positive solutions to (\ref{n-ht1}). The plan of the paper is as follows. In section \ref{sect2}, we give the proofs of Theorems \ref{thm1} and \ref{thm2}. In section \ref{sect3} we study (\ref{n-ht1}). \section{Hamilton type estimate for drifting heat equation}\label{sect2} Assume that $u>0$ is a positive solution to (\ref{ht}). Let $f=-\log u$. Then $$ f_j=-u_j/u, \ \ \Delta f=-\Delta u/u+|\nabla f|^2. $$ Then we have \begin{equation}\label{eq2-1} (\partial_t-\Delta)f+\nabla\phi\cdot \nabla f=-|\nabla f|^2. \end{equation} Let $L=\partial_t-\Delta+\nabla\phi\cdot\nabla$. We compute $L|\nabla f|^2$. Note that $$ (|\nabla f|^2)_t=2<\nabla f,\nabla f_t>. $$ Recall the Bochner formula $$ \Delta|\nabla f|^2=2|D^2f|^2+2<\nabla f,\nabla \Delta f>+2Rc(\nabla f,\nabla f). $$ Then we have \begin{equation}\label{eq2-2} L|\nabla f|^2= 2<\nabla f, \nabla Lf>-2|D^2f|^2-2(Rc+D^2\phi)(\nabla f, \nabla f). \end{equation} By the Bakry-Emery-Ricci curvature bound assumption, we have $$ L|\nabla f|^2\leq -2|D^2f|^2+2K |\nabla f|^2. $$ Dropping the term $-2|D^2f|^2$ (note that the Hamilton type estimate is not as sharp as the Li-Yau estimate), we have $$ L|\nabla f|^2\leq 2K |\nabla f|^2. $$ Then we have \begin{equation}\label{eq2-3} L(t|\nabla f|^2)\leq (1+2Kt) |\nabla f|^2. \end{equation} Using (\ref{eq2-1}), we get from (\ref{eq2-3}) that \begin{equation}\label{eq2-4} L(t|\nabla f|^2-(2Kt+1)f)\leq -2Kf.
\end{equation} We may re-write (\ref{eq2-4}) as $$ L(t|\nabla f|^2-(2Kt+1)f)\leq \frac{2K}{2K+1}(t|\nabla f|^2-(2K+1)f)-\frac{2K}{2K+1}(t|\nabla f|^2). $$ Then we have $$ L(t|\nabla f|^2-(2Kt+1)f)\leq \frac{2K}{2K+1}(t|\nabla f|^2-(2K+1)f). $$ Applying the maximum principle we obtain that $$ t|\nabla f|^2-(2Kt+1)f\leq 0. $$ This completes the proof of Theorem \ref{thm1}. We now prove Theorem \ref{thm2}. We need to treat the boundary term. Note that $f_\nu=0$ on the boundary. Then on the boundary, $$ [t|\nabla f|^2-(2Kt+1)f]_\nu=2tf_jf_{j\nu}=-2t\,II(\nabla f,\nabla f)\leq 0. $$ Hence by the strong maximum principle we know that the maximum point of $t|\nabla f|^2-(2Kt+1)f$ cannot occur at a boundary point, and then we have $$ t|\nabla f|^2-(2Kt+1)f\leq 0. $$ This completes the proof of Theorem \ref{thm2}. \section{Hamilton type estimate for the simple nonlinear heat equation}\label{sect3} As before, we let $f=-\log u$. Then we have \begin{equation}\label{eq3-1} (\partial_t-\Delta)f=af-|\nabla f|^2. \end{equation} Using $a\leq 0$ and the maximum principle we know that if $\inf_Mf>0$ at the initial time, then it is always positive for $t>0$. Let $L:=\partial_t-\Delta$ in this section. We compute \begin{equation}\label{eq3-2} L|\nabla f|^2= 2<\nabla f, \nabla Lf>-2|D^2f|^2-2Rc(\nabla f, \nabla f). \end{equation} Then we have $$ L|\nabla f|^2=2a|\nabla f|^2- 2<\nabla f, \nabla |\nabla f|^2>-2|D^2f|^2-2Rc(\nabla f, \nabla f). $$ Using the non-negative Ricci curvature assumption we have $$ L|\nabla f|^2\leq 2a|\nabla f|^2- 2<\nabla f, \nabla |\nabla f|^2>. $$ Then $$ L(t|\nabla f|^2)\leq (2at+1)|\nabla f|^2- 2<\nabla f, \nabla (t|\nabla f|^2)>. $$ Using (\ref{eq3-1}) we get that $$ L(t|\nabla f|^2-f)\leq 2at|\nabla f|^2-af- 2<\nabla f, \nabla (t|\nabla f|^2-f)>. $$ Let $H=t|\nabla f|^2-f$. Then $f=t|\nabla f|^2-H$. Hence we have $$ LH\leq at|\nabla f|^2+aH- 2<\nabla f, \nabla H>. $$ Using the assumption that $a\leq 0$, we obtain that $$ LH\leq aH- 2<\nabla f, \nabla H>. $$ Applying the maximum principle to $H$ we know that $H\leq 0$. That is, $$ t|\nabla f|^2-f\leq 0, $$ which is the desired gradient estimate of Hamilton type. This completes the proof of Theorem \ref{thm3}. Using the same argument as in the proof of Theorem \ref{thm2}, we can prove Theorem \ref{thm4}. \emph{Acknowledgement}: The author would like to thank IHES, France, for its hospitality and the K.C.~Wong Foundation for support in 2010.
\section{Introduction / Background} Steganography is the process of hiding one data stream ``in plain sight'' within another data stream known as a ``carrier.'' In this work, we are interested in using audio as a carrier to store images, which has been relatively unexplored in the academic literature (Section~\ref{sec:priorwork}). As with any steganographic technique, one can use this as a general means of covert communication; artists can also use it to hide images and to watermark their tunes. One such example occurred when Aphex Twin embedded a face image inside the spectrogram of their ``Windowlicker'' song \cite{mathews2004music}, though the technique yields very loud, inharmonic, scratchy sounds. By contrast, we seek an embedding of the image that is undetectable. Like Aphex Twin, however, {\em we would like the image to survive lossy audio compression} so that people can communicate their images via social media, which makes the problem significantly more challenging. To make our audio steganography system as robust and as usable as possible, we have the following design goals: \begin{enumerate} \item \label{goal:imperceptible} The hidden data should be audibly imperceptible \item \label{goal:geomquality} The hidden data should be faithfully preserved {\em under lossy compression} \item \label{goal:misalignment} The hidden data should be robust to frame misalignment, or it should be possible to recover a frame alignment without any prior information \item \label{goal:metadata} No metadata should be required to retrieve the hidden data; (lossily compressed) audio alone should suffice \item \label{goal:partial} It should be possible to recover the data partially from partial audio chunks; that is, we don't need to wait for the entire data stream to recover the signal \end{enumerate} Goals~\ref{goal:imperceptible} and~\ref{goal:geomquality} are at odds with each other, and satisfying them simultaneously is the biggest challenge of this work. We formulate an objective function in Section~\ref{sec:formulation} to trade off both of these goals. Rather than storing images directly in audio, we constrain our problem to hiding artistic curves that trace out images (Section~\ref{sec:tspart}) or 3D surface shapes (Section~\ref{sec:hamiltonian}). We hide each dimension of our curve in a different frequency, and we use sliding window sums of the frequency magnitudes to smooth them in time, which makes them more robust to compression, as we show in Section~\ref{sec:experiments}. Furthermore, since we know that the curves only move by a small amount between adjacent samples in time, we can exploit this fact to recover from frame misalignments to satisfy Goal~\ref{goal:misalignment}, as explained in Section~\ref{sec:framealignments}. In the end, all of the hidden information needed to reconstruct the curves is stored in the magnitudes and phases of frequencies in the audio, so no metadata is needed (Goal~\ref{goal:metadata}). Finally, the information needed to recover individual curve samples is localized in time, so the curves can be partially decoded from partial audio (Goal~\ref{goal:partial}).
\subsection{Prior Work in Audio Steganography} \label{sec:priorwork} Before we proceed to describing the techniques for constructing artistic curves (Section~\ref{sec:tspart}, Section~\ref{sec:hamiltonian}) and ultimately hiding them in audio (Section~\ref{sec:methods}), we briefly review adjacent work in the area of audio steganography to put our work in context (for more information, refer to these survey articles \cite{djebbar_comparative_2012, dutta_overview_2020}). As is the case for most other steganographic carriers, the simplest techniques for audio steganography rely on changing the least significant bit of uncompressed encodings (e.g. \cite{cvejic_wavelet_2002}). Other early works rely on the ``frequency masking'' property of the human auditory system. Binary data is stored by controlling the relative amplitude of two frequencies that are masked in this way \cite{gopalan_unified_2009, gopalan2004audio}. Beyond amplitude perturbations, it is also possible to keep the amplitudes fixed and to change the phases \cite{xiaoxiao_dong_data_2004, yun2009acoustic}; the technique of \cite{malik_robust_2007} was particularly successful at this by manipulating the poles of allpass filters to encode binary data. One can also hide binary data by adding and manipulating echoes of a signal \cite{gruhl1996echo}, which is more robust than other techniques to lossy compression since it is akin to impulse responses of natural environments; however, the bit rate is quite low (on the order of dozens of bits per second). For another approach to compressed audio, some techniques are able to use the properties of mp3 files directly to hide information \cite{qiao_steganalysis_2009,atoum2013exploring}. Other techniques adapt wireless transmission schemes, such as on-off keying (OOK) \cite{madhavapeddy_audio_2005} (at a low bit rate of 8bps) and orthogonal frequency-division multiplexing (OFDM) \cite{eichelberger_receiving_2019} (at a higher bit rate closer to 1kbps), to the audio domain. Most of the hidden messages in the audio steganography literature are in binary format, but a few recent works have focused on hiding image pixels using deep learning to train a ``hiding network'' and a ``reveal network'' in tandem, which encode an image into an audio carrier and then decode it back out, respectively, while maximizing the quality of the decoded image and the encoded audio \cite{cui_multi-stage_2021, geleta_pixinwav_2021, takahashi_source_2022, domenech2022hiding}. Like these works, we attempt to hide images, and we share the perspective that, rather than extracting binary sequences exactly, we can tolerate progressive noise in the reconstructions. Challenges of deep-learning based approaches include the need to train extensively on examples, the difficulty of including a loss term that can model the effect of lossy audio compression, and the difficulty of training to be robust to frame misalignment (Goal~\ref{goal:misalignment}). We sidestep these challenges in our work by using a simple model based on hiding in frequencies. \subsection{Traveling Salesperson Art} \label{sec:tspart} To devise 2D curves that stand in for images, we use Traveling Salesperson (TSP) art \cite{bosch2004continuous, kaplan2005tsp,bosch2008connecting}, an automated artistic line drawing pipeline that computes a {\em simple closed loop} to approximate a source image.
``Simple'' in this context does not imply a lack of complexity; rather, it means that the curve does not intersect itself (see, for example, the Jordan Curve Theorem \cite{bosch2009jordan}). To construct such a curve, we first place a collection of dots in the plane in a ``stipple pattern'' to approximate brightness in the image (e.g. more dots concentrate in darker regions), and then connect them in a loop via an approximate traveling salesperson (TSP) tour. Figure~\ref{fig:TSPTour} shows an example. We follow the TSP Art technique of \cite{kaplan2005tsp}, with a few modifications, as described below. \begin{figure} \centering \includegraphics[width=\columnwidth]{TSPTour.pdf} \caption{Our modified pipeline for creating TSP art. Color on the tour indicates phase along the loop.} \label{fig:TSPTour} \end{figure} \begin{figure} \begin{minipage}[c]{0.4\textwidth} \caption{ Since mp3 compression introduces noise to our embedded curves, we pre-smooth them using one iteration of curvature-shortening flow at $\sigma=1$, which smooths the curves without introducing any crossings. } \label{fig:CurvatureShortening} \end{minipage} \begin{minipage}[c]{0.6\textwidth} \includegraphics[width=\columnwidth]{CurvatureShortening.png} \end{minipage}\hfill \end{figure} The first step in TSP art is to generate a stipple pattern $X$ to best approximate a grayscale image $G$. Following the authors of \cite{kaplan2005tsp}, we use Secord's technique for Voronoi stippling \cite{secord2002weighted}, which takes an initial sample of points according to some density weights $W$ and then repeatedly moves them closer to the weighted centroids of their Voronoi regions (an instance of Lloyd's algorithm) so that they are spread more evenly. Secord \cite{secord2002weighted} takes the weight $W_{ij}$ at a pixel to be inversely proportional to its brightness, using a weight of $0$ above some brightness threshold $b$. This leads more dots to concentrate in darker regions; however, the algorithm may fail to sample any dots along important edges between brighter regions (the authors of \cite{li2011structure} also observed this). To mitigate this, we run a Canny edge detector \cite{canny1986computational} on the original image and set the weight of any pixels along an edge to be 1, so that the final weights promote samples both in darker regions and along edges of any kind. This addition is particularly helpful for line drawings. Once a stipple has been established, the next step in TSP art is to ``connect the dots'' with a closed loop that visits each stipple point exactly once, referred to as a ``tour''. A well-known objective function for a tour that doesn't ``jump too much'' is the total distance traveled, or the sum of all edge lengths, and a tour that achieves the optimum is known as a {\em traveling salesperson (TSP) tour}. Since the TSP problem is NP-hard, the authors of \cite{kaplan2005tsp} use the Concorde TSP solver \cite{applegate2001concorde} for an approximate solution. We opt for a simpler technique that creates an initial tour from a depth-first traversal through the minimum spanning tree of the stipple dots; such a tour is already a 2-approximation of the optimal tour.
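As a minimal illustrative sketch of this initial tour construction (our own illustration, assuming SciPy is available; the function name is hypothetical and not from the cited works):
\begin{verbatim}
from scipy.spatial.distance import cdist
from scipy.sparse.csgraph import minimum_spanning_tree, depth_first_order

def initial_tour(X):
    # X: (N, 2) array of stipple points.  Build the complete
    # distance graph, take its minimum spanning tree, and visit
    # the vertices in depth-first preorder; closing the walk into
    # a loop gives a 2-approximation of the optimal TSP tour.
    D = cdist(X, X)
    mst = minimum_spanning_tree(D)
    order, _ = depth_first_order(mst, i_start=0, directed=False)
    return order  # stipple indices in tour order (implicitly closed)
\end{verbatim}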
We then iteratively improve on this tour via a sequence of 2-opt relaxations \cite{johnson1997traveling}; that is, if for some $1 \le i < j < N$ the distances between the 4 points $X_i, X_{i+1}, X_j$, and $X_{j+1}$ satisfy \begin{equation} d(X_i, X_j) + d(X_{i+1}, X_{j+1}) < d(X_i, X_{i+1}) + d(X_j, X_{j+1}) \end{equation} then it is possible to perform a swap to yield a new tour with a smaller distance. This amounts to reversing the indices in the tour between index $i+1$ and $j$, inclusive. We repeat this step as long as such a swap is still possible. Though this is not guaranteed to yield an optimal TSP tour, it does produce aesthetically pleasing tours which are simple; that is, every crossing is removed. \subsubsection{Curvature Shortening Flow} Since mp3 compression introduces noise into our embedded curves, we smooth them before embedding to improve visual quality. To this end, we apply a numerical version of curvature shortening flow described by Mokhtarian and Mackworth \cite{mokhtarian1992theory} which applies to piecewise linear curves (like our TSP tours). The technique works numerically by convolving the coordinates of each curve with sampled versions of Gaussians and their derivatives. To approximately smooth a curve via one step of curvature-shortening flow, Mokhtarian and Mackworth \cite{mokhtarian1992theory} show that it suffices to first re-parameterize by arc length, and then to smooth the curve by convolving once with a Gaussian\footnote{The beauty of convolving with Gaussians as such is that $\gamma$ does not even have to be differentiable, so this works on our piecewise linear TSP tours.}. By the Gage-Hamilton-Grayson theorem, simple curves that undergo curvature shortening flow {\em remain simple} and eventually become convex, shrinking to a point under repeated applications of the flow. Figure~\ref{fig:CurvatureShortening} shows an example of one application of curvature shortening flow for different $\sigma$ values. \subsection{Hamiltonian Cycles on Watertight Triangle Meshes} \label{sec:hamiltonian} \begin{figure} \centering \includegraphics[width=0.8\columnwidth]{HamiltonianHorse.pdf} \caption{An example of the algorithm of Gopi and Epstein \cite{gopi2004single} on a horse model from the Princeton mesh segmentation benchmark \cite{Chen:2009:ABF}.} \label{fig:HamiltonianHorse} \end{figure} In addition to 2D TSP tours on stipple patterns of planar images, we also create artistic 3D space loops that fill out the surfaces of 3D shapes following the technique of Gopi and Epstein \cite{gopi2004single}. They observe that watertight triangle meshes (those with no boundary) have a dual graph in which a perfect matching exists. In other words, it is possible to partition the triangles into a set of adjacent pairs (second column of Figure~\ref{fig:HamiltonianHorse}). In practice, we use the Blossom-5 algorithm \cite{kolmogorov2009blossom} to find perfect matchings of the dual graph. Then, adding edges between unpaired triangles leads to a set of disconnected cycles (third column of Figure~\ref{fig:HamiltonianHorse}), which one can connect by a spanning set of edges between paired triangles. Finally, the spanning edges are split into bridges between the disconnected cycles, joining them together into one large cycle that covers the surface of the triangle mesh. \section{Curve Embedding in Audio} \label{sec:methods} We now introduce our new algorithm for hiding artistic space curves in audio.
Before we go into the details, we first define quantitative measurements of the fit to the original audio (Goal~\ref{goal:imperceptible}) and the geometric quality of the hidden curve (Goal~\ref{goal:geomquality}). Let the original carrier audio be $x$ and let the steganography audio be $y$, each with $N$ samples. Then we define the steganographic signal to noise ratio in decibels (dB) as \begin{equation} \label{eq:stegsnr} \text{snr}(y|x) = 10 \log_{10} \left(\sum_{j=1}^N x_j^2 \right) - 10 \log_{10}\left(\sum_{j=1}^N (y_j-x_j)^2 \right) \end{equation} For our geometric measurement of quality, let $X_i$ be a sequence of $M$ target curve points in $\mathbb{R}^d$ and $Y_i$ be a sequence of $M$ reconstructed points in $\mathbb{R}^d$. Then we define the {\em mean geometric distortion} simply as the mean Euclidean distance between these points: \begin{equation} \label{eq:distortion} \text{distortion}(Y|X) = \frac{1}{M} \sum_{j=1}^M ||X_j - Y_j||_2 \end{equation} \subsection{Formulation of Least Squares Problem} \label{sec:formulation} We now formulate an objective function that trades off Equation~\ref{eq:stegsnr} and Equation~\ref{eq:distortion}. Let $T = (T_1, T_2, \hdots)$ be the sequence of points of the target curve to hide, where each $T_i \in \mathbb{R}^d$ and $T_{i, m}$ refers to the $m^{\text{th}}$ coordinate of $T_i$, and let $x$ be a set of audio samples which will serve as a carrier. The goal is to perturb the samples of $x$ so that some function of $x$ matches $T$ as closely as possible. The function we choose is based on a ``time regularized'' version of the magnitude Short-Time Fourier Transform (STFT) which we call a {\em sliding window sum} STFT (SWS-STFT). As a first step, we compute a {\em non-overlapping} STFT $S$ based on a chosen window length $w$ with a total of $N$ frames: \begin{equation} S_{k, j} = \sum_{n = 0}^{w-1} x_{jw + n} \left(e^{-i 2 \pi k n / w} \right) = M_{k, j} \left( e^{i P_{k, j}} \right) \end{equation} and we factor $S$ into its magnitude and phase components $M$ and $P$, respectively. Next, we choose a subset of $d$ frequencies $k_i, i \in \{1, 2, \hdots, d\}$, each of which will hide a different dimension of $T$. Given a second window length $\ell$, we then define the following {\em sliding window sum} function, which we apply to each row $k_i$ of the magnitudes of $S$ that we wish to perturb to obtain the SWS-STFT \begin{equation} \text{SWS}^{\ell}(M)_{k_i, j} = \sum_{n = 0}^{\ell-1} M_{k_i, j+n} \end{equation} The effect of $\ell$ is to smooth out the noisy rows $k_i$ of the magnitude spectrogram so that the rows in the SW-STFT match smoother transitions in the target curves. Each row of the SW-STFT has $N-\ell+1$ samples. Let's assume momentarily that $T$ has exactly this many samples; we will address the case where $\text{length}(T) > N-\ell+1$ in Section~\ref{sec:reparam}. We then seek a perturbed version of the magnitudes $\hat{M}$ so that each coordinate $i$ is hidden in a single frequency index $k_i$ of $\hat{M}$. To that end, we minimize the following objective function, one coordinate dimension $i = 1, \ldots,
d$ at a time: \begin{equation} \label{eq:objfn} f(\hat{M}_{k_i}) = \sum_{j=1}^{N-\ell+1} \left( \left( \sum_{n = 0}^{\ell-1} \hat{M}_{k_i, j+n} \right) - T_{i, j} \right)^2 + \lambda \sum_{j=1}^N \left( M_{k_i, j} - \hat{M}_{k_i, j} \right)^2 \end{equation} \begin{figure} \centering \includegraphics[width=0.9\columnwidth]{WindowEffect.pdf} \caption{Varying $\ell$ for a fixed $\lambda=0.1$, using the lowest two non-DC frequencies to carry. A larger $\ell$ for the SW-STFT in Equation~\ref{eq:objfn} leads to smoother curves which are more likely to survive compression, but an $\ell$ that's too large may over-smooth.} \label{fig:WindowEffect} \end{figure} \begin{figure} \centering \includegraphics[width=0.9\columnwidth]{LambdaEffect.pdf} \caption{Varying $\lambda$ for a fixed $\ell=16$. A smaller $\lambda$ in Equation~\ref{eq:objfn} leads to higher geometric fidelity (Goal~\ref{goal:geomquality}), at the cost of audio quality (Goal~\ref{goal:imperceptible}), as measured by SNR.} \label{fig:LambdaEffect} \end{figure} subject to $\hat{M_{k_i, j}} \geq 0$. In other words, we want the magnitude SW-STFT of the perturbed signal to match the target coordinate as well as possible (minimizing Equation~\ref{eq:distortion}), while preserving the original audio as well as possible (maximizing Equation~\ref{eq:stegsnr}), according to $\lambda$. A greater $\lambda$ means that the signal $\hat{M}$ will fit the original audio better, at the cost of a noisier curve. Figure~\ref{fig:LambdaEffect} shows an example. After solving for $\hat{M_{k_i, j}}$, we replace all rows $M_{k_i}$ with $\hat{M_{k_i}}$, and we perform an inverse STFT of $M e^{i P}$ using the original phases $P$ (Section~\ref{sec:componentscales} will explain when it is necessary to modify the phases as well). We then save the resulting audio in a compressed format, and we recover the hidden signal by loading it and extracting the corresponding components of the magnitude SW-STFT. \subsubsection{Computational Complexity} \label{sec:computation} Minimizing Equation~\ref{eq:objfn} can be formulated as a sparse nonnegative linear least squares problem, and there are myriad algorithms (e.g. \cite{branch1999subspace}) for solving such systems efficiently via repeated evaluation of the linear system and its adjoint on iterative estimates of a solution. Furthermore, though Equation~\ref{eq:objfn} suggests an $O(N \ell)$ time complexity to evaluate the objective function at each iteration, we implement the linear operator and its adjoint with $O(N)$ operations only, independent of $\ell$, using the 1D version of the ``summed area tables'' trick in computer vision \cite{lewisfast}. For example, let $C_{k_i, j} = \sum_{n=1}^{j} \hat{M}_{k_i, n}$, with $C_{k_i, j} = 0$ for $j < 1$. Then Equation~\ref{eq:objfn} can be rewritten as \begin{equation} \label{eq:objfncumusum} f_i(\hat{M}) = \sum_{j=1}^{N-\ell+1} \left( (C_{k_i, j+\ell-1}-C_{k_i, j-1}) - T_{i, j} \right)^2 + \lambda \sum_{j=1}^N \left( M_{k_i, j} - \hat{M}_{k_i, j} \right)^2 \end{equation} It is also worth noting that a higher $\lambda$ in Equation~\ref{eq:objfn} leads to a lower condition number\footnote{The condition number of a matrix is defined as the ratio of the largest to smallest singular values, and lower condition numbers are more numerically desirable.} of the matrix in the implied linear system, which leads to faster minimization of Equation~\ref{eq:objfn}. But as Figure~\ref{fig:LambdaEffect} shows, it may still be worth it to use smaller $\lambda$ values.
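As a concrete illustration of this trick (a small sketch we include for exposition, assuming NumPy; this is not code from our implementation), the forward sliding window sum over one magnitude row can be evaluated in $O(N)$ as follows:
\begin{verbatim}
import numpy as np

def sws(m, ell):
    # Sliding-window sums of length ell over a row m of length N,
    # computed via a cumulative sum (the 1D "summed area table");
    # returns the N - ell + 1 values C[j+ell-1] - C[j-1].
    C = np.concatenate(([0.0], np.cumsum(m)))
    return C[ell:] - C[:-ell]
\end{verbatim}
The adjoint of this operator admits the same cumulative-sum treatment, so each solver iteration stays linear in $N$.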
In practice, for 30 seconds of audio, this is the difference between an encoding that takes a few seconds for $\lambda=0.1$ and one that takes a split second for $\lambda=10$ on a CPU. \subsubsection{The choice of non-overlapping windows} Though some other works use transforms with overlapping windows \cite{yun2009acoustic,geleta_pixinwav_2021}, changing different STFT bins independently leads to STFTs that do not correspond to any real signal due to discrepancies between overlapping windows. An algorithm like Griffin-Lim \cite{griffin1984signal} can recover a signal whose spectrogram has a locally minimal distance to the perturbed STFT, but we find that this step introduces an unacceptable level of distortion both in the target and the carrier audio. This problem occurs even for real-valued transforms such as the Discrete Cosine Transform (which was used by \cite{geleta_pixinwav_2021}). Like \cite{xiaoxiao_dong_data_2004}, we sidestep this problem by using non-overlapping windows. Aside from window effects, the main downside of non-overlapping windows is that they lead to fewer STFT frames. For instance, with a 30 second audio clip at 44100hz using a window length of 1024, we are limited to about 1292 frames, which is half of what we would get using an overlapping STFT with a hop length of 512. However, a quick thought experiment shows that this is still a reasonable data rate. Suppose that we use a sliding window length $\ell=16$, for a total of 1277 carrier samples. Suppose also that the precision of our embedding of a 2D plane curve is roughly on par with an 8-bit-per-coordinate quantization in a more conventional binary encoding scheme. Then the total number of bits transmitted over 30 seconds is $1277 \times 8 \times 2$, or about 681 bits/second. This number jumps to about 1022 bits/second for a 3D curve. These numbers are on par with other recent techniques that are designed to be robust to noise (e.g. 900 bits/second in the OFDM technique of \cite{eichelberger_receiving_2019}). Furthermore, our equivalent of a ``bit error'' is additive coordinate noise, and the hidden signal degrades continuously with increased noise, rather than reaching a failure mode when the bit error rate is too high. \subsection{Shifting, Scaling And Re-Parameterizing Targets for Better Fits} \label{sec:reparam} In this section, we explain how to modify the target curves to better match the given SW-STFT so that the STFT magnitudes don't have to be perturbed as much for the SW-STFT to match the target, leading to a less noticeable embedding of the hidden curve for the same geometric quality. \subsubsection{Vertical Translation/Scaling} A crucial step in keeping the hidden signal imperceptible is to shift and rescale the target coordinates of the hidden curve to match the dynamic range of the SW-STFT components. We first choose a scale ratio $a_i$ for each component as the ratio of the standard deviations $\text{stdev}_j \left( \text{SWS}^{\ell} (M)_{k_i, j} \right) / \text{stdev}_j (T_{i, j})$. Then, letting $N$ be the length of the two signals, we compute the vertical shift $b_i$ as \begin{equation} b_i = \frac{1}{N} \sum_{j=1}^N \left( \text{SWS}^{\ell} (M)_{k_i, j} - a_i T_{i, j} \right) \end{equation} The rescaled target coordinate $\hat{T}_i$ is then defined as $\hat{T}_{i, j} = a_i T_{i, j} + b_i$.
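In code, this shift and scale might look as follows (a sketch of the $a_i, b_i$ computation just described, assuming NumPy; the function name is ours):
\begin{verbatim}
import numpy as np

def rescale_target(T_i, sws_row):
    # Match one target coordinate (length N) to the dynamic range
    # of the SWS-STFT row that will carry it, per a_i and b_i above.
    a = np.std(sws_row) / np.std(T_i)
    b = np.mean(sws_row - a * T_i)
    return a * T_i + b
\end{verbatim}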
Unfortunately, we lose relative scale information between the components, but we will explain how to hide and recover this in Section~\ref{sec:componentscales}. \subsubsection{Viterbi Target Re-Parameterization} \algrenewcommand\algorithmicindent{0.8em} \begin{algorithm} \caption{Viterbi Target Re-Parameterization} \begin{algorithmic}[1] \Procedure{ViterbiTargetReparam}{$\hat{T}$, $\text{SWS}^{\ell} (M)$, $K$} \State $N_T \gets \text{len}(\hat{T})$ \Comment{Number of target points} \State $N_M \gets \text{len}(\text{SWS}^{\ell}(M))$ \Comment{Number of SW-STFT frames} \State $S[t, j>1] \gets 0$ \Comment{$N_T \times N_M$ cumulative cost matrix} \State $S[t, 1] \gets \sum_{i=1}^d (\hat{T}_{i, t} - \text{SWS}^{\ell} (M)_{k_i, 1})^2$ \Comment{Matching costs at the first frame} \State $B[t, j] \gets 0$ \Comment{$N_T \times N_M$ backpointers to best preceding states} \For{$j = 2:N_M$} \For{$t = 1:N_T$} \State $S[t, j] = \min_{k=(t-K) \mod N_T} ^ {k=(t-1) \mod N_T} S[k, j-1] $ \Comment {Find best preceding state} \State $S[t, j] \gets S[t, j] + \sum_{i=1}^d (\hat{T}_{i, t} - \text{SWS}^{\ell} (M)_{k_i, j})^2$ \Comment{Add on matching cost} \State $B[t, j] = \argmin_{k=(t-K) \mod N_T} ^ {k=(t-1) \mod N_T} S[k, j-1] $ \Comment {Save reference to best preceding state} \EndFor \EndFor \State Backtrace $B$ to obtain the optimal sequence $\Theta$ \\ \Return $\Theta$ \EndProcedure \end{algorithmic} \label{alg:viterbiwarp} \end{algorithm} Beyond shifting and scaling the target coordinates vertically, we may also need to re-parameterize them in time, since, in general, the SWS-STFT sequence will not have the same number of samples as the shifted target $\hat{T}$. Furthermore, the target curves are cyclic, so the starting point is arbitrary, and it does not matter whether we traverse the curve in one direction or the other, or at a constant speed. This gives us a lot of freedom in how we choose to re-parameterize the target to best match the signal even before we perturb the signal. Let $N_M$ be the number of frames in the SW-STFT and $N_T$ be the number of samples of the target, and assume that $N_T > N_M$ (we can always resample our target curves to make this true). Let $\Theta = \{ \theta_1, \theta_2, \theta_3, \hdots, \theta_{N_M} \}$ be new indices into the target, and let $K > 0$ be a positive integer so that $1 \leq \left( (\theta_i - \theta_{i-1}) \mod N_T \right) \leq K$; that is, $K$ is the maximum number of samples by which the target re-parameterization can jump in adjacent time steps. We seek a $\Theta$ minimizing the following objective function for a $d$-dimensional shifted target $\hat{T}$ \begin{equation} \label{eq:viterbiobj} g(\Theta = \{\theta_1, \theta_2, \hdots, \theta_{N_M}\}) = \sum_{j = 1}^{N_M} \sum_{i=1}^d \left( \text{SWS}^{\ell} (M)_{k_i, j} - \hat{T}_{i, \theta_j} \right)^2 \end{equation} For a fixed $K$, we use the Viterbi algorithm to obtain a $\Theta$ minimizing Equation~\ref{eq:viterbiobj} in $O(N_M N_T K)$ time. Algorithm~\ref{alg:viterbiwarp} gives more details. \begin{figure} \centering \includegraphics[width=\columnwidth]{Viterbi.pdf} \caption{Circularly shifting the target loop and traversing it at a non-uniform speed traces out the same shape, while matching the given SW-STFT better than a uniform parameterization starting at an arbitrary place on the target.} \label{fig:ViterbiWarp} \end{figure} In practice, we re-run the algorithm starting at $K = 1$, and we repeatedly increment $K$ until the optimal $\Theta$ goes through at least one full loop on the target.
Since clockwise or counter-clockwise traversal of the target is arbitrary, we then rerun this procedure for a reversed version of the sequence and keep the result which minimizes Equation~\ref{eq:viterbiobj}. As a rule of thumb, we find that having a target curve with about 1.5-2x as many samples as there are SWS-STFT frames gives enough wiggle room for the Viterbi algorithm. In our experiments in Section~\ref{sec:experiments}, we will generate TSP and Hamiltonian sequences with 2000 samples for our 1200-1300 SWS-STFT frames. Figure~\ref{fig:ViterbiWarp} shows an example of running this algorithm under these conditions on the Usher example in column 3 of Figure~\ref{fig:WindowEffect}. \subsection{Storing Component Scales in Phase} \label{sec:componentscales} Since we intentionally rescale the dimensions of the target to match the dynamic range of each SW-STFT component, we lose the aspect ratio between the dimensions. But since we have only perturbed the magnitude components of the frequency indices $k_i$, we still have some freedom to perturb the phases to store additional information. To that end, we use the technique presented by the authors of \cite{xiaoxiao_dong_data_2004} to store the relative scale of each dimension in the phase. We store the same scale in the phase of every STFT frame, and we take the scale to be the median of the phases upon decoding. \subsection{Recovering Frame Alignments} \label{sec:framealignments} What we've described so far works for audio that is aligned to each window, but additional work needs to be done to address Goal~\ref{goal:misalignment} if the audio to be decoded arrives misaligned. To this end, we use the fact that the hidden curves move only slightly between adjacent samples; a TSP tour is defined as length-minimizing, and adjacent samples in Hamiltonian cycles on meshes move only between neighboring triangles on the original mesh. If the embedding is frame aligned, the length of the curve should be minimized. Conversely, if the embedding is not frame aligned, the curve becomes noisy and is more likely to jump around quickly from sample to sample. Therefore, we can pick the alignment which minimizes the curve length over all possible shifts from $0$ to the STFT window length. Figure~\ref{fig:FrameAlignments} shows an example. We will empirically evaluate this in Section~\ref{sec:experiments}. \begin{figure} \centering \includegraphics[width=\columnwidth]{OptimalShift.pdf} \caption{A shift which minimizes curve length is most likely the shift needed to re-align audio to frame windows. In this example, with the embedding using the lowest two non-DC frequencies with $\lambda=0.1, \ell=16$, the global minima exactly match the ground truth.} \label{fig:FrameAlignments} \end{figure} \section{Experiments} \label{sec:experiments} \begin{figure} \centering \includegraphics[width=\columnwidth]{caltech101_samples.png} \caption{Examples of TSP art on the Caltech-101 dataset \cite{li_andreeto_ranzato_perona_2022}.} \label{fig:caltech101examples} \end{figure} \begin{figure} \centering \includegraphics[width=\columnwidth]{MeshSegExamples.png} \caption{Examples of Hamiltonian cycles on triangle meshes from the Princeton mesh segmentation benchmark \cite{Chen:2009:ABF}.} \label{fig:meshsegexamples} \end{figure} We now quantitatively assess the performance of our system. To generate a large set of curves, we generate TSP art on the roughly 10,000 images in the Caltech-101 dataset \cite{li_andreeto_ranzato_perona_2022} (e.g.
Figure~\ref{fig:caltech101examples}), and we generate Hamiltonian cycles on the 380 watertight triangle meshes in the Princeton mesh segmentation database \cite{Chen:2009:ABF} (e.g. Figure~\ref{fig:meshsegexamples}). We then use the 1000 30-second audio clips from the Tzanetakis genre dataset \cite{tzanetakis2002musical} as carrier audio. For the Caltech-101 database, we partition the images into sets of 10, each of which is encoded in one of the audio carriers, varying $\lambda$ and $\ell$. Likewise, for the mesh segmentation dataset, we hide each Hamiltonian cycle in three different audio clips from the Tzanetakis dataset. In all cases, we use an STFT window length of 1024 at a sample rate of 44100hz, and we encode the audio using lossy mp3 compression at 64 kbps. \begin{figure} \centering \includegraphics[width=\columnwidth]{CaltechGTzanFixedWin.pdf} \includegraphics[width=\columnwidth]{MeshSegGTzanFixedWin.pdf} \caption{The results of embedding curves into clips from the Tzanetakis genre dataset, for a fixed $\ell=16$. As expected from Equation~\ref{eq:objfn}, both the distortion and SNR go up as $\lambda$ increases.} \label{fig:ResultsFixedWin} \end{figure} \begin{figure} \centering \includegraphics[width=\columnwidth]{CaltechGTzanFixedLam.pdf} \caption{Embedding curves into clips from the Tzanetakis genre dataset varying the window length $\ell$ for a fixed $\lambda$. Moderate window lengths are the best choices for both SNR and distortion. We recommend $\ell=16$. (3D is similar; see supplementary)} \label{fig:ResultsFixedLam} \end{figure} Overall, we see slightly higher distortions and lower SNRs for 3D embeddings than 2D embeddings, which makes sense since there is one additional coordinate to hide in 3D. As expected, increasing $\lambda$ increases both the SNR and distortion, as shown in Figure~\ref{fig:ResultsFixedWin}. Also, as Figure~\ref{fig:ResultsFixedLam} shows, increasing the window length has a positive effect on geometric distortion, while moderate window lengths lead to the best SNR. We also see a positive effect of the Viterbi alignment from Section~\ref{sec:reparam} on the SNR in Figure~\ref{fig:ResultsViterbiExperiment}. Though $\approx$+1dB may not seem significant, it can make a huge difference in audio, particularly in quiet regions. Finally, we run an experiment on the Caltech-101 dataset by choosing 4 random frame offsets per embedding, and we see in Figure~\ref{fig:ShiftsExperiment} that the algorithm of Section~\ref{sec:framealignments} recovers alignments well. \begin{figure} \centering \includegraphics[width=\columnwidth]{CaltechGTzanViterbiExperiment.pdf} \includegraphics[width=\columnwidth]{MeshSegGtzanViterbiExperiment.pdf} \caption{Pre-warping the target with Viterbi alignment (Section~\ref{sec:reparam}) overall improves the resulting distortion and SNR.} \label{fig:ResultsViterbiExperiment} \end{figure} \begin{figure} \centering \includegraphics[width=\columnwidth]{ShiftsExperiment.pdf} \caption{Estimating the frame alignment by minimizing the length (Section~\ref{sec:framealignments}) works nearly as well as perfect knowledge of the alignment.
Using $\lambda=0.1, \ell=10$ over all images in Caltech-101, 93\% of the shifts are within 10 of the ground truth for an STFT window length of 1024, which has hardly any effect on the geometry of the curve.} \label{fig:ShiftsExperiment} \end{figure} \subsubsection{Subjective Listening Experiment} Though the experiments above are encouraging, SNR can be misleading; frequencies that are more audible may actually have a higher SNR due to psychoacoustic phenomena. To address this, we performed a crowd-sourced listening experiment on the Amazon Mechanical Turk where we embedded a random image from the Caltech-101 dataset in each of the Tzanetakis clips. We split them into 4 groups with no embedding (control), and with $\lambda = 0.1, 1, 10$. We asked the listeners to rate the quality of the noise on the 5 point impairment scale of \cite{bassia2001robust} (5: imperceptible, 4: perceptible but not annoying, 3: slightly annoying, 2: annoying, 1: very annoying). In our experiment, we had 46 unique Turkers, 21 of whom participated in at least 40 rankings. Figure~\ref{fig:TurkResults} shows the results. Mean opinion scores (MOS) are correlated with $\lambda$, but there is little difference between $\lambda=0.1$ and $\lambda=1$, which suggests using the former as a rule of thumb due to its lower geometric distortion. \begin{figure} \begin{minipage}[c]{0.36\textwidth} \caption{ Results of the listening experiment on the Amazon Mechanical Turk. A lower $\lambda$ leads to a lower mean opinion score, as expected, though not to an intolerable degree. } \label{fig:TurkResults} \end{minipage} \begin{minipage}[c]{0.64\textwidth} \includegraphics[width=\columnwidth]{Turk.pdf} \end{minipage}\hfill \end{figure} \section{Discussion / Supplementary Material} We have presented a model-based scheme for hiding artistic curves in audio, and the curves survive lossy compression while remaining reasonably imperceptible, as validated both with quantitative measurements and by humans. We hide the dimensions of curves in time regularized magnitudes of STFT frequencies, though coefficients of any orthogonal decomposition could work (we also implemented wavelets, though we found them more audibly perceptible). Our scheme is incredibly simple and requires no training. Decoding is nearly instantaneous, as it only requires computing the STFT of a few frequencies. To show off our pipeline, we created an interactive viewer in Javascript using WebGL that can load in and decode any mp3 file. The viewer plays the decoded curve synchronized to the music. We provide a variety of 2D and 3D precomputed examples to demonstrate our capabilities. To view our supplementary material, source code for encoding and decoding, and live examples, please visit \url{https://github.com/ctralie/AudioCurveSteganography} \bibliographystyle{splncs04}
\section{Two algorithms in search of a type-system} As \citet{Hofmann:non-size-incr} has noted, a problem with implicit characterizations of complexity classes is that they often fail to capture many natural \emph{algorithms}---usually because the complexity-theoretic types used to control primitive recursion impose draconian restrictions on programming. Here is an example. In Bellantoni and Cook's \citep{Bellantoni-Cook:Recursion-theoretic-char} and Leivant's \citep{Leivant:Ram-rec-I} well-known characterizations of the poly\-nom\-ial-time computable functions, a recursively-computed value is prohibited from driving another recursion. But, for instance, the recursion clause of insertion-sort has the form $\pgm{ins\_sort}(\pgm{cons}(a, l)) = \pgm{insert}(a, \pgm{ins\_sort}(l))$, where $\pgm{insert}$ is defined by recursion on its second argument; selection-sort presents analogous problems. \citet{Hofmann:non-size-incr,Hofmann:ic03} addresses this problem by noting that the output of a non-size-increasing program (such as $\pgm{ins\_sort}$) should be permitted to drive another recursion, as it cannot cause the sort of complexity blow-up the B-C-L restrictions guard against. To incorporate such recursions, Hofmann defines a higher-order language with typical first-order types and a special type~$\Diamond$ through which functions defined recursively must ``pay'' for any use of size-increasing constructors, in effect guaranteeing that there is no size increase. Through this scheme Hofmann is able to implement many natural algorithms while still ensuring that any typable program is non-size-increasing poly\-nom\-ial-time computable (\citet{aehlig-schw:tocl02} sketch an extension that captures all of polynomial-time). Our earlier paper~\citep{danner-royer:ats,danner-royer:ats-lmcs}, hereafter referred to as~\textit{ATS}, takes a different approach to constructing a usable programming language with guaranteed resource usage. We introduce a type-$2$ programming formalism called $\mathsf{ATR}$ (for Affine Tail Recursion, which we rechristen in this paper as Affine \emph{Tiered} Recursion) based on $\mathsf{PCF}$. $\mathsf{ATR}$'s type system is motivated by the tiering and safe/normal notions of \cite{Leivant:Ram-rec-I} and \cite{Bellantoni-Cook:Recursion-theoretic-char} and serves to control the size of objects. Instead of restricting to primitive recursion, $\mathsf{ATR}$ has an operator for recursive definitions; affine types and explicit clocking on the operator serve to control time. We give a denotational semantics to $\mathsf{ATR}$ types and terms in which the size restrictions play a key part. This allows us, for example, to give an~$\mathsf{ATR}$ \emph{definition} of a primitive-recursion-on-notation combinator (with appropriate types and without explicit bounding terms) that preserves feasibility. We also give a \emph{time-complexity semantics} and use it to prove that each type-$2$ $\mathsf{ATR}$ program has a (second-order) polynomial run-time.% \footnote{ These kinds of results may also have applications in the type of static analysis for time-complexity that \citet{frederiksen-jones:recognition} investigate.} Finally, we show that the standard type-$2$ basic feasible functionals (an extension of polynomial-time computability to type-$2$) of \citet{mehlhorn:stoc74} and \citet{cook-urquhart:fca} are $\mathsf{ATR}$ definable. Moreover, our underlying model of computation (and complexity) is just a standard abstract machine that implements call-by-value $\mathsf{PCF}$. 
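To keep the motivating example concrete, here is the insertion-sort recursion pattern in a conventional language (an illustrative sketch of ours, not $\mathsf{ATR}$ syntax): the recursively computed value $\pgm{ins\_sort}(l)$ drives the recursion in $\pgm{insert}$.
\begin{verbatim}
def insert(a, l):
    # defined by recursion on its second argument
    if not l:
        return [a]
    if a <= l[0]:
        return [a] + l
    return [l[0]] + insert(a, l[1:])

def ins_sort(l):
    # ins_sort(cons(a, l)) = insert(a, ins_sort(l)); the output
    # of one recursion (ins_sort) drives another (insert)
    if not l:
        return []
    return insert(l[0], ins_sort(l[1:]))
\end{verbatim}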
However, $\mathsf{ATR}$ is still somewhat limited as its only base type is binary words and the only recursions allowed are tail-recursions. \paragraph{What is new in this paper.} In this paper we extend $\mathsf{ATR}$ to encompass a broad class of feasible affine recursions. We demonstrate these extensions by giving fairly direct and natural versions of insertion- and selection-sorts on lists. As additional evidence of $\mathsf{ATR}$'s support for programming we do not add lists as a base type, but instead show how to implement them over $\mathsf{ATR}$'s base type of binary words. The technical core of this paper is a simplification and generalization of the time-complexity semantics of~\textit{ATS}. We construct a straightforward framework in which recursion schemes in~$\mathsf{ATR}$ lead to time-complexity recurrences that must be solved to show that these schemes preserve feasibility. This gives a route to follow when adding new forms of recursion to~$\mathsf{ATR}$. We follow this route to show that the recursions used to implement lists and insertion-sort are (second-order) polynomial-time bounded. We also discuss how to extend these results to handle the recursions present in selection-sort. Thus along with significantly extending our existing system to the point where many standard algorithms can be naturally expressed, we also provide a set of basic tools for further extensions. \section{Programming in~$\mathsf{ATR}$} \paragraph{The~$\mathsf{ATR}$ formalism.} An $\mathsf{ATR}$ base type has the form $\mathsf{N}_L$, where \emph{labels} $L$ are elements of the set $(\Box\dmnd)^*\bigcup \dmnd(\Box\dmnd)^*$ (our use of~$\dmnd$ is not directly related to Hofmann's). The labels are ordered by $\eps\leq\dmnd\leq\Box\dmnd\leq\dmnd\Box\dmnd\leq\dotsb$. We define a subtype relation on the base types by $\mathsf{N}_L\mathrel{\leq:}\mathsf{N}_{L'}$ if $L\leq L'$ and extend it to function types in the standard way. Roughly, we can think of type-$\mathsf{N}_\varepsilon$ values as basic string inputs, type-$\mathsf{N}_\dmnd$ values as the result of poly\-nom\-ial-time computations over $\mathsf{N}_\varepsilon$-values, type-$\mathsf{N}_{\Box\dmnd}$-values as the result of applying an oracle (a type-1 input) to $\mathsf{N}_\dmnd$-values, type-$\mathsf{N}_{\dmnd\Box\dmnd}$ values as the result of poly\-nom\-ial-time computations over $\mathsf{N}_{\Box\dmnd}$-values, etc. $\mathsf{N}_L$ is called an \emph{oracular} (respectively, \emph{computational}) type when $L\in (\Box\dmnd)^*$ (respectively, $\dmnd(\Box\dmnd)^*$). We let $\base b$ (possibly decorated) range over base types. Function types are formed as usual from the base types. The base datatype is $K=\set{\mathbf{0},\mathbf{1}}^*$, and the $\mathsf{ATR}$ terms are defined in Figure~\ref{fig:expr}. The term forming operations correspond to adding and deleting a left-most bit ($\comb{c}_0$, $\comb{c}_1$, and $\comb{d}$), testing whether a word begins with a $\mathbf{0}$ or a $\mathbf{1}$ ($\comb{t}_0$ and $\comb{t}_1$), and a conditional. The intended interpretation of $\comb{down} s\,t$ is $s$ if $\lh s\leq\lh t$ and $\eps$ otherwise. The recursion operator is $\comb{crec}$, standing for clocked recursion. \begin{figure}[tb] \begin{align*} s, t &::= V \mid K \mid O \mid (\lambda V.s) \mid (st) \\ &\qquad\mid (\comb{c}_a s) \mid (\comb{d} s) \mid (\comb{t}_a s) \mid (\cond{s}{t_0}{t_1}) \mid (\comb{down} s\,t) \mid (\comb{crec} K(\lambda_r f.t)) \end{align*} \caption{$\mathsf{ATR}$ expressions.
$V$ is a set of variable symbols and $O$ a set of oracle symbols.\label{fig:expr}}
\end{figure}
The typing rules are given in Figure~\ref{fig:typing}. Type contexts are split (after Barber and Plotkin's DILL~\citep{barber:dill}) into intuitionistic and affine zones. Variables in the former correspond to the usual $\mathbin{\rightarrow}$ introduction and elimination rules and variables in the latter are intended to be recursively defined; variables that occur in the affine zone are said to \emph{occur affinely} in the term. The \InfRule{$\comb{crec}$-I} rule serves as both introduction and elimination rule for the implicit $\lollipop$~types (in the rule $\vec{\base b} = \base b_1,\dots,\base b_k$ and $\vec v\mathbin{:}\vec{\base b}$ stands for $v_1\mathbin{:} \base b_1,\dots,v_k\mathbin{:} \base b_k$). We use $\lambda_r$ as the abstraction operator for variables introduced from the affine zone of the type context to further distinguish them from ``ordinary'' variables. The side-conditions on \InfRule{$\comb{crec}$-I} are that $f$ occurs in cons-tail position\footnote{Informally, $f$ occurs in \emph{cons-tail position in~$t$} if in the parse-tree of~$t$ a path from the root to a complete application of~$f$ passes through only conditional branches (not tests), $\comb{c}_0$, $\comb{c}_1$, and the left-argument of $\comb{down}$; $\semanticOp{tail\_len}(f,t)$ is defined to be the maximum number of $\comb{c}_a$ operations not below any $\comb{down}$ node in any such path.\label{footnote:cons-tail-recn}} in~$t$ and that if $\base b_i\mathrel{\leq:}\base b_1$, then $\base b_i$ is oracular (including $i=0$). The constraint on the types allows us to prove a polynomial size-bound on the growth of the arguments to~$f$, which in turn allows us to prove such bounds on all terms. The typing rules enforce a ``one-use'' restriction on affine variables by disallowing their occurrence as a free variable in both arguments of~$\comb{down}$, the argument of an application, the test of a conditional, or anywhere in a $\comb{crec}$-term. The intuition behind the \emph{shifts-to} relation $\shiftsto$ between types is as follows. Suppose $f\mathbin{:} \mathsf{N}_{\eps}\mathbin{\rightarrow}\mathsf{N}_\dmnd$. We think of $f$ as a function that performs some polynomial-time computation on its input. If we have an input~$x$ of type~$\mathsf{N}_{\Box\dmnd}$, then, recalling the intuition behind the base types, we should be able to assign the type~$\mathsf{N}_{\dmnd\Box\dmnd}$ to~$f(x)$. The shifts-to relation allows us to shift input types in this way, with a corresponding shift in output type. As a concrete example, the judgment $\typing{f\mathbin{:}\mathsf{N}_\eps\mathbin{\rightarrow}\mathsf{N}_\dmnd,x\mathbin{:}\mathsf{N}_\eps}{} {f(fx)}{\mathsf{N}_{\dmnd\Box\dmnd}}$ is derivable using \InfRule{Subsumption} to coerce the type of~$f(x)$ to $\mathsf{N}_{\Box\dmnd}$ and \InfRule{Shift} to shift the type of the outer application of~$f$. The definition of~$\shiftsto$ must take into account multiple arguments and level-$2$ types and hence is somewhat involved. Since we do not need it for the typings in this paper, we direct the reader to~\textit{ATS}\ for the full definition.
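Schematically, the two steps in this example amount to the subtyping coercion $\mathsf{N}_\dmnd\mathrel{\leq:}\mathsf{N}_{\Box\dmnd}$ applied to the inner application, followed by the shift
\[
\mathsf{N}_\eps\mathbin{\rightarrow}\mathsf{N}_\dmnd \;\shiftsto\; \mathsf{N}_{\Box\dmnd}\mathbin{\rightarrow}\mathsf{N}_{\dmnd\Box\dmnd}
\]
applied to the type of~$f$ for the outer application.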
\begin{figure}[tb]
\begin{gather*}
\AXC{} \LeftLabelBold{Zero-I} \UIC{$\GDtyping\eps{\mathsf{N}_\eps}$} \DisplayProof
\qquad
\AXC{} \LeftLabelBold{Const-I} \UIC{$\GDtyping K {\mathsf{N}_{\dmnd}}$} \DisplayProof
\\
\AXC{} \LeftLabelBold{Int-Id-I} \UIC{$\typing{\Gamma,v\mathbin{:}\sigma}\Delta v\sigma$} \DisplayProof
\qquad
\AXC{} \LeftLabelBold{Aff-Id-I} \UIC{$\typing\Gamma{\Delta,v\mathbin{:}\sigma}v\sigma$} \DisplayProof
\\
\AXC{$\GDtyping s\sigma$} \LeftLabelBold{Shift} \RightLabel{($\sigma\shiftsto\tau$)} \UIC{$\GDtyping s\tau$} \DisplayProof
\qquad
\AXC{$\GDtyping s\sigma$} \LeftLabelBold{Subsumption} \RightLabel{($\sigma\mathrel{\leq:}\tau$)} \UIC{$\GDtyping s\tau$} \DisplayProof
\\
\AXC{$\GDtyping s{\mathsf{N}_{\dmnd_d}}$} \LeftLabelBold{$\comb{c}_a$-I} \UIC{$\GDtyping {(\comb{c}_a s)} {\mathsf{N}_{\dmnd_d}}$} \DisplayProof
\qquad
\AXC{$\GDtyping s {\mathsf{N}_L}$} \LeftLabelBold{$\comb{d}$-I} \UIC{$\GDtyping {\comb{d} s} {\mathsf{N}_L}$} \DisplayProof
\qquad
\AXC{$\GDtyping s {\mathsf{N}_L}$} \LeftLabelBold{$\comb{t}_a$-I} \UIC{$\GDtyping {\comb{t}_a s} {\mathsf{N}_L}$} \DisplayProof
\\
\AXC{$\typing\Gamma{\Delta_0} s{\mathsf{N}_{L_0}}$} \AXC{$\typing\Gamma{\Delta_1} t{\mathsf{N}_{L_1}}$} \LeftLabelBold{$\comb{down}$-I} \BIC{$\typing\Gamma{\Delta_0,\Delta_1}{(\comb{down} st)}{\mathsf{N}_{L_1}}$} \DisplayProof
\\
\AXC{$\Gtyping s{\mathsf{N}_{L}}$} \AXC{$\typing\Gamma{\Delta_0} {t_0}{\mathsf{N}_{L'}}$} \AXC{$\typing\Gamma{\Delta_1} {t_1}{\mathsf{N}_{L'}}$} \LeftLabelBold{$\comb{if}$-I} \TIC{$\typing\Gamma{\Delta_0\union\Delta_1}{(\cond s{t_0}{t_1})}{\mathsf{N}_{L'}}$} \DisplayProof
\\
\AXC{$\typing{\underline{~}}{\underline{~}}a{\mathsf{N}_\dmnd}$} \AXC{$\typing{\Gamma,\vec v\mathbin{:}\vec{\base b}}{f\mathbin{:}\vec{\base b}\mathbin{\rightarrow}\base b_0}t{\base b_0}$} \LeftLabelBold{$\comb{crec}$-I} \BIC{$\Gtyping{\crec\,a\,(\afflambda f.\lambda\vec v.t)}{\vec{\base b}\mathbin{\rightarrow}\base b_0}$} \DisplayProof
\\
\AXC{$\typing{\Gamma,v\mathbin{:}\sigma}\Delta t\tau$} \LeftLabelBold{$\mathbin{\rightarrow}$-I} \UIC{$\GDtyping {(\lambda v.t)}{\sigma\mathbin{\rightarrow}\tau}$} \DisplayProof
\qquad
\AXC{$\GDtyping s{\sigma\mathbin{\rightarrow}\tau}$} \AXC{$\Gtyping t\sigma$} \LeftLabelBold{$\mathbin{\rightarrow}$-E} \BIC{$\GDtyping {(st)}\tau$} \DisplayProof
\end{gather*}
\caption{$\mathsf{ATR}$ typing. The changes from~\textit{ATS}\ are as follows: (1)~\textit{ATS}\ imposed no constraint on~$\base b_0$ in~(\textbf{$\comb{crec}$-I}); (2)~\textit{ATS}\ restricted~(\textbf{$\comb{crec}$-I}) to tail-recursion; and (3)~\textit{ATS}\ restricted~(\textbf{$\comb{d}$-I}) and~(\textbf{$\comb{t}_a$-I}) to computational types.\label{fig:typing}}
\end{figure}
Motivated by the approach of \citet{jones:life-wout-cons}, we define the cost of evaluation to be the size of a call-by-value evaluation derivation. This is essentially equivalent to the abstract machine-based cost model of~\textit{ATS}, but the derivation-based model helps avoid considerable bookkeeping clutter. Values are string constants, oracles, or abstractions. Environments map term variables to values or to closures over $\comb{crec}$ terms. A closure~$\cl t\rho$ consists of a term~$t$ and an environment~$\rho$. The evaluation relation has the form~$\cl t\rho\evalto\cl z\theta$ where $\cl t\rho$ and $\cl z\theta$ are closures and $z$ is a value.
The derivation rules for the evaluation are mostly straightforward and mimic the action of the abstract machine of~\textit{ATS}; for example, we have
\[
\AXC{$\rho(x)\evalto\cl z\theta$}
\UIC{$\cl x\rho\evalto\cl z\theta$}
\DisplayProof
\qquad
\AXC{$\cl t\rho\evalto \cl*{\mathbf{0} z}\theta$}
\UIC{$\cl*{\comb{d} t}{\rho}\evalto \cl z\theta$}
\DisplayProof
\qquad
\AXC{$\cl s\rho\evalto \cl w\zeta \qquad \cl t\rho\evalto \cl z\theta \qquad \lh w\leq\lh z$}
\UIC{$\cl*{\comb{down} st}{\rho}\evalto \cl w\zeta$}
\DisplayProof.
\]
The evaluation rule for~$\comb{crec}$ terms is
\begin{prooftree}
\AXC{}
\UIC{$\cl*{\comb{crec} a(\lambda_r f.\lambda\vec v.t)}\rho\evalto \cl*{\lambda\vec v.\cond{\lh a<\lh{v_1}}{t}{\eps}}{\extend\rho f{\comb{crec}(\mathbf{0} a)(\lambda_r f.\lambda\vec v.t)}}$}
\end{prooftree}
which shows how unwinding the recursion increments the clock by one step. The cost of most inference rules is~$1$; the exceptions are that the $\comb{down} s\,t$ inference rule has cost $2\lh z+1$, where $\cl t\rho\evalto \cl z\theta$, and that environment and oracle evaluation have length-cost (so, e.g., the cost of the environment rule shown above is $\max(\lh z,1)$ when $z$ is of base type, and $1$ otherwise).

\paragraph{Implementing lists and sorting.}
We implement lists of binary words via concatenated self-delimiting strings. Specifically, we code the word $w=b_0\dots b_{k-1}$ as $s(w) = 1b_01b_1\dots1b_{k-1}0$ and the list $\langle w_0,\dots,w_{k-1}\rangle$ as $s(w_0)\cat\dots\cat s(w_{k-1})$, where $\cat$ is the concatenation operation. Code for the basic list operations is given in Figure~\ref{fig:list-ops}.\footnote{In these code samples, \lstinline[basicstyle=\footnotesize]!letrec f=s in t end! abbreviates $\pgm{t}[f\mapsto\comb{crec}\eps(\lambda_r f.s)]$ and we use the ML notation \lstinline[basicstyle=\footnotesize]!fn x$\; \Rightarrow\dotsc$! for $\lambda$-abstraction.} Note that the $\pgm{cons}$, $\pgm{head}$, and $\pgm{tail}$ programs all use cons-tail recursion. Insertion-sort is expressed in essentially its standard form, as in Figure~\ref{fig:insert-sort}. This implementation requires another form of recursion, in which the complete application of the recursively-defined function appears in an argument to some operator. In the later part of Section~\ref{sec:rec-in-arg} we show how this \emph{recursion in an argument} can be incorporated into $\mathsf{ATR}$. Selection-sort requires yet another form of recursion (a generalization of cons-tail recursion); we discuss how to incorporate it into $\mathsf{ATR}$ in Section~\ref{sec:concl}.
\begin{figure}[tb]
\lstinputlisting{list-ops.atr}
\caption{The basic list operations in~$\mathsf{ATR}$.\label{fig:list-ops}}
\end{figure}
\begin{figure}[tb]
\lstinputlisting{ins-sort.atr}
\caption{Insertion-sort in~$\mathsf{ATR}$.\label{fig:insert-sort}}
\end{figure}
Our $\pgm{head}$ and $\pgm{ins\_sort}$ programs use the $\comb{down}$ operator to coerce the type~$\mathsf{N}_{\dmnd}$ to~$\mathsf{N}_{\eps}$. Roughly, $\comb{down}$ is used in places where our type-system is not clever enough to prove that the result of a recursion is of size no larger than one of the recursion's initial arguments; the burden of supplying these proofs is shifted off to the correctness argument for the recursion. A cleverer type system (say, along the lines of Hofmann's \citep{Hofmann:ic03}) could obviate many of these $\comb{down}$'s, but at the price of more complex syntax (i.e., typing), semantics (of values and of time-complexities), and, perhaps, pragmatics (i.e., programming).
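Since the listing files for Figures~\ref{fig:list-ops} and~\ref{fig:insert-sort} are kept separately, the following sketch may help fix intuitions. It is a rough OCaml rendering of the encoding and of insertion-sort (our illustration, not the $\mathsf{ATR}$ source): OCaml does not track sizes, so the $\comb{down}$ coercions discussed above simply disappear, and OCaml's built-in string order stands in for a $\pgm{leq}$ test. Also, \lstinline!head! and \lstinline!tail! below are not written as cons-tail recursions; only the data layout and the call structure of \lstinline!ins_sort! are mirrored.
\begin{lstlisting}[language=[Objective]Caml,basicstyle=\footnotesize]
(* s(w): code the word w = b0...b(k-1) as 1b0 1b1 ... 1b(k-1) 0;
   a list <w0,...,w(k-1)> is then s(w0) ^ ... ^ s(w(k-1)). *)
let code (w : string) : string =
  String.concat ""
    (List.init (String.length w) (fun i -> "1" ^ String.make 1 w.[i]))
  ^ "0"

let cons (w : string) (l : string) : string = code w ^ l

(* Split a well-formed coded list into its first word and the rest. *)
let rec split (l : string) (acc : string) : string * string =
  if l = "" then (acc, "")
  else if l.[0] = '0' then (acc, String.sub l 1 (String.length l - 1))
  else split (String.sub l 2 (String.length l - 2))
         (acc ^ String.make 1 l.[1])

let head (l : string) : string = fst (split l "")
let tail (l : string) : string = snd (split l "")

(* The intended semantics of down: s if |s| <= |t|, else the empty word. *)
let down (s : string) (t : string) : string =
  if String.length s <= String.length t then s else ""

(* Insertion-sort in its standard shape: the recursive call ins_sort l
   appears inside an argument to insert -- the recursion-in-an-argument
   form treated in the soundness section. *)
let rec insert (w : string) (l : string) : string =
  if l = "" then cons w ""
  else if w <= head l then cons w l  (* string order stands in for leq *)
  else cons (head l) (insert w (tail l))

let rec ins_sort (l : string) : string =
  if l = "" then "" else insert (head l) (ins_sort (tail l))
\end{lstlisting}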
Our use of $\comb{down}$ gives us a more primitive (and intensional) system than found in pure implicit complexity,%
\footnote{Leivant's \emph{recursion under a high-tier bound}~\citep[\S3.1]{Leivant:Ram-rec-I} implements a similar idea.}
but it also gives us a less cluttered setting to work out the basics of complexity-theoretic compositional semantics---the focus of the rest of the paper. Also, in practice the proofs that the uses of $\comb{down}$ force into the correctness argument are for the most part obvious, and thus not a large burden on the programmer.

\section{Soundness theorems}
In this section we rework the Soundness Theorem of~\textit{ATS}\ to set up the framework for such theorems, and then use the framework to handle the recursions used to implement insertion-sort (we discuss selection-sort in Section~\ref{sec:concl}). Because of space considerations, we just sketch the main points here and leave detailed proofs to the full paper. The key technical notion is that of \emph{bounding} a closure~$\cl t\rho$ by a \emph{time-complexity}, which provides upper bounds on the cost of evaluating $\cl t\rho$ to a value~$\cl z\theta$ as well as the \emph{potential} cost of using~$\cl z\theta$. The potential of a base-type closure is just its (denotation's) length, whereas the potential of a function~$f$ is a function that maps potentials~$p$ to the time-complexity of evaluating~$f$ on arguments of potential~$p$. The bounding relation gives a \emph{time-complexity semantics} for~$\mathsf{ATR}$-terms; a \emph{soundness theorem} asserts the existence of a bounding time-complexity for every $\mathsf{ATR}$~term. In this paper, our soundness theorems also assert that the bounding time-complexities are \emph{safe}, which in particular implies type-2 polynomial size and cost bounds for the closure. We thereby encapsulate the Soundness, polynomial-size-boundedness, and polynomial-time-boundedness theorems of~\textit{ATS}\ (the \emph{value semantics} for the meaning of~$\mathsf{ATR}$ terms and corresponding soundness theorem are unchanged).

\paragraph{Soundness for tail-recursion.}
\label{sec:soundness}
We start by defining \emph{cost}, \emph{potential}, and \emph{time-complexity} types, all of which are elements of the simple product type structure over the \emph{time-complexity base types} $\set{\mathsf{T}}\union\setst{\mathsf{T}_L}{\text{$L$ is a label}}$ (we sometimes conflate the syntactic types with their intended meaning, which is the standard set-theoretic semantics when all base types are interpreted as unary numerals). The subtype relation on base types is defined by $\mathsf{T}_L\mathrel{\leq:}\mathsf{T}_{L'}$ if $L\leq L'$ and $\mathsf{T}_L\mathrel{\leq:}\mathsf{T}$ for all~$L$, and extended to product and function types in the standard way. The only cost type is $\mathsf{T}$, and for each $\mathsf{ATR}$-type~$\sigma$ we define the potential type~$\potden\sigma$ and time-complexity type~$\tcden\sigma$ by $\potden{\mathsf{N}_L} = \mathsf{T}_L$, $\potden{\sigma\mathbin{\rightarrow}\tau} = \potden\sigma\mathbin{\rightarrow}\tcden\tau$, and $\tcden\tau = \mathsf{T}\cross\potden\tau$. Write $\semanticOp{cost}(\cdot)$ and $\semanticOp{pot}(\cdot)$ for the left- and right-projections on~$\tcden\tau$. We introduce \emph{time-complexity variables}, a new syntactic category, and define a time-complexity context to be a finite map from t.c.\ variables to cost and potential types. For a t.c.\ context~$\Sigma$, $\Env\Sigma$ is the set of $\Sigma$-environments, defined in the usual way.
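For orientation, unwinding these definitions on the simplest function type gives
\[
\tcden{\mathsf{N}_\eps\mathbin{\rightarrow}\mathsf{N}_\dmnd}
= \mathsf{T}\cross\potden{\mathsf{N}_\eps\mathbin{\rightarrow}\mathsf{N}_\dmnd}
= \mathsf{T}\cross\bigl(\mathsf{T}_\eps\mathbin{\rightarrow}\mathsf{T}\cross\mathsf{T}_\dmnd\bigr),
\]
i.e., a cost of reaching the value paired with a map from argument potentials to the time-complexities of applications.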
We extend $\tcden\cdot$ to $\mathsf{ATR}$-type contexts by introducing t.c.\ variables~$x_c$ and~$x_p$ for each $\mathsf{ATR}$-variable~$x$ and setting $\tcden\Gamma = \union_{(x\mathbin{:}\sigma)\in\Gamma}\set{x_c\mathbin{:}\mathsf{T},x_p\mathbin{:}\potden\sigma}$. A \emph{time-complexity denotation} of t.c.\ type~$\gamma$ w.r.t.\ a t.c.\ context~$\Sigma$ is a function $X:\Env{\Sigma}\to\gamma$. The projections $\semanticOp{cost}$ and $\semanticOp{pot}$ extend to t.c.\ denotations in the obvious way.
\begin{defn} ~
\begin{enumerate}
\item Suppose $\cl t\rho$ is a closure and $\cl z\theta$ a value, both of type~$\tau$; $\chi$ a time-complexity of type~$\tcden\tau$; and $q$ a potential of type~$\potden\tau$. Define the \emph{bounding relations} $\cl t\rho\apprby^\tau\chi$ and $\cl z\theta\apprby_{\mathrm{pot}}^\tau q$ as follows:\footnote{We will drop the superscript when it is clear from context.}
\begin{enumerate}
\item $\cl t\rho\apprby^\tau\chi$ if $\semanticOp{cost}(\cl t\rho)\leq\semanticOp{cost}(\chi)$ and if $\cl t\rho\evalto\cl z\theta$, then $\cl z\theta\apprby_{\mathrm{pot}}^\tau\semanticOp{pot}(\chi)$.
\item $\cl z\theta\apprby_{\mathrm{pot}}^{\base b} q$ if $\lh z\leq q$.
\item $\cl*{\lambda v.t}{\theta}\apprby_{\mathrm{pot}}^{\sigma\mathbin{\rightarrow}\tau}q$ if for all values~$\cl z\eta$, if $\cl z\eta\apprby_{\mathrm{pot}}^{\sigma} p$, then $\cl{t}{\extend\theta v{\cl z\eta}}\apprby^\tau q(p)$.
\item $\cl O\theta\apprby_{\mathrm{pot}}^{\sigma\mathbin{\rightarrow}\tau}q$ if for all values~$\cl z\eta$, if $\cl z\eta\apprby_{\mathrm{pot}}^{\sigma} p$, then $\cl*{O(\cl z\eta)}[]\apprby^\tau q(p)$.
\end{enumerate}
\item For $\rho\in\Env{\Gamma}$ and $\varrho\in\Env{\tcden\Gamma}$, we write $\rho\apprby\varrho$ if for all $v\in\mathrm{Dom}\;\rho$ we have that $\cl v\rho\apprby(\varrho(v_c),\varrho(v_p))$.
\item For an $\mathsf{ATR}$-term $\GDtyping t\tau$ and a time-complexity denotation $X$ of type~$\tcden\tau$ w.r.t.~$\tcden{\Gamma;\Delta}$, we say $t\apprby X$ if for all~$\rho\in\Env{(\Gamma;\Delta)}$ and $\varrho\in\Env{\tcden{\Gamma;\Delta}}$ such that $\rho\apprby\varrho$ we have that $\cl t\rho\apprby X\varrho$.
\end{enumerate}
\end{defn}
We define second-order polynomial expressions of tally, potential, and time-complexity types using the operations $+$, $*$, and $\bmax$ (binary maximum); the typing rules are given in Figure~\ref{fig:poly-typing}. Of course, a polynomial $\tctyping\Sigma p\gamma$ corresponds to a t.c.\ denotation of type~$\gamma$ w.r.t.\ $\Sigma$ in the obvious way. We shall frequently write $p_p$ for $\semanticOp{pot}(p)$.
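As the simplest instance of the bounding relation at base type: for a string constant $K$ with $\GDtyping K{\mathsf{N}_\dmnd}$, the evaluation derivation is a single constant-rule node of cost~$1$ and the value's potential is its length, so $\cl K\rho\apprby(1,\lh K)$ for any~$\rho$; the pair $(1,\lh K)$ can in turn be read as a (constant) t.c.\ polynomial of type $\mathsf{T}\cross\mathsf{T}_\dmnd$.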
\begin{figure}[t]
\begin{gather*}
\AXC{} \UIC{$\Stctyping \eps {\mathsf{T}_\eps}$} \DisplayProof
\quad
\AXC{} \UIC{$\Stctyping {\mathbf{0}^n} {\mathsf{T}_\dmnd}$} \DisplayProof
\quad
\AXC{} \UIC{$\tctyping {\Sigma,x\mathbin{:}\gamma} {x} \gamma$} \DisplayProof
\\
\AXC{$\Stctyping p \gamma$} \RightLabel{($\gamma\shiftsto\gamma'$)} \UIC{$\Stctyping p {\gamma'}$} \DisplayProof
\quad
\AXC{$\Stctyping p \gamma$} \RightLabel{($\gamma\mathrel{\leq:}\gamma'$)} \UIC{$\Stctyping p {\gamma'}$} \DisplayProof
\\
\AXC{$\Stctyping p {\mathsf{T}_{\dmnd_k}}$} \AXC{$\Stctyping q {\mathsf{T}_{\dmnd_k}}$} \BIC{$\Stctyping {p\bullet q}{\mathsf{T}_{\dmnd_k}}$} \DisplayProof
\quad
\AXC{$\Stctyping p {\gamma}$} \AXC{$\Stctyping q {\gamma}$} \BIC{$\Stctyping {p\bmax q}\gamma$} \DisplayProof
\\
\AXC{$\tctyping{\Sigma, x\mathbin{:}\sigma}{p}\tau$} \UIC{$\tctyping\Sigma {\lambda x.p} {\sigma\mathbin{\rightarrow}\tau}$} \DisplayProof
\quad
\AXC{$\Stctyping p {\sigma\mathbin{\rightarrow}\tau}$} \AXC{$\Stctyping q \sigma$} \BIC{$\Stctyping {pq} {\tau}$} \DisplayProof
\end{gather*}
\caption{Typing rules for time-complexity polynomials. $\bullet$ is $+$ or $*$, and $\gamma$ is a t.c.\ base type. \label{fig:poly-typing}}
\end{figure}
\begin{defn}
Let $\gamma$ be a potential type, $\base b$ a time-complexity base type, $p$ a potential polynomial, and suppose $\Stctyping p\gamma$.
\begin{enumerate}
\item $p$ is $\base b$-strict w.r.t.~$\Sigma$ when $\semanticOp{tail}(\gamma)\mathrel{\leq:}\base b$ and every unshadowed\footnote{Roughly, a free-variable occurrence is \emph{shadowed} if it is in a subterm that does not contribute to the size of the term; see \textit{ATS}\ for details.} free-variable occurrence in~$p$ has a type with tail $\mathrel{<:}\base b$.
\item $p$ is $\base b$-chary w.r.t.~$\Sigma$ when $\gamma=\base b$ and $p = p_1\bmax\dots\bmax p_m$, where $m\geq 0$ and each $p_i$ has the form $(vq_1\dots q_k)$ with each $q_j$ $\base b$-strict.
\item $p$ is \emph{$\base b$-safe} w.r.t.~$\Sigma$ if:
\begin{enumerate}
\item $\gamma$ is a base type and $p = q\pmj_{\base b} r$ where $q$ is $\base b$-strict and $r$ is $\base b$-chary, $\pmj_{\base b} = \bmax$ if $\base b$ is oracular, and $\pmj_{\base b} = +$ if $\base b$ is computational.
\item $\gamma=\sigma\mathbin{\rightarrow}(\mathsf{T}\cross\tau)$ and $\semanticOp{pot}(pv)$ is $\base b$-safe w.r.t.~$\Sigma,v\mathbin{:}\sigma$.
\end{enumerate}
\item A t.c.\ polynomial $\Stctyping q{\mathsf{T}\cross\gamma}$ is \emph{$\base b$-safe} if $\semanticOp{pot}(q)$ is.
\item A t.c.\ denotation $X$ of type~$\gamma$ w.r.t.~$\Sigma$ is \emph{$\base b$-safe} if $X$ is bounded by a $\base b$-safe t.c.\ polynomial $\Stctyping p\gamma$.
\end{enumerate}
\end{defn}
The Soundness Theorem of~\textit{ATS}\ asserts that every tail-recursive term~$t$ is bounded by a t.c.\ denotation for which the cost component is bounded by a type-2 polynomial in the lengths of~$t$'s free variables. In the next subsection, we extend this to cons-tail recursion and prove that the bounding t.c.\ denotation is in fact safe. In particular, we also have that the potential of $t$'s denotation is bounded by a safe polynomial. At base type, this latter statement corresponds to the ``poly-max'' bounds that can be computed for Bellantoni-Cook and Leivant-style tiered functions (e.g., \citep[Lemma 4.1]{Bellantoni-Cook:Recursion-theoretic-char}).
\paragraph{Soundness for cons-tail-recursion.}
For the remainder of this subsection $t$ is a term such that $f$ is in cons-tail position in~$t$ and for which we have a typing~$\typing{\Gamma,\vec v\mathbin{:}\vec{\base b}}{f\mathbin{:}\vec{\base b}\mathbin{\rightarrow}\base b}{t}{\base b}$. We write $\Gamma_{\vec v}$ for the type context $\Gamma,\vec v\mathbin{:}\vec{\base b}$. Define the terms $C_\ell = \comb{crec} (\mathbf{0} ^\ell a)(\lambda_r f.\lambda\vec v.t)$ and $T_\ell = \cond{\lh{\mathbf{0} ^\ell a}<\lh{v_1}}{t}{\eps}$ (we write $\mathbf{0} ^\ell a$ for $\mathbf{0}\dotsc\mathbf{0} a$ with $\ell$ $\mathbf{0}$'s, remembering that this is a string constant), and for any environment~$\rho$, set $\rho_\ell = \extend\rho f{C_\ell}$. The main difficulty in proving soundness is constructing a bounding t.c.\ denotation for~$\comb{crec}$ terms. A key component in the construction is the Affine Decomposition Theorem in Section~14 of~\textit{ATS}, which describes how to compute the time-complexity of a term in which $f$ occurs affinely and in tail position. To state it, we need some definitions.
\begin{defn}
Let $X$ and $Y$ be t.c.\ denotations of type~$\tcden{\sigma\mathbin{\rightarrow}\tau}$ and $\tcden\sigma$, respectively.
\begin{enumerate}
\item For a potential~$p\mathbin{:}\mathsf{T}_L$, $\semanticOp{val} p = (1\bmax p,p)$; if $p$ is of higher type, then $\semanticOp{val} p = (1, p)$. For a t.c.\ environment~$\varrho$ and $\mathsf{ATR}$ variable~$v$ we write $\extend\varrho v{\chi}$ for $\extend\varrho{v_c,v_p}{\semanticOp{cost}(\chi),\semanticOp{pot}(\chi)}$.
\item If $Y$ is w.r.t.~$\tcden{\Gamma,v\mathbin{:}\sigma'}$, then $\llambda_\star v.Y =_{\mathrm{df}} \llambda\varrho(1, \llambda v_p.Y(\extend\varrho v {\semanticOp{val} v_p}))$ is a t.c.\ denotation of type~$\tcden{\sigma'\mathbin{\rightarrow}\sigma}$ w.r.t.~$\tcden\Gamma$ (we use $\llambda x.\dotsb$ to denote the map $x\mapsto\dotsb$).
\item $X\star Y =_{\mathrm{df}} \llambda\varrho(\semanticOp{cost}(X\varrho)+\semanticOp{cost}(Y\varrho)+\semanticOp{cost}(\chi)+1,\semanticOp{pot}(\chi))$ is a t.c.\ denotation of type~$\tcden\tau$, where $\chi = \semanticOp{pot}(X\varrho)(\semanticOp{pot}(Y\varrho))$.
\item $\semanticOp{dally}(\ell,X) = \llambda\varrho(\ell+\semanticOp{cost}(X\varrho),\semanticOp{pot}(X\varrho))$ and for $\tcden\sigma = \mathsf{T}\cross\mathsf{T}_L$, $\semanticOp{pad}(\ell, Y) = \llambda\varrho(\semanticOp{cost}(Y\varrho),\ell+\semanticOp{pot}(Y\varrho))$.
\item For $\tcden\sigma =\mathsf{T}\cross\mathsf{T}_L$ and $Z$ also a t.c.\ denotation of type~$\tcden\sigma$, $(Z\plusmax Y)\varrho = (\semanticOp{cost}(Z\varrho)+\semanticOp{cost}(Y\varrho), \semanticOp{pot}(Z\varrho)\bmax\semanticOp{pot}(Y\varrho))$.
\end{enumerate}
\end{defn}
\begin{thm}[Decomposition Theorem]
\label{ats-decomp}
Suppose $t\apprby X$ and each $Y_i$ is such that if $ft_1\dots t_k$ is a complete application of~$f$ in~$t$, then $t_i\apprby Y_i$. Then
$$
t\apprby\llambda\varrho\left(X\varrho_\eps\plusmax\semanticOp{pad}\bigl(\semanticOp{tail\_len}(f,t),\varrho f\star Y_1\varrho_\eps\star\dots\star Y_k\varrho_\eps\bigr)\right)
$$
where $\varrho_\eps = \extend\varrho f{\llambda_\star\vec v.(1, 0)}$ and $\semanticOp{tail\_len}(f, t)$ is defined in Footnote~\ref{footnote:cons-tail-recn}.
\end{thm}
Intuitively, the cost of ``getting to'' the recursive call is covered by~$X\varrho_\eps$, and the cost of the call itself by~$\varrho f\star Y_1\varrho_\eps\star\dots\star Y_k\varrho_\eps$, taking into account any $\comb{c}_a$ operations after the call (this is an over-estimate if no recursive call is made). The potential (size in this case, since~$t$ is of base type) is either independent of any complete application of~$f$ or is equal to the size of such an application, again taking into account later~$\comb{c}_a$ operations.
\begin{defn}
A \emph{decomposition function} for~$t$ is a function $d(\varrho^{\Env{\tcden{\Gamma_{\vec v}}}},\chi^{\tcden\gamma})\mathbin{:}\tcden{\base b}$ such that $t\apprby\llambda\varrho.d(\varrho_\eps,\varrho f)$ (recall that $f$ is the affinely-restricted variable in~$t$).
\end{defn}
Recalling the evaluation rule for~$\comb{crec}$ and the definition of~$\apprby$, we see that we must understand how the closure~$\cl{T_0}{\rho_1}$ is evaluated for appropriate~$\rho$. It is easy to see that in such an evaluation, the only sub-evaluations of closures over terms of the form~$T_m$ are evaluations of closures of the form $\cl{T_m}{\extend{\rho_{m+1}}{\vec v}{\vec{\cl z\theta}}}$ for some closures~$\cl{z_i}{\theta_i}$. For the closure~$\cl{T_0}{\rho_1}$ we say that \emph{the clock is bounded by~$K$} if in every such sub-evaluation we have that~$\lh{z_1}<K$. For a decomposition function~$d$ define $\Phi_{d,K}(n):\Env{\tcden{\Gamma_{\vec v}}}\to\tcden{\base b}$ by
\begin{align*}
\Phi_{d,K}(0) &= \llambda\varrho.(2K+1,0) \\
\Phi_{d,K}(n+1) &= \llambda\varrho.\semanticOp{dally}\bigl(2K+1,\;d\bigl(\varrho_\eps,\semanticOp{dally}\bigl(2,(\llambda_\star\vec v.\Phi_{d,K}(n))\varrho\bigr)\bigr)\bmax\bigl(1,0\bigr)\bigr)
\end{align*}
We will use $\Phi_{d,K}$ to bound~$T_\ell$.
\begin{thm}[Recomposition Lemma]
\label{recomposition}
Suppose $d$ is a decomposition function for~$t$, $\rho\in\Env{\Gamma_{\vec v}}$, $\varrho\in\Env{\tcden{\Gamma_{\vec v}}}$, $\rho\apprby\varrho$, and that in the evaluation of $\cl{T_0}{\rho_1}$ the clock is bounded by~$K$. Then $\cl{T_0}{\rho_1}\apprby\Phi_{d,K}(K-\lh a)(\extend\varrho{v_i}{\semanticOp{val}(\varrho v_{ip})})$.
\end{thm}
The Recomposition Lemma tells us that~$\Phi_{d,K}(n)$ gives us a bound on the time-complexity of our recursion scheme. What we must do now is to ``solve'' the recurrence used to define~$\Phi$ and show that it is polynomially bounded.
\begin{thm}[Bounding Lemma]
\label{bounding}
Suppose that, in Theorem~\ref{ats-decomp}, $X$ and each $Y_i$ are bounded by t.c.\ polynomials~$p$ and $p_i$, respectively. Assume further that $p$ is $\potden{\base b}$-safe and each $p_i$ is $\potden{\base b_i}$-safe w.r.t.~$\tcden{\Gamma_{\vec v}}$. Then there is a $\potden{\base b}$-safe polynomial $\tctyping{\tcden{\Gamma_{\vec v}},K\mathbin{:}\potden{\base b_1},n\mathbin{:}\potden{\base b_1}}{\phi(K,n)}{\tcden{\base b}}$ such that for all $K$ and $n$, $\Phi_{d,K}(n)\leq\phi(K,n)$.
\end{thm}
\begin{proof}
Let $d$ be the decomposition function for~$t$ given in Theorem~\ref{ats-decomp}.
Using the definition of~$d$ we can find a $\potden{\base b}$-safe polynomial $\tctyping{\tcden{\Gamma_{\vec v}},K\mathbin{:}\potden{\base b_1}}{(P_0(K), P_1)}{\tcden{\base b}}$ and a recursive upper bound on~$\Phi_{d,K}(n)\varrho$:
\begin{align*}
\Phi_{d,K}(0)\varrho &\leq (2K+1, 0) \\
\Phi_{d,K}(n+1)\varrho &\leq (P_0(K), P_1)\varrho \plusmax \semanticOp{pad}(\ell,\Phi_{d,K}(n)\extend\varrho{v_i}{\semanticOp{val}(p_{ip}\varrho)})
\end{align*}
where $\ell=\semanticOp{tail\_len}(f,t)$. An easy proof by induction shows that $\Phi_{d,K}(n)\leq (nP_0(K)\xi^{n-1}+2K+1, n\ell+P_1\xi^{n-1})$ for $n\geq 1$, where $\xi^0 = \mathop{\mathrm{id}}\nolimits$ and $(v_{ic},v_{ip})\xi^{n+1} = \semanticOp{val}(p_{ip}\xi^n)$. Since $\ell\not=0$ implies~$\base b_1\mathrel{<:}\base b$, $n\ell+P_1\xi^{n-1}$ is bounded by a $\potden{\base b}$-safe polynomial provided that $P_1\xi^{n-1}$ is $\potden{\base b}$-safe. Since $P_1$ is $\potden{\base b}$-safe and type-correct substitution of safe polynomials into a safe polynomial yields a safe polynomial (shown in Section~$8$ of~\textit{ATS}), to prove the theorem it suffices to show that $p_{ip}\xi^n$ is a $\potden{\base b_i}$-safe polynomial for each~$i$. The proof of this is essentially the proofs of the One-step and $n$-step lemmas of Section~$10$ in~\textit{ATS}\ (it is here that we use the remaining constraints on the types in the $\comb{crec}$ typing rule).
\end{proof}
\begin{prop}[Termination Lemma]
\label{termination}
Assume the hypotheses of Theorem~\ref{bounding} hold and that $\rho\apprby\varrho$. Then in the evaluation of $\cl {T_{0}}{\rho_{1}}$ the clock is bounded by $p_{1p}\xi^1\varrho$, where $\xi^1$ is defined as in the proof of Theorem~\ref{bounding}.
\end{prop}
\begin{proof}
This follows from the details of the proof of Theorem~\ref{bounding}.
\end{proof}
\begin{thm}[Soundness Theorem]
\label{soundness}
For every $\mathsf{ATR}$ term~$\GDtyping{t}{\tau}$ there is a $\semanticOp{tail}(\tcden{\tau})$-safe t.c.\ denotation $X$ of type~$\tcden\tau$ w.r.t.\ $\tcden{\Gamma;\Delta}$ such that $t\apprby X$.
\end{thm}
\begin{proof}
The proof is by induction on terms; for non-$\comb{crec}$ terms it is essentially as in~\textit{ATS}. For $\Gtyping{\comb{crec} a(\lambda_r f.\lambda\vec v.t)}{\vec{\base b}\mathbin{\rightarrow}\base b}$, suppose $\tilde\rho\in\Env\Gamma$, $\tilde\varrho\in\Env{\tcden{\Gamma}}$, and $\tilde\rho\apprby\tilde\varrho$. Use the Bounding, Termination, and Recomposition Lemmas to show that $\cl*{\lambda\vec v.T_0}{\tilde\rho_1}\apprby(\llambda_\star\vec v.\phi(p_{1p}\xi^1,p_{1p}\xi^1-\lh{a}))\tilde\varrho$, where $p_1$, $\phi$, and $\xi^n$ are as in the proof of the Bounding Lemma. We conclude that $\crec\,a\,(\afflambda f.\lambda\vec v.t)\apprby\semanticOp{dally}(1,\llambda_\star\vec v.\phi(p_{1p}\xi^1,p_{1p}\xi^1-\lh{a}))$. Since this last time-complexity is a $\potden{\base b}$-safe polynomial, the claim is proved.
\end{proof}
\begin{cor}
If $\typing{\underline{~}}{\underline{~}}{t}{\tau}$, then $t$ is computable in type-2 polynomial time.
\end{cor}

\paragraph{Soundness for recursion in an argument.}
\label{sec:rec-in-arg}
We now address the recursions used in insertion-sort, in which the recursive use of the function occurs inside an argument to a previously-defined function. What we are really after here is structural (primitive) recursion for \emph{defined} datatypes (such as our defined lists). First we adapt our \InfRule{$\mathbin{\rightarrow}$-E} rule to allow affine variables to appear in arguments to applications.
We still require some restrictions in order to ensure a one-use property; the following rule, which we refer to as~$(*)$, is more than sufficient for our needs:
\begin{prooftree}
\AXC{$\typing\Gamma{\Delta_0} s{\sigma\mathbin{\rightarrow}\tau}$}
\AXC{$\typing\Gamma{\Delta_1} t\sigma$}
\RightLabel{($*$)}
\BIC{$\typing\Gamma{\Delta_0\union\Delta_1}{st}{\tau}$}
\end{prooftree}
where at most one of $\Delta_0$ and $\Delta_1$ is non-empty, and if $\semanticOp{level}\sigma>0$, then $\Delta_1=\emptyset$. Thus an affine variable~$f$ may only occur in~$t$ if $t$ is of base type, and may not occur simultaneously in~$s$ and~$t$. In particular, it is safe for $\beta$-reduction to copy a completed $f$-computation, but not an incomplete one. To simplify notation for the recursion present in insertion-sort we consider the special case in which we allow typings of the form~$(*)$ provided $t = \cond{s'}{s(f\vec t)}{s''}$ where $f$ is not free in~$s'$ or~$s''$ (we treat the general case in the full paper). First we must find a decomposition function. Assuming that $s\apprby X_s$, $t\apprby X_t$, and $t_i\apprby Y_i$, we can take as our decomposition function
\begin{multline*}
d(\varrho,\chi) = X_t\varrho\plusmax \bigl(\semanticOp{cost}\bigl(X_s\varrho\bigr)+\semanticOp{cost}\bigl(\chi\star \vec{Y\varrho}\bigr) + \semanticOp{cost}\bigl(\semanticOp{pot}(X_s\varrho)(\semanticOp{pot}(\chi\star \vec{Y\varrho}))\bigr), \\
\semanticOp{pot}\bigl(\semanticOp{pot}(X_s\varrho)(\semanticOp{pot}(\chi\star \vec{Y\varrho}))\bigr)\bigr)
\end{multline*}
where we have written $\chi\star\vec{Y\varrho}$ for $\chi\star Y_1\varrho\star\dots\star Y_k\varrho$. Assume the inductively-given bounding t.c.\ denotations are bounded by safe polynomials~$p_s$, $p_t$, and $p_1,\dots,p_k$. The Soundness Theorem follows from the Recomposition Lemma provided we have a polynomial bound on~$\Phi_{d,K}(n)$, so now we establish such a bound. When $\base b$ is oracular, since $p_{sp}$ ($=\semanticOp{pot}(p_s)$) is $\potden{\base b}$-safe, we have that $p_{sp} = \lambda z^{\potden{\base b}}.(p,q_s\bmax (r_s\bmax z))$ where $q_s$ is $\potden{\base b}$-strict and $r_s$ is $\potden{\base b}$-chary and does not contain~$z$. We can therefore find a $\potden{\base b}$-safe t.c.\ polynomial~$(P_0(K,z^{\potden{\base b}}), P_1)$ and derive the following recursive bound on~$\Phi_{d,K}$ using the same conventions as in our analysis of cons-tail recursion:
\begin{align*}
\Phi_{d,K}(0)\varrho &\leq (2K+1,0) \\
\Phi_{d,K}(n+1)\varrho &\leq (P_0(K,\semanticOp{pot}(\Phi_{d,K}(n)\varrho')), P_1) \plusmax \Phi_{d,K}(n)\varrho'
\end{align*}
where $\varrho'=\extend\varrho{v_i}{\semanticOp{val}(p_{ip}\varrho)}$. It is an easy induction to show that for $n\geq 1$, $\Phi_{d,K}(n) \leq ((n\cdot P_0(K, P_1)+2K+1)\xi^{n-1},P_1\xi^{n-1})$, and thus the Bounding and Termination Lemmas that must be proved are exactly those established before. When $\base b$ is computational, a similar calculation yields the bounding polynomial $((n\cdot P_0((n-2)q_s+P_1)+2p_{1p})\xi^{n-1}, (n-1)q_s\xi^{n-2}+P_1\xi^{n-1})$ for a $\potden{\base b}$-strict polynomial~$q_s$.

\section{Concluding remarks}
\label{sec:concl}
\suppressfloats
In~\textit{ATS}\ we introduced the formalism~$\mathsf{ATR}$, which captures the basic feasible functionals at type-level~$\leq 2$. We have extended the formalism with recursion schemes that allow for more natural programming, and we have demonstrated the new formalism by implementing lists of binary strings and insertion-sort, showing that the new recursion schemes do not take us out of the realm of feasibility.
We have also given a strategy for proving that particular forms of recursion can be ``safely'' added to the base system. Here we indicate some future directions:

\paragraph{More general affine recursions.}
In the full paper we give a definition of \emph{plain affine recursion} that generalizes cons-tail recursion, allows recursive calls in arguments, and permits recursive calls in the body of $\comb{let}$-expressions. In particular, it covers all forms of recursion used in the list operations and insertion- and selection-sort (code for the latter is in Figure~\ref{fig:sel-sort}). At the time of writing, we do not have all the details of the soundness argument in the general case, but we expect it to follow the framework we have developed here.
\begin{figure}[tb]
\lstinputlisting{sel-sort.atr}
\caption{Selection-sort in~$\mathsf{ATR}$. The function \lstinline!leq! tests two integers written in binary for inequality; we leave its full definition as an exercise for the reader. Note: \lstinline[basicstyle=\footnotesize]!let val x=s in t end!, where we restrict~$x$ to be of base type, abbreviates \lstinline[basicstyle=\footnotesize]!(fn x $\;\Rightarrow\;$ t)s!.\label{fig:sel-sort}}
\end{figure}

\paragraph{Lazy $\mathsf{ATR}$.}
A version of~$\mathsf{ATR}$ with lazy constructors (streams) and evaluation would be very interesting. There are many technical challenges in analyzing such a system, but again we expect that the general outline will follow the approach we have used in this paper. Of course one can implement streams in the current call-by-value setting in standard ways (raising the type-level), but a direct lazy implementation of streams is likely to be more informative. We expect the analysis of such a lazy-$\mathsf{ATR}$ to require an extensive reworking of the various semantic models we have discussed here and in~\textit{ATS}.

\paragraph{Real-number algorithms.}
$\mathsf{ATR}$ is a type-$2$ language, but here we have focused on type-$1$ algorithms. We are working on implementing real-number algorithms, viewing a real number as a type-$1$ (stream) oracle. This can be done in either a call-by-value setting (e.g., algorithms that take a string of length~$n$ as input and return something like an $n$-bit approximation of the result) or a lazy setting (in which the algorithm returns bits of the result on demand).

\bibliographystyle{abbrvnat}
\newcommand{\eq}[1]{Eq.~(\ref{#1})}
\jot=10pt
\begin{document}
\begin{flushright}
FIAN-TD-2019-18 \ \ \ \ \ \\
arXiv: 1909.05241V2
\end{flushright}
\vspace{1cm}
\begin{center}
{\Large \bf Cubic interactions for arbitrary spin \hbox{${\cal N}$}-extended
\medskip
massless supermultiplets in 4d flat space}
\vspace{2.5cm}

R.R. Metsaev\footnote{ E-mail: [email protected] }

\vspace{1cm}
{\it Department of Theoretical Physics, P.N. Lebedev Physical Institute, \\
Leninsky prospect 53, Moscow 119991, Russia }

\vspace{3cm}
{\bf Abstract}
\end{center}
${\cal N}$-extended massless arbitrary integer and half-integer spin supermultiplets in four-dimensional flat space are studied in the framework of the light-cone gauge formalism. For such multiplets, by using light-cone momentum superspace, we build an unconstrained light-cone gauge superfield formulation. The superfield formulation is used to develop a superspace representation for all cubic interaction vertices of the ${\cal N}$-extended massless supermultiplets. A suitable treatment of the light-cone gauge superfields allows us to obtain an attractively simple superspace representation for the cubic interaction vertices. A superspace realization of the relativistic symmetries of the ${\cal N}$-extended Poincar\'e superalgebra on the space of interacting fields is also obtained.

\vspace{3cm}

Keywords: N-extended supersymmetric higher-spin fields, light-cone gauge formalism, interaction vertices.

\newpage
\renewcommand{\thefootnote}{\arabic{footnote}}
\setcounter{footnote}{0}

\section{ \large Introduction}

In view of their aesthetic features, ${\cal N}$-extended supersymmetric theories have attracted considerable interest over a long period of time. As is known, the light-cone gauge approach offers considerable simplifications for the study of supersymmetric theories. For this reason, ${\cal N}$-extended supersymmetric theories have been studied extensively in the framework of this approach. We mention the application of the light-cone formalism to the investigation of the ultraviolet finiteness of ${\cal N}=4$ supersymmetric YM theory in Refs.\cite{Brink:1982wv,Mandelstam:1982cb}. Also we note that the light-cone gauge formulation of type $IIB$ supergravity theories in $10d$ flat space and $AdS_5 \times S^5$ space was developed in the respective Ref.\cite{Green:1982tk} and Ref.\cite{Metsaev:1999gz}, while the study of type $IIA$ and $IIB$ light-cone gauge superstring field theories may be found in Refs.\cite{Green:1983hw}. Recent interesting applications of the light-cone formalism to the study of ${\cal N}=8$ supergravity may be found in Refs.\cite{Kallosh:2009db,Ananth:2017xpj}. An attractive example of the application of the light-cone gauge formalism is supersymmetric higher-spin massless field theory.
This is to say that, in the framework of the light-cone gauge approach, a cubic interaction vertex of the {\it scalar} ${\cal N}$-extended massless supermultiplet with arbitrary ${\cal N}\in 4\mathbb{N}$ in $4d$ flat space was obtained in Ref.\cite{Bengtsson:1983pg}, while in Ref.\cite{Metsaev:2019dqt}, for the case of {\it arbitrary spin} (integer and half-integer) ${\cal N}=1$ massless supermultiplets in $4d$ flat space, we obtained the full list of cubic interaction vertices. The result in Ref.\cite{Metsaev:2019dqt} provides the ${\cal N}=1$ supersymmetric completion of all cubic interaction vertices for arbitrary spin bosonic massless fields obtained in Refs.\cite{Bengtsson:1983pd,Bengtsson:1986kh}.%
\footnote{ For recent studies of cubic interactions of higher-spin ${\cal N}=1$ massless supermultiplets by using gauge-invariant supercurrents, see Refs.\cite{Buchbinder:2017nuc}-\cite{Gates:2019cnl}.}

In this paper, we consider {\it arbitrary spin} (integer and half-integer) ${\cal N}$-extended massless supermultiplets with arbitrary ${\cal N}\in 4\mathbb{N}$ in $4d$ flat space. For such supermultiplets, our aim is to find all cubic interaction vertices. To this end, as in Ref.\cite{Metsaev:2019dqt}, we prefer to use light-cone gauge unconstrained superfields that are defined in a light-cone momentum superspace. We note that, in the past, the light-cone momentum superspace has fruitfully been used in many important and interesting studies of supergravity and superstring theories. As examples of attractive uses of the momentum superspace, we mention the construction of $IIB$ supergravity in $10d$ flat space and of superstring field theories in $10d$ flat space in the respective Ref.\cite{Green:1982tk} and Ref.\cite{Green:1983hw}. The momentum superspace also turns out to be very convenient for studying supergravity in $11d$ flat space \cite{Metsaev:2004wv} and $IIB$ supergravity in $AdS_5\times S^5$ space \cite{Metsaev:1999gz}. In this paper, using the Grassmann momentum entering the light-cone momentum superspace, we collect the fields of ${\cal N}$-extended massless supermultiplets into suitable unconstrained light-cone gauge superfields and use such superfields to construct the full list of cubic interaction vertices. We note that it is the formalism of unconstrained light-cone gauge superfields that provides us with the possibility of building attractively simple expressions for cubic vertices and allows us to obtain the full classification of cubic interactions. Some long-term motivations for our study of supersymmetric higher-spin field theory, which are beyond the scope of this paper, may be found in the Conclusions.

Our paper is organized in the following way. In Sec.\ref{sec-02}, we start with a brief review of the light-cone coordinate frame and discuss the general structure of the ${\cal N}$-extended Poincar\'e superalgebra. We discuss the field content that enters arbitrary spin (integer and half-integer) massless ${\cal N}$-extended supermultiplets. After that, we introduce our ${\cal N}$-extended momentum superspace and provide the explicit description of the light-cone gauge unconstrained superfields which are defined on this superspace. Section \ref{sec-03} is devoted to the description of the general structure of $n$-point interaction vertices for theories of interacting fields. We provide a detailed description of the constraints that are imposed by the kinematical symmetries of the ${\cal N}$-extended Poincar\'e superalgebra on $n$-point interaction vertices.
In Sec.\ref{sec-04}, we restrict our attention to cubic vertices. First, we adapt the general kinematical constraints of the ${\cal N}$-extended Poincar\'e superalgebra obtained in Sec.\ref{sec-03} to the case of cubic vertices. Second, we derive the constraints imposed on the cubic vertices by the dynamical symmetries of the ${\cal N}$-extended Poincar\'e superalgebra. Third, we formulate the light-cone gauge dynamical principle and, finally, we present the complete system of equations that allows us to fix the cubic vertices unambiguously.

In Sec.\ref{sec-05}, we present the main result of this paper. We show explicit expressions for all cubic vertices that describe interactions of arbitrary spin ${\cal N}$-extended massless supermultiplets. We start with the presentation of the superspace form of the cubic interaction vertices. After that, we present the restrictions on the allowed values of ${\cal N}$ and of the superfield helicities entering our cubic interaction vertices. These restrictions provide the classification of the cubic vertices that can be built for arbitrary spin ${\cal N}$-extended supermultiplets in the framework of the light-cone gauge approach. Also we discuss the representation of the cubic vertices in terms of the component fields.

Sec.\ref{concl} summarizes our conclusions. In Appendix A, we present our notation and conventions. In Appendix B, we describe properties of our light-cone gauge superfields. In Appendix C, we outline the derivation of the superspace cubic interaction vertices.

\newsection{ \large Light-cone gauge superfield formulation of free ${\cal N}$-extended massless supermultiplets }\label{sec-02}

\noindent {\bf Light-cone coordinate frame}. We consider light-cone gauge fields by using a helicity basis. Therefore we start with the description of the light-cone coordinate frame. In the flat space $R^{3,1}$, the Lorentz basis coordinates are denoted as $x^\mu$, $\mu=0,1,2,3$, while the light-cone basis coordinates, denoted as $x^\pm$, $x^{\scriptscriptstyle R}$, $x^{\scriptscriptstyle L}$, are expressed in terms of $x^\mu$ as
\begin{equation} \label{02092019-man-01}
x^\pm \equiv \frac{1}{\sqrt{2}}(x^3 \pm x^0)\,,\qquad \qquad x^{\scriptscriptstyle R} \equiv \frac{1}{\sqrt{2}}(x^1 + {\rm i} x^2)\,,\qquad x^{\scriptscriptstyle L} \equiv \frac{1}{\sqrt{2}}(x^1 - {\rm i} x^2)\,.
\end{equation}
Throughout this paper, the coordinate $x^+$ is taken to be a time-evolution parameter. Let $X^\mu$ be a vector of the $so(3,1)$ Lorentz algebra. In the light-cone basis \rf{02092019-man-01}, the $X^\mu$ is decomposed as $X^+$, $X^-$, $X^{\scriptscriptstyle R}$, $X^{\scriptscriptstyle L}$. Using the notation $\eta_{\mu\nu}$ for the mostly positive flat metric of $R^{3,1}$, we note that a scalar product of the $so(3,1)$ Lorentz algebra vectors $X^\mu$ and $Y^\mu$ is represented in the following way:
\begin{equation} \label{02092019-man-02}
\eta_{\mu\nu}X^\mu Y^\nu = X^+Y^- + X^-Y^+ + X^{\scriptscriptstyle R} Y^{\scriptscriptstyle L} + X^{\scriptscriptstyle L} Y^{\scriptscriptstyle R}\,.
\end{equation}
Relation \rf{02092019-man-02} implies that, in the light-cone basis, the non-vanishing elements of $\eta_{\mu\nu}$ are given by $\eta_{+-} = \eta_{-+}=1$, $\eta_{{\scriptscriptstyle R}{\scriptscriptstyle L}} = \eta_{{\scriptscriptstyle L}{\scriptscriptstyle R}} = 1$. We note then that the covariant and contravariant vectors $X_\mu$, $X^\mu$ are related as follows: $X^+=X_-$, $X^-=X_+$, $X^{\scriptscriptstyle R}=X_{\scriptscriptstyle L}$, $X^{\scriptscriptstyle L}=X_{\scriptscriptstyle R}$.
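As a quick check of \rf{02092019-man-02}, substituting the definitions \rf{02092019-man-01} for the vectors $X^\mu$, $Y^\mu$ gives
\begin{eqnarray*}
X^+Y^- + X^-Y^+ &=& \frac{1}{2}\big[(X^3+X^0)(Y^3-Y^0)+(X^3-X^0)(Y^3+Y^0)\big] = X^3Y^3 - X^0Y^0\,,
\\
X^{\scriptscriptstyle R} Y^{\scriptscriptstyle L} + X^{\scriptscriptstyle L} Y^{\scriptscriptstyle R} &=& X^1Y^1 + X^2Y^2\,,
\end{eqnarray*}
so that the sum indeed reproduces $\eta_{\mu\nu}X^\mu Y^\nu$ for the mostly positive metric.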
\noindent {\bf Extended Poincar\'e superalgebra in light-cone frame}. The method proposed in Ref.\cite{Dirac:1949cp} reduces the problem of finding a light-cone gauge dynamical system to the problem of finding a solution of the commutation relations for the algebra of basic symmetries. For field theories with extended supersymmetries in flat space, the basic symmetries are governed by the extended Poincar\'e superalgebra. Therefore, in order to fix our notation, we now discuss the general structure of the extended Poincar\'e superalgebra. For the case of the $R^{3,1}$ space, the ${\cal N}$-extended Poincar\'e superalgebra consists of the translation generators $P^\mu$, the generators of the $so(3,1)$ Lorentz algebra $J^{\mu\nu}$, Majorana supercharges $Q^{\alpha i}$, $Q_i^\alpha$, and the $su({\cal N})$ R-symmetry algebra generators $J^i{}_j$. The explicit light-cone form of the commutation relations of the extended Poincar\'e superalgebra that we use in this paper may be found in Appendix A. Here we note that, in the light-cone basis \rf{02092019-man-01}, generators of the extended Poincar\'e superalgebra can be separated into the following two groups:
{\small
\begin{eqnarray}
\label{02092019-man-03}
&& \hspace{-1.6cm} P^+, \ \ P^{\scriptscriptstyle R},\ \ \ P^{\scriptscriptstyle L}, \ \ \ \ J^{+{\scriptscriptstyle R}},\ \ \ \ J^{+{\scriptscriptstyle L}},\ \ J^{+-},\ \ J^{{\scriptscriptstyle R}{\scriptscriptstyle L}},\ \ Q_i^{+{\scriptscriptstyle R}},\ \ Q^{+{\scriptscriptstyle L} i},\ \ J^i{}_j, \hspace{0.5cm}\hbox{ kinematical generators};
\\
\label{02092019-man-04}
&& \hspace{-1.6cm} P^-, \ \ J^{-{\scriptscriptstyle R}}, \ \ J^{-{\scriptscriptstyle L}}, \ \ Q^{-{\scriptscriptstyle R} i}, \ \ Q_i^{-{\scriptscriptstyle L}}, \hspace{5.7cm} \hbox{ dynamical generators}.
\end{eqnarray}
}
Our aim in this paper is to find a field-theoretical realization of the generators in \rf{02092019-man-03},\rf{02092019-man-04}. We note that, with the exception of $J^{+-}$, the kinematical generators \rf{02092019-man-03} are quadratic in fields,%
\footnote{ The $J^{+-}$ takes the form $J^{+-} = G_0 + {\rm i} x^+ P^-$, where the generator $G_0$ is quadratic in fields, while the light-cone Hamiltonian $P^-$ consists of quadratic and higher-order terms in fields.}
while the dynamical generators \rf{02092019-man-04} consist of quadratic and higher-order terms in fields. To provide a field realization of the generators of the extended Poincar\'e superalgebra, we use a light-cone gauge description of fields.

\noindent {\bf Content of component fields}. We now discuss the component fields entering extended massless supermultiplets. To this end we use a label $\lambda$ to denote the helicity of a massless field, while the indices $i,j,k,l=1,\ldots,{\cal N}$ stand for vector indices of the $su({\cal N})$ algebra.
Using such notation, we introduce a field $\phi_{\lambda\,;\,i_1\ldots i_q}$ which is an (integer or half-integer) helicity-$\lambda$ field of the Poincar\'e algebra and a rank-$q$ totally antisymmetric covariant tensor field of the $su({\cal N})$ algebra.%
\footnote{ Transformations of the field $\phi_{\lambda;i_1\ldots i_q}$ under the action of generators of the Poincar\'e algebra take the same form as the ones for the field $\phi_\lambda$ in (2.23)-(2.27) in Ref.\cite{Metsaev:2019dqt}.}
Now the field content entering an arbitrary (integer or half-integer) spin ${\cal N}$-extended massless supermultiplet of the Poincar\'e superalgebra in $R^{3,1}$ is given by
\begin{eqnarray}
\label{02092019-man-05}
&& \{\lambda\}_{\rm ext} = \sum_{q=0,1,2,\ldots, {\cal N}} \oplus \,\, \phi_{\lambda - \frac{1}{2} q+\frac{1}{4}{\cal N}\,;\, i_1\ldots i_q }(x)\,,
\\
&& \hspace{1.5cm} \lambda = \hbox{$0,\pm \frac{1}{2} ,\pm 1,\pm \frac{3}{2},\ldots, \pm \infty$}\,, \hspace{1cm} {\cal N} \in 4 \mathbb{N}\,,
\end{eqnarray}
where
\begin{eqnarray}
&& \phi_{\lambda;i_1\ldots i_q } \ \hbox{ are bosonic fields for } \ \lambda\in \mathbb{Z}; \nonumber\\[-14pt]
\label{02092019-man-05-a1}
&& \\[-14pt]
&& \phi_{\lambda;i_1\ldots i_q } \ \hbox{ are fermionic fields for } \lambda \in \mathbb{Z}+\frac{1}{2}\,. \nonumber
\end{eqnarray}
From \rf{02092019-man-05}, we see that the ${\cal N}$-extended massless supermultiplet involves fields with the following values of the helicities
{\small
\begin{eqnarray}
&& \lambda_{\rm min},\,\,\lambda_{\rm min}+\hbox{$\frac{1}{2}$}\,,\ldots\ldots, \lambda-\hbox{$\frac{1}{2}$},\,\,\lambda,\,\,\lambda+\hbox{$\frac{1}{2}$}, \ldots\ldots,\lambda_{\rm max} - \hbox{$\frac{1}{2}$}\,,\,\, \lambda_{\rm max}\,;
\\
&& \lambda_{\rm min} = \lambda - \frac{1}{4}{\cal N}\,, \qquad \lambda_{\rm max} = \lambda + \frac{1}{4}{\cal N}\,. \qquad
\end{eqnarray}
}
Also, from \rf{02092019-man-05}, we see that the multiplicity of the helicity $\lambda-\frac{1}{2} q + \frac{1}{4}{\cal N}$ is equal to $C_{\cal N}^q$. We find it convenient to label the supermultiplet \rf{02092019-man-05} by $\lambda$ instead of $\lambda_{\rm max}$ (or $\lambda_{\rm min}$) because, with such a convention, a scalar supermultiplet is simply labelled $\{0\}_{\rm ext}$. Fields \rf{02092019-man-05} depend on the space-time coordinates $x\equiv x^\pm,x^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$. By definition, fields \rf{02092019-man-05} satisfy the hermitian conjugation condition given by
\begin{equation} \label{02092019-man-06}
\phi_{\lambda\,;\, i_1\ldots i_q }^\dagger(x) = \frac{(-)^{\frac{1}{4}{\cal N}-\frac{1}{2}q - \frac{1}{2} e_{\frac{1}{2}q}}}{({\cal N}-q)!}\varepsilon^{i_1\ldots i_q i_{q+1}\ldots i_{\cal N}} \phi_{-\lambda\,;\, i_{q+1}\ldots i_{\cal N} }(x)\,,
\end{equation}
where $\varepsilon^{i_1\ldots i_{\cal N}}$ stands for the Levi-Civita symbol of the $su({\cal N})$ algebra, $\varepsilon^{1 \ldots {\cal N}}=1$, while the quantity $e_\lambda$ is defined by the relations
\begin{equation} \label{02092019-man-07}
e_\lambda =0 \hspace{0.5cm} \hbox{ for } \lambda \in \mathbb{Z}\,,\hspace{1.4cm} e_\lambda = 1 \hspace{0.5cm} \hbox{ for } \ \lambda \in \mathbb{Z}+\frac{1}{2}\,.
\end{equation}
From now on, in place of the position-space fields \rf{02092019-man-05}, we prefer to use momentum-space fields, which are defined by the Fourier transform with respect to the coordinates $x^-$, $x^{\scriptscriptstyle R}$, and $x^{\scriptscriptstyle L}$,
\begin{equation} \label{02092019-man-09}
\phi_{\lambda; i_1\ldots i_q}(x) = \int \frac{ d^3p }{ (2\pi)^{3/2} } e^{{\rm i}(\beta x^- + p^{\scriptscriptstyle R} x^{\scriptscriptstyle L} + p^{\scriptscriptstyle L} x^{\scriptscriptstyle R})} \phi_{\lambda;i_1\ldots i_q}(x^+,p)\,,\qquad d^3p \equiv d\beta dp^{\scriptscriptstyle R} dp^{\scriptscriptstyle L}\,,
\end{equation}
where we use the notation $\beta$ for the momentum in the plus light-cone direction, $\beta\equiv p^+$. Note also that the argument $p$ of the fields $\phi_{\lambda;i_1\ldots i_q}(x^+,p)$ stands as a shortcut for the momenta $\beta$, $p^{\scriptscriptstyle R}$, $p^{\scriptscriptstyle L}$. In terms of the momentum-space fields $\phi_{\lambda;i_1\ldots i_q}(x^+,p)$, the hermiticity condition \rf{02092019-man-06} can be represented as
\begin{equation} \label{02092019-man-10}
\phi_{\lambda\,;\, i_1\ldots i_q }^\dagger(p) = \frac{(-)^{\frac{1}{4}{\cal N}-\frac{1}{2} q - \frac{1}{2} e_{\frac{1}{2} q}}}{({\cal N}-q)!}\varepsilon^{i_1\ldots i_q i_{q+1}\ldots i_{\cal N}} \phi_{-\lambda\,;\, i_{q+1}\ldots i_{\cal N} }(-p)\,.
\end{equation}
Here and below, the dependence of the momentum-space fields $\phi_{\lambda;i_1\ldots i_q}(p)$ on the evolution parameter $x^+$ is implicit. Let us also note our convention $\phi_{\lambda;i_1\ldots i_q}^\dagger(p) \equiv( \phi_{\lambda;i_1\ldots i_q}(p))^\dagger$.

\noindent {\bf Superfield formulation}. To develop a superfield formulation, let us introduce a Grassmann-odd momentum $p_\theta^i$, $\{p_\theta^i,p_\theta^j\}=0$. The Grassmann momentum $p_\theta^i$ is a contravariant vector of the $su({\cal N})$ algebra. The light-cone momentum superspace is parametrized by the light-cone evolution parameter $x^+$, the spatial momenta $p^{\scriptscriptstyle R}$, $p^{\scriptscriptstyle L}$, $\beta$, and the Grassmann momentum $p_\theta^i$,
\begin{equation} \label{02092019-man-11}
x^+\,, \beta\,, \ p^{\scriptscriptstyle R}\,, \ p^{\scriptscriptstyle L}\,, \ p_\theta^i\,.
\end{equation}
Using the Grassmann momentum $p_\theta^i$, we collect the component fields \rf{02092019-man-05},\rf{02092019-man-09} into a superfield $\Phi_\lambda(p,p_\theta)$ defined as
\begin{eqnarray}
\label{02092019-man-12}
&& \Phi_\lambda(p,p_\theta) = \sum_{q=0}^{{\cal N}} \frac{1}{q!}\beta^{\frac{1}{4}{\cal N}- \frac{1}{2} q + \frac{1}{2} e_\lambda - \frac{1}{2} e_{\lambda - \frac{1}{2} q } }\,\, p_\theta^{i_1}\ldots p_\theta^{i_q} \phi_{\lambda - \frac{1}{2} q+\frac{1}{4}{\cal N}\,;\, i_1\ldots i_q }(p)\,,
\\
\label{02092019-man-12-a01}
&& \hspace{2cm} \lambda = \hbox{$0,\pm \frac{1}{2} ,\pm 1,\pm \frac{3}{2},\ldots, \pm \infty$}\,, \hspace{1cm} {\cal N} \in 4 \mathbb{N}\,,
\end{eqnarray}
where $e_\lambda$ is defined in \rf{02092019-man-07}. Often we use the shortcut $\Phi_\lambda\equiv \Phi_\lambda(p,p_\theta)$. From \rf{02092019-man-05-a1},\rf{02092019-man-12}, we see that
\begin{eqnarray}
&& \Phi_\lambda \ \hbox{ are Grassmann even for } \ \lambda\in \mathbb{Z}; \nonumber\\[-16pt]
\label{02092019-man-12-a1}
&& \\[-16pt]
&& \Phi_\lambda \ \hbox{ are Grassmann odd for } \ \lambda\in \hbox{$\mathbb{Z}+\frac{1}{2}$}\,.
\nonumber \end{eqnarray} We note that, for ${\cal N}=4$, the scalar superfield $\Phi_0$ describes the famous ${\cal N}=4$ supersymmetric Yang-Mills theory, while, for ${\cal N}=8$, the scalar superfield $\Phi_0$ describes the ${\cal N}=8$ supergravity theory. In order to obtain a field-theoretical realization, we need a realization of the ${\cal N}$-extended Poincar\'e superalgebra in terms of differential operators. The realization in terms of differential operators acting on our light-cone superfield $\Phi_\lambda(p,p_\theta)$ takes the following form: \begin{eqnarray} \label{02092019-man-14} && P^{\scriptscriptstyle R} = p^{\scriptscriptstyle R}\,, \qquad P^{\scriptscriptstyle L} = p^{\scriptscriptstyle L}\,, \hspace{2cm} P^+=\beta\,,\qquad P^- = p^-\,, \qquad p^-\equiv - \frac{p^{\scriptscriptstyle R} p^{\scriptscriptstyle L}}{\beta}\,,\qquad \\ \label{02092019-man-15} && J^{+{\scriptscriptstyle R}}= {\rm i} x^+ P^{\scriptscriptstyle R} + \partial_{p^{\scriptscriptstyle L}}\beta\,, \hspace{2.4cm} J^{+{\scriptscriptstyle L}}= {\rm i} x^+ P^{\scriptscriptstyle L} + \partial_{p^{\scriptscriptstyle R}}\beta\,, \ \\ \label{02092019-man-16} && J^{+-} = {\rm i} x^+P^- + \partial_\beta \beta + M_\lambda^{+-}\,, \hspace{1cm} J^{{\scriptscriptstyle R}{\scriptscriptstyle L}} = p^{\scriptscriptstyle R}\partial_{p^{\scriptscriptstyle R}} - p^{\scriptscriptstyle L}\partial_{p^{\scriptscriptstyle L}} + M_\lambda^{{\scriptscriptstyle R}{\scriptscriptstyle L}}\,, \\ \label{02092019-man-17} && J^{-{\scriptscriptstyle R}} = -\partial_\beta p^{\scriptscriptstyle R} + \partial_{p^{\scriptscriptstyle L}} p^- + M_\lambda^{{\scriptscriptstyle R}{\scriptscriptstyle L}}\frac{p^{\scriptscriptstyle R}}{\beta} - M_\lambda^{+-} \frac{p^{\scriptscriptstyle R}}{\beta}\,, \\ \label{02092019-man-18} && J^{-{\scriptscriptstyle L}} = -\partial_\beta p^{\scriptscriptstyle L} + \partial_{p^{\scriptscriptstyle R}} p^- - M_\lambda^{{\scriptscriptstyle R}{\scriptscriptstyle L}}\frac{p^{\scriptscriptstyle L}}{\beta} - M_\lambda^{+-} \frac{p^{\scriptscriptstyle L}}{\beta}\,, \\ \label{02092019-man-18-a1} && \hspace{1.2cm} M_\lambda^{+-} = \frac{1}{2} p_\theta^i\partial_{p_\theta^i} - \frac{1}{4} {\cal N} - \frac{1}{2} e_\lambda\,, \qquad M_\lambda^{{\scriptscriptstyle R}{\scriptscriptstyle L}} = \lambda -\frac{1}{2} p_\theta^i\partial_{p_\theta^i} + \frac{1}{4} {\cal N}\,, \\ \label{02092019-man-19} && Q_i^{+{\scriptscriptstyle R}} = (-)^{e_\lambda} \beta \partial_{p_\theta^i}\,, \hspace{2.3cm} Q^{+{\scriptscriptstyle L} i} = (-)^{e_\lambda} p_\theta^i\,, \\ \label{02092019-man-20} && Q^{-{\scriptscriptstyle R} i} = (-)^{e_\lambda} \frac{1}{\beta} p^{\scriptscriptstyle R} p_\theta^i\,, \hspace{2cm} Q_i^{-{\scriptscriptstyle L}} = (-)^{e_\lambda} p^{\scriptscriptstyle L} \partial_{p_\theta^i}\,, \\ \label{02092019-man-21} && J^i{}_j = p_\theta^i\partial_{p_\theta^j} - \frac{1}{{\cal N}}\delta_j^i p_\theta^k \partial_{p_\theta^k}\,, \\ && \hspace{3cm}\partial_\beta \equiv \partial/\partial \beta\,, \hspace{0.5cm} \partial_{p^{\scriptscriptstyle R}} \equiv \partial/\partial p^{\scriptscriptstyle R}\,, \hspace{0.5cm} \partial_{p^{\scriptscriptstyle L}} \equiv \partial/\partial p^{\scriptscriptstyle L}\,, \end{eqnarray} where $\partial_{p_\theta^i}\equiv\partial/\partial p_\theta^i$ is the left derivative with respect to the Grassmann momentum $p_\theta^i$.
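For illustration, it may be helpful to unpack the expansion \rf{02092019-man-12} in the simplest nontrivial case ${\cal N}=4$, $\lambda=0$; the following explicit form is our own spelling-out of \rf{02092019-man-12} and introduces no new input: \begin{equation} \Phi_0(p,p_\theta) = \beta\, \phi_1 + p_\theta^i\, \phi_{\frac{1}{2}\,;\,i} + \frac{1}{2}\, p_\theta^i p_\theta^j\, \phi_{0\,;\,ij} + \frac{1}{3!}\,\beta^{-1} p_\theta^i p_\theta^j p_\theta^k\, \phi_{-\frac{1}{2}\,;\,ijk} + \frac{1}{4!}\,\beta^{-1} p_\theta^{i_1}\ldots p_\theta^{i_4}\, \phi_{-1\,;\,i_1\ldots i_4}\,, \end{equation} where all component fields are evaluated at $p$ and the $\beta$-powers follow from the exponent $\frac{1}{4}{\cal N} - \frac{1}{2} q + \frac{1}{2} e_\lambda - \frac{1}{2} e_{\lambda - \frac{1}{2} q}$ in \rf{02092019-man-12}. The helicities $\pm 1$, $\pm\frac{1}{2}$, $0$ enter with the multiplicities $1,4,6,4,1$, which is precisely the on-shell content of the ${\cal N}=4$ vector multiplet mentioned above.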
To express the hermiticity condition in terms of superfields, we find it convenient to introduce a new superfield $\Phi_\lambda^*$ defined by the relation \begin{equation} \label{02092019-man-22} \Phi_\lambda^*(p,p_\theta) \equiv \beta^{\frac{{\cal N}}{2}}\int d^{\cal N} p_\theta^\dagger\, e^{ \frac{p_\theta^{i\dagger} p_\theta^i}{\beta} } (\Phi_\lambda(p,p_\theta))^\dagger\,. \end{equation} It is then easy to verify that, in terms of the superfields $\Phi_\lambda$ and $\Phi_\lambda^*$, the hermiticity condition \rf{02092019-man-10} takes the following simple form: \begin{equation}\label{02092019-man-23} \Phi_{-\lambda}^*(-p,-p_\theta) = \Phi_\lambda(p,p_\theta)\,. \end{equation} Sometimes, we use the shortcut $\Phi_\lambda^*\equiv \Phi_\lambda^*(p,p_\theta)$. From \rf{02092019-man-23}, we see that the superfields $\Phi_\lambda$ and $\Phi_\lambda^*$ are not independent of each other. From \rf{02092019-man-12-a1} and \rf{02092019-man-23}, we note that the superfield $\Phi_\lambda^*$ is Grassmann even for $\lambda\in \mathbb{Z}$ and Grassmann odd for $\lambda\in \mathbb{Z}+\frac{1}{2}$. Some helpful relations for the superfield $\Phi_\lambda^*$ may be found in Appendix B. We are now ready to provide a field-theoretical realization of the Poincar\'e superalgebra. This is to say that, to quadratic order in fields, a field-theoretical realization of the ${\cal N}$-extended Poincar\'e superalgebra generators in terms of the superfields $\Phi_\lambda$ takes the following form: \begin{equation} \label{02092019-man-24} G_{\scriptscriptstyle [2]} = \sum_{\lambda=-\infty}^{+\infty} G_{{\scriptscriptstyle [2]},\,\lambda}\,, \qquad G_{{\scriptscriptstyle [2]},\, \lambda} = \int d^3p\, d^{\cal N} p_\theta \,\, \beta^{e_{\lambda+\frac{1}{2}}} \Phi_\lambda^* G_{{\rm diff},\,\lambda } \Phi_\lambda\,, \end{equation} where the quantity $G_{{\rm diff},\, \lambda}$ stands for the realization of the ${\cal N}$-extended Poincar\'e superalgebra generators in terms of the differential operators given in \rf{02092019-man-14}-\rf{02092019-man-21}. By definition, the superfields $\Phi_\lambda$ and $\Phi_\lambda^*$ satisfy the Poisson-Dirac equal-time (anti)commutator given by \begin{equation} \label{02092019-man-25} [\Phi_\lambda(p,p_\theta),\Phi_{\lambda'}^*(p',p_\theta')]_\pm = \frac{1}{2} \beta^{-e_{\lambda+\frac{1}{2}}} \,\, \delta^3(p-p') \delta^{\cal N}(p_\theta-p_\theta') \delta_{\lambda,\lambda'}\,, \end{equation} where the notation $[a,b]_\pm$ is used for the graded commutator, $[a,b]_\pm=(-)^{\epsilon_a\epsilon_b+1}[b,a]_\pm$. With the help of relations \rf{02092019-man-24},\rf{02092019-man-25}, we verify that the following equal-time (anti)commutator between the generators and the superfield $\Phi_\lambda$ \begin{equation} \label{02092019-man-26} [\Phi_\lambda,G_{\scriptscriptstyle [2]}]_\pm = G_{{\rm diff},\,\lambda} \Phi_\lambda \,, \end{equation} holds true, where the operators $G_{{\rm diff},\,\lambda}$ are defined in \rf{02092019-man-14}-\rf{02092019-man-21}. To conclude this section, we recall that the light-cone gauge action can be presented as \begin{equation} \label{02092019-man-27} S = \frac{1}{2}\sum_{\lambda=-\infty}^\infty \int dx^+ d^3p d^{\cal N} p_\theta \,\, \beta^{-e_\lambda} \Phi_\lambda^* \big( 2{\rm i} \beta \partial^- - 2p^{\scriptscriptstyle R} p^{\scriptscriptstyle L} \big) \Phi_\lambda +\int dx^+ P_{\rm int}^-\,, \end{equation} where $\partial^-\equiv\partial/\partial x^+$, while $P_{\rm int}^-$ stands for the light-cone gauge Hamiltonian that describes the interacting fields.
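As a simple consistency check (our own, using only \rf{02092019-man-27} and \rf{02092019-man-14}), we note the free field equation implied by the quadratic part of the action. Varying \rf{02092019-man-27} with respect to $\Phi_\lambda^*$, with the reality condition \rf{02092019-man-23} affecting only the overall normalization of the variation, gives \begin{equation} \big( 2{\rm i}\beta\partial^- - 2p^{\scriptscriptstyle R} p^{\scriptscriptstyle L} \big)\Phi_\lambda(x^+,p,p_\theta) = 0\,, \qquad \hbox{i.e.,} \qquad \Phi_\lambda \propto e^{{\rm i} p^- x^+}\,, \quad p^- = -\frac{p^{\scriptscriptstyle R} p^{\scriptscriptstyle L}}{\beta}\,, \end{equation} so that the on-shell $x^+$-dependence reproduces the massless dispersion encoded in the generator $P^-$ in \rf{02092019-man-14}.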
\newsection{ \large $n$-point dynamical generators of ${\cal N}$-extended Poincar\'e superalgebra} \label{sec-03} As we have already noted, we follow the method proposed in Ref.\cite{Dirac:1949cp}, which reduces the problem of finding a dynamical system to the problem of finding a solution of the commutation relations for the algebra of basic symmetries. This implies that, for theories of interacting fields with extended supersymmetries in flat space, we should find an interaction-dependent deformation of the dynamical generators of the extended Poincar\'e superalgebra. In other words, in theories of interacting fields, one has the following expansion in fields for the dynamical generators of the extended Poincar\'e superalgebra \begin{equation} \label{03092019-man-01} G^{\rm dyn} = \sum_{n=2}^\infty G_{\scriptscriptstyle [n]}^{\rm dyn}\,, \end{equation} where $G_{\scriptscriptstyle [n]}^{\rm dyn}$ \rf{03092019-man-01} is a functional that contains $n$ powers of the superfields $\Phi^*$. Expressions for $G_{\scriptscriptstyle [2]}^{\rm dyn}$ have been obtained in the previous section. Our aim in this Section is to discuss constraints on the dynamical generators $G_{\scriptscriptstyle [n]}^{\rm dyn}$ with $n\geq 3$ which are obtained by using the kinematical symmetries of the Poincar\'e superalgebra. We describe the constraints in turn. \noindent {\bf Kinematical $P^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $P^+$, $Q^{+{\scriptscriptstyle L} i}$ symmetries}. Using (anti)commutators between the kinematical generators $P^{\scriptscriptstyle R}$, $P^{\scriptscriptstyle L}$, $P^+$, $Q^{+{\scriptscriptstyle L} i}$ and the dynamical generators \rf{02092019-man-04}, we find that the dynamical generators $G_{\scriptscriptstyle [n]}^{\rm dyn}$ with $n\geq 3$ can be presented as: \begin{eqnarray} \label{03092019-man-02} && P_{\scriptscriptstyle [n]}^- = \int\!\! d\Gamma_{\scriptscriptstyle [n]}\,\, \langle \Phi_{\scriptscriptstyle [n]}^* | p_{\scriptscriptstyle [n]}^-\rangle\,, \\ \label{03092019-man-03} && Q_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R} i} = \int\!\! d\Gamma_{\scriptscriptstyle [n]}\,\, \langle \Phi_{\scriptscriptstyle [n]}^* | q_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R} i}\rangle\,, \\ \label{03092019-man-04} && Q_{i {\scriptscriptstyle [n]}}^{-{\scriptscriptstyle L}} = \int\!\! d\Gamma_{\scriptscriptstyle [n]}\,\, \langle \Phi_{\scriptscriptstyle [n]}^* | q_{i {\scriptscriptstyle [n]}}^{-{\scriptscriptstyle L}} \rangle\,, \\ \label{03092019-man-05} && J_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R}} = \int\!\! d\Gamma_{\scriptscriptstyle [n]}\,\, \langle\Phi_{\scriptscriptstyle [n]}^* | j_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R}}\rangle + \langle {\bf X}_{\scriptscriptstyle [n]}^{\scriptscriptstyle R} \Phi_{\scriptscriptstyle [n]}^* | p_{\scriptscriptstyle [n]}^-\rangle - \langle {\bf X}_{\theta\, i\,{\scriptscriptstyle [n]}} \Phi_{\scriptscriptstyle [n]}^* |q_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R} i} \rangle\,, \\ \label{03092019-man-06} && J_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle L}} = \int\!\!
d\Gamma_{\scriptscriptstyle [n]}\,\, \langle \Phi_{\scriptscriptstyle [n]}^* | j_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle L}}\rangle + \langle {\bf X}_{\scriptscriptstyle [n]}^{\scriptscriptstyle L} \Phi_{\scriptscriptstyle [n]}^* | p_{\scriptscriptstyle [n]}^- \rangle + \frac{1}{n} {\cal P}_{\theta\,{\scriptscriptstyle [n]}}^i \langle \Phi_{\scriptscriptstyle [n]}^* | q_{i {\scriptscriptstyle [n]}}^{-{\scriptscriptstyle L}} \rangle\,, \end{eqnarray} where, in \rf{03092019-man-02}-\rf{03092019-man-06} and below, we use the following notation: \begin{eqnarray} \label{03092019-man-07} && d\Gamma_{\scriptscriptstyle [n]} = d\Gamma_{\scriptscriptstyle [n]}^p d\Gamma_{\scriptscriptstyle [n]}^{p_\theta} \,, \\ \label{03092019-man-08} && d\Gamma_{\scriptscriptstyle [n]}^p = (2\pi)^3 \delta^{3}(\sum_{a=1}^n p_a)\prod_{a=1}^n \frac{d^3p_a}{(2\pi)^{3/2} }\,, \qquad d^3 p_a = dp_a^{\scriptscriptstyle R} dp_a^{\scriptscriptstyle L} d\beta_a\,, \\ \label{03092019-man-09} && d\Gamma_{\scriptscriptstyle [n]}^{p_\theta} \equiv \delta^{\cal N}(\sum_{a=1}^n p_{\theta_a} ) \prod_{a=1}^n d^{\cal N} p_{\theta_a}\,, \\ \label{03092019-man-10} && {\bf X}_{\scriptscriptstyle [n]}^{\scriptscriptstyle R} = - \frac{1}{n}\sum_{a=1}^n \partial_{p_a^{\scriptscriptstyle L}}\,, \hspace{1cm} {\bf X}_{\scriptscriptstyle [n]}^{\scriptscriptstyle L} = - \frac{1}{n}\sum_{a=1}^n\partial_{p_a^{\scriptscriptstyle R}}\,, \\ \label{03092019-man-11} && {\bf X}_{\theta i\,{\scriptscriptstyle [n]}} = \frac{1}{n}\sum_{a=1}^n \partial_{p_{\theta_a}^i}\,,\hspace{1cm} {\cal P}_{\theta\,{\scriptscriptstyle [n]}}^i = \sum_{a=1}^n \frac{p_{\theta_a}^i}{\beta_a}\,, \end{eqnarray} and the index $a=1,\ldots,n$ is used to label the superfields (and their arguments) entering the $n$-point interaction vertex. We note also that, in \rf{03092019-man-02}-\rf{03092019-man-06}, we use the shortcuts $\langle \Phi_{\scriptscriptstyle [n]}^*| p_{\scriptscriptstyle [n]}^-\rangle$, $\langle \Phi_{\scriptscriptstyle [n]}^*| q_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}\rangle$, and $\langle \Phi_{\scriptscriptstyle [n]}^*| j_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}\rangle$ for the following expressions \begin{eqnarray} \label{03092019-man-12} && \langle \Phi_{\scriptscriptstyle [n]}^*| p_{\scriptscriptstyle [n]}^-\rangle \quad \equiv \sum_{\lambda_1\ldots\lambda_n} \Phi_{\lambda_1\ldots\lambda_n}^* p_{\lambda_1\ldots\lambda_n}^-\,, \\ \label{03092019-man-13} && \langle \Phi_{\scriptscriptstyle [n]}^*| q_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} \rangle \equiv \sum_{\lambda_1\ldots\lambda_n} \Phi_{\lambda_1\ldots\lambda_n}^* q_{\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}\,, \\ \label{03092019-man-14} && \langle \Phi_{\scriptscriptstyle [n]}^*| j_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} \rangle \equiv \sum_{\lambda_1\ldots\lambda_n} \Phi_{\lambda_1\ldots\lambda_n}^* j_{\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}\,, \\ \label{03092019-man-15} && \hspace{2cm} \Phi_{\lambda_1\ldots\lambda_n}^* \equiv \Phi_{\lambda_1}^*(p_1,p_{\theta_1}) \ldots \Phi_{\lambda_n}^*(p_n,p_{\theta_n}) \,.
\end{eqnarray} The quantities $p_{\lambda_1\ldots\lambda_n}^-$, $q_{\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, and $j_{\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ appearing in \rf{03092019-man-12}-\rf{03092019-man-14} will be referred to as $n$-point densities. For brevity, we denote these densities as $g_{\lambda_1\ldots\lambda_n}$, \begin{equation} \label{03092019-man-16} g_{\lambda_1\ldots\lambda_n} = p_{\lambda_1\ldots\lambda_n}^-,\quad q_{\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle R}\,i},\quad q_{i;\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle L}},\quad j_{\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle R}},\quad j_{\lambda_1\ldots\lambda_n}^{-{\scriptscriptstyle L}}\,. \end{equation} In general, the densities $g_{\lambda_1\ldots\lambda_n}$ \rf{03092019-man-16} depend on the spatial momenta $p_a^{\scriptscriptstyle R}$, $p_a^{\scriptscriptstyle L}$, $\beta_a$, the Grassmann momenta $p_{\theta_a}^i$, and the helicities $\lambda_a$, $a=1,2,\ldots,n$, \begin{equation} \label{03092019-man-17} g_{\lambda_1\ldots\lambda_n} = g_{\lambda_1\ldots\lambda_n}(p_a,p_{\theta_a})\,. \end{equation} Note that the argument $p_a$ in the delta-function \rf{03092019-man-08}, the superfields \rf{03092019-man-15}, and the densities \rf{03092019-man-17} stands for the spatial momenta $p_a^{\scriptscriptstyle R}$, $p_a^{\scriptscriptstyle L}$, and $\beta_a$. We note also that, in \rf{03092019-man-05},\rf{03092019-man-06}, the operators ${\bf X}_{\scriptscriptstyle [n]}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$, ${\bf X}_{\theta i\,{\scriptscriptstyle [n]}}$ defined in \rf{03092019-man-10},\rf{03092019-man-11} act only on the arguments of the superfields. Namely, for example, the shortcut $\langle {\bf X}_{\scriptscriptstyle [n]}^{\scriptscriptstyle R} \Phi_{\scriptscriptstyle [n]}^* | g_{\scriptscriptstyle [n]} \rangle$ should be read as follows \begin{equation} \label{03092019-man-18} \langle {\bf X}_{\scriptscriptstyle [n]}^{\scriptscriptstyle R} \Phi_{\scriptscriptstyle [n]}^* | g_{\scriptscriptstyle [n]}\rangle = \sum_{\lambda_1,\ldots \lambda_n} ({\bf X}_{\scriptscriptstyle [n]}^{\scriptscriptstyle R}\Phi_{\lambda_1\ldots\lambda_n}^* ) g_{\lambda_1\ldots\lambda_n}\,. \end{equation} Often, we will refer to the density $p_{\scriptscriptstyle [n]}^-$ as the $n$-point interaction vertex, while, for $n=3$, the density $p_{\scriptscriptstyle [3]}^-$ will be referred to as the cubic interaction vertex. \noindent {\bf $J^{+-}$-symmetry equations}.
Commutators between the kinematical generator $J^{+-}$ and the dynamical generators $P^-$, $Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $J^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ lead to the following equations for the densities: \begin{eqnarray} \label{03092019-man-19} && \big( {\bf J}^{+-} - \frac{(n-2){\cal N}}{4}\big) g_{\lambda_1\ldots \lambda_n} = 0\,, \hspace{1.6cm} \hbox{ for } \quad g_{\lambda_1\ldots \lambda_n} = p_{\lambda_1\ldots \lambda_n}^-\,,\,\, j_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}\,,\qquad \\ \label{03092019-man-20} && \big( {\bf J}^{+-} - \frac{(n-2){\cal N}+2}{4}\big) g_{\lambda_1\ldots \lambda_n} = 0\,, \hspace{0.8cm} \hbox{ for } \quad g_{\lambda_1\ldots \lambda_n} = q_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R} i}\,,\,\, q_{i;\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle L}}\,, \\ \label{03092019-man-21} && \hspace{3cm} {\bf J}^{+-} \equiv \sum_{a=1}^n \big( \beta_a\partial_{\beta_a} + \frac{1}{2} p_{\theta_a}^i\partial_{p_{\theta_a}^i} + \frac{1}{2} e_{\lambda_a} \big)\,. \end{eqnarray} \noindent {\bf $J^{{\scriptscriptstyle R}{\scriptscriptstyle L}}$-symmetry equations}. Commutators between the kinematical generator $J^{{\scriptscriptstyle R}{\scriptscriptstyle L}}$ and the dynamical generators $P^-$, $Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $J^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ lead to the following equations for the densities: \begin{eqnarray} \label{03092019-man-22} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{(n-2){\cal N}}{4}\big) p_{\lambda_1\ldots \lambda_n}^- = 0 \,, \\ \label{03092019-man-23} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{(n-2){\cal N}-2}{4}\big) q_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R}\,i} = 0 \,, \\ \label{03092019-man-24} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{(n-2){\cal N}+2}{4}\big) q_{i;\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle L}} = 0 \,, \\ \label{03092019-man-25} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{(n-2){\cal N}-4}{4}\big) j_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R}} = 0 \,, \\ \label{03092019-man-26} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{(n-2){\cal N}+4}{4}\big) j_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle L}} = 0 \,, \\ \label{03092019-man-27} && \hspace{3cm} {\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} \equiv \sum_{a=1}^n \big( p_a^{\scriptscriptstyle R}\partial_{p_a^{\scriptscriptstyle R}} - p_a^{\scriptscriptstyle L}\partial_{p_a^{\scriptscriptstyle L}} - \frac{1}{2} p_{\theta_a}^i\partial_{p_{\theta_a}^i} + \lambda_a \big)\,. \end{eqnarray} \noindent {\bf $J^i{}_j$-symmetry equations}. 
Commutators between the kinematical generators $J^i{}_j$ and the dynamical generators $P^-$, $Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $J^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ lead to the following equations for the densities: \begin{eqnarray} \label{03092019-man-28} && {\bf J}^i{}_j g_{\lambda_1\ldots \lambda_n} = 0\,, \hspace{1cm} \hbox{ for } \quad g_{\lambda_1\ldots \lambda_n} = p_{\lambda_1\ldots \lambda_n}^-\,, \,\, j_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R},{\scriptscriptstyle L} }\,, \\ \label{03092019-man-29} && {\bf J}^i{}_j q_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R}\, l} = \delta_j^l q_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R}\, i} -\frac{1}{{\cal N}}\delta_j^i q_{\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle R}\, l}\,, \\ \label{03092019-man-30} && {\bf J}^i{}_j q_{l;\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle L}} = - \delta_l^i q_{j;\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle L}} + \frac{1}{{\cal N}}\delta_j^i q_{l;\lambda_1\ldots \lambda_n}^{-{\scriptscriptstyle L}}\,, \\ \label{03092019-man-31} && \hspace{3cm} {\bf J}^i{}_j \equiv \sum_{a=1}^n \big( p_{\theta_a}^i\partial_{p_{\theta_a}^j} -\frac{1}{{\cal N}} \delta_j^i p_{\theta_a}^k \partial_{p_{\theta_a}^k} \big)\,. \end{eqnarray} \noindent {\bf $J^{+{\scriptscriptstyle R}}$, $J^{+{\scriptscriptstyle L}}$, $Q^{+{\scriptscriptstyle R}}$-symmetry equations}. Using (anti)commutators between the kinematical generators $J^{+{\scriptscriptstyle R}}$, $J^{+{\scriptscriptstyle L}}$, and $Q^{+{\scriptscriptstyle R}}$ and the dynamical generators $P^-$, $Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $J^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, we verify that the densities $g_{\lambda_1\ldots\lambda_n}$ \rf{03092019-man-17} can be presented as \begin{equation} \label{03092019-man-32} g_{\lambda_1\ldots\lambda_n} = g_{\lambda_1\ldots\lambda_n} (\mathbb{P}_{ab}^{\scriptscriptstyle R},\mathbb{P}_{ab}^{\scriptscriptstyle L}\,, \mathbb{P}_{\theta\,ab},\beta_a)\,, \end{equation} where we use the notation \begin{equation} \label{03092019-man-33} \mathbb{P}_{ab}^{\scriptscriptstyle R} \equiv p_a^{\scriptscriptstyle R} \beta_b - p_b^{\scriptscriptstyle R} \beta_a\,, \qquad \mathbb{P}_{ab}^{\scriptscriptstyle L} \equiv p_a^{\scriptscriptstyle L} \beta_b - p_b^{\scriptscriptstyle L} \beta_a\,, \qquad \mathbb{P}_{\theta\, ab}^i \equiv p_{\theta_a}^i \beta_b - p_{\theta_b}^i \beta_a\,. \end{equation} In other words, the densities $g_{\lambda_1\ldots\lambda_n}$ \rf{03092019-man-17} turn out to depend on the momenta $\mathbb{P}_{ab}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$ and the Grassmann momenta $\mathbb{P}_{\theta\, ab}^i$ in place of the respective momenta $p_a^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$ and $p_{\theta_a}^i$. \noindent {\bf Restriction imposed by Grassmann parity}. To conclude this section, we note the following restriction on all densities in \rf{03092019-man-16}: \begin{equation} \label{03092019-man-34} g_{\lambda_1\ldots \lambda_n} = 0 \qquad \hbox{ for } \qquad \sum_{a=1}^n \lambda_a \in \hbox{$\mathbb{Z} + \frac{1}{2}$}\,. \end{equation} Restriction \rf{03092019-man-34} is obtained by considering the $J^i{}_j$ symmetries and the Grassmann parity of the densities $g_{\lambda_1\ldots \lambda_n}$.
Namely, on the one hand, in view of the $J^i{}_j$ symmetries, a dependence of the generators $P_{\scriptscriptstyle [n]}^-$ and $J_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ on the Grassmann momenta $\mathbb{P}_{\theta\, ab}^i$ is realized by means of the Grassmann even quantities \begin{equation} \varepsilon_{i_1 \ldots i_{\cal N}}\mathbb{P}_{\theta\, a_1b_1}^{i_1} \ldots \mathbb{P}_{\theta\, a_{\cal N} b_{\cal N}}^{i_{\cal N}}\,, \end{equation} while a dependence of the supercharges $Q_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R} i}$ and $Q_{i{\scriptscriptstyle [n]}}^{-{\scriptscriptstyle L}}$ on the Grassmann momenta $\mathbb{P}_{\theta\, ab}^i$ is realized by means of the respective Grassmann odd quantities \begin{equation} \mathbb{P}_{\theta\, ab}^i\,, \qquad \hspace{1cm} \varepsilon_{i i_2 \ldots i_{\cal N}} \mathbb{P}_{\theta\, a_2b_2}^{i_2} \ldots \mathbb{P}_{\theta\, a_{\cal N} b_{\cal N}}^{i_{\cal N}}\,. \end{equation} On the other hand, by definition, the generators $P_{\scriptscriptstyle [n]}^-$ and $J_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ \rf{03092019-man-02},\rf{03092019-man-05},\rf{03092019-man-06} should be Grassmann even, while the supercharges $Q_{\scriptscriptstyle [n]}^{-{\scriptscriptstyle R} i}$ and $Q_{i{\scriptscriptstyle [n]}}^{-{\scriptscriptstyle L}}$ \rf{03092019-man-03},\rf{03092019-man-04} should be Grassmann odd. Taking into account the above and the relations in \rf{02092019-man-12-a1}, we get the restriction \rf{03092019-man-34}. We now proceed to the main theme of our study. \newsection{ \large Complete system of equations for cubic vertices } \label{sec-04} In this Section, we present a complete system of equations required to determine the cubic interaction vertices unambiguously. The complete system of equations is obtained by analysing the following three requirements. \noindent 1) Kinematical symmetries. \noindent 2) Dynamical symmetries. \noindent 3) Light-cone gauge dynamical principle. We now analyse these three requirements in turn. \noindent {\bf Kinematical symmetries of cubic densities}. Kinematical symmetries for arbitrary $n$-point, $n\geq 3$, densities have already been considered in the previous section. For cubic densities, $n=3$, the kinematical symmetry equations can further be simplified in view of the following well-known observation.
Using the momentum conservation laws \begin{equation} \label{04092019-man-01} p_1^{\scriptscriptstyle R} + p_2^{\scriptscriptstyle R} + p_3^{\scriptscriptstyle R} = 0\,, \quad p_1^{\scriptscriptstyle L} + p_2^{\scriptscriptstyle L} + p_3^{\scriptscriptstyle L} = 0\,, \quad \beta_1 +\beta_2 +\beta_3 =0 \,,\quad p_{\theta_1}^i + p_{\theta_2}^i + p_{\theta_3}^i=0\,, \end{equation} it is easy to see that the six momenta $\mathbb{P}_{12}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $\mathbb{P}_{23}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $\mathbb{P}_{31}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$ and the three Grassmann momenta $\mathbb{P}_{\theta\, 12}^i$, $\mathbb{P}_{\theta\, 23}^i$, $\mathbb{P}_{\theta\, 31}^i$ \rf{03092019-man-33} are expressed in terms of just two momenta $\mathbb{P}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$ and one Grassmann momentum $\mathbb{P}_\theta^i$, \begin{equation} \label{04092019-man-02} \mathbb{P}_{12}^{{\scriptscriptstyle R},{\scriptscriptstyle L}} =\mathbb{P}_{23}^{{\scriptscriptstyle R},{\scriptscriptstyle L}} = \mathbb{P}_{31}^{{\scriptscriptstyle R},{\scriptscriptstyle L}} = \mathbb{P}^{{\scriptscriptstyle R},{\scriptscriptstyle L}} \,,\qquad \mathbb{P}_{\theta\, 12}^i =\mathbb{P}_{\theta\, 23}^i = \mathbb{P}_{\theta\, 31}^i = \mathbb{P}_\theta^i \,, \end{equation} where the new momenta $\mathbb{P}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$ and $\mathbb{P}_\theta^i$ are defined as \begin{eqnarray} && \mathbb{P}^{\scriptscriptstyle R} \equiv \frac{1}{3}\sum_{a=1,2,3} \check{\beta}_a p_a^{\scriptscriptstyle R}\,, \qquad \mathbb{P}^{\scriptscriptstyle L} \equiv \frac{1}{3} \sum_{a=1,2,3} \check{\beta}_a p_a^{\scriptscriptstyle L}\,, \qquad \nonumber\\ \label{04092019-man-04} && \mathbb{P}_\theta^i \equiv \frac{1}{3}\sum_{a=1,2,3} \check{\beta}_a p_{\theta_a}^i\,, \qquad \check{\beta}_a\equiv \beta_{a+1}-\beta_{a+2}\,, \quad \beta_a\equiv \beta_{a+3}\,.
\end{eqnarray} Therefore, using the following simplified notation for the cubic densities: \begin{equation} \label{04092019-man-05} p_{\scriptscriptstyle [3]}^- = p_{\lambda_1\lambda_2\lambda_3}^- \,, \qquad q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}\,i} = q_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R}\,i}\,,\qquad q_{i{\scriptscriptstyle [3]}}^{-{\scriptscriptstyle L}} = q_{i;\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle L}}\,, \qquad j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} = j_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}\,, \end{equation} and taking into account relations \rf{03092019-man-32},\rf{04092019-man-02}, we see that the cubic densities $p_{\scriptscriptstyle [3]}^-$, $q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, and $j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ depend on the momenta $\beta_a$, $\mathbb{P}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$, the Grassmann momentum $\mathbb{P}_\theta^i$, and the helicities $\lambda_1$, $\lambda_2$, $\lambda_3$, \begin{eqnarray} \label{04092019-man-06} && p_{\scriptscriptstyle [3]}^- = p_{\lambda_1\lambda_2\lambda_3}^-(\mathbb{P}^{\scriptscriptstyle R},\mathbb{P}^{\scriptscriptstyle L},\mathbb{P}_\theta, \beta_a)\,, \qquad q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} = q_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}(\mathbb{P}^{\scriptscriptstyle R},\mathbb{P}^{\scriptscriptstyle L},\mathbb{P}_\theta, \beta_a)\,, \quad \nonumber\\ && j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} = j_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}(\mathbb{P}^{\scriptscriptstyle R},\mathbb{P}^{\scriptscriptstyle L},\mathbb{P}_\theta, \beta_a)\,. \end{eqnarray} Now, restricting to the value $n=3$, we represent the kinematical symmetry equations obtained in the previous section in terms of the densities \rf{04092019-man-06}.
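Before doing so, we record, for the reader's convenience, a quick check of \rf{04092019-man-02}. Eliminating $\beta_3 = -\beta_1 - \beta_2$ and $p_3^{\scriptscriptstyle R} = -p_1^{\scriptscriptstyle R} - p_2^{\scriptscriptstyle R}$ by means of \rf{04092019-man-01}, we get \begin{equation} 3\mathbb{P}^{\scriptscriptstyle R} = (\beta_2 - \beta_3) p_1^{\scriptscriptstyle R} + (\beta_3 - \beta_1) p_2^{\scriptscriptstyle R} + (\beta_1 - \beta_2) p_3^{\scriptscriptstyle R} = 3\beta_2 p_1^{\scriptscriptstyle R} - 3\beta_1 p_2^{\scriptscriptstyle R}\,, \end{equation} i.e., $\mathbb{P}^{\scriptscriptstyle R} = p_1^{\scriptscriptstyle R}\beta_2 - p_2^{\scriptscriptstyle R}\beta_1 = \mathbb{P}_{12}^{\scriptscriptstyle R}$; the remaining relations in \rf{04092019-man-02} follow by cyclic permutations of the labels $1,2,3$ and by repeating the same computation for $\mathbb{P}^{\scriptscriptstyle L}$ and $\mathbb{P}_\theta^i$.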
\noindent {\bf $J^{+-}$-symmetry equations}: Using \rf{04092019-man-06}, we find that, for $n=3$, equations \rf{03092019-man-19}-\rf{03092019-man-21} can be represented as \begin{eqnarray} \label{04092019-man-07} && \big({\bf J}^{+-} - \frac{1}{4}{\cal N} \big) p_{\scriptscriptstyle [3]}^- = 0\,, \\ \label{04092019-man-08} && \big({\bf J}^{+-} - \frac{1}{4}({\cal N}+2) \big) q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} = 0\,, \\ \label{04092019-man-09} && \big({\bf J}^{+-} - \frac{1}{4}{\cal N} \big) j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} = 0\,, \end{eqnarray} where the operator ${\bf J}^{+-}$ is defined as \begin{eqnarray} \label{04092019-man-10} && {\bf J}^{+-} \equiv N_{\mathbb{P}^{\scriptscriptstyle R}} + N_{\mathbb{P}^{\scriptscriptstyle L}}+ \frac{3}{2} N_{\mathbb{P}_\theta} + \sum_{a=1,2,3} (\beta_a \partial_{\beta_a} + \frac{1}{2} e_{\lambda_a})\,, \\ \label{04092019-man-11} && N_{\mathbb{P}^{\scriptscriptstyle R}} \equiv \mathbb{P}^{\scriptscriptstyle R}\partial_{\mathbb{P}^{\scriptscriptstyle R}}\,, \hspace{1cm} N_{\mathbb{P}^{\scriptscriptstyle L}} \equiv \mathbb{P}^{\scriptscriptstyle L} \partial_{\mathbb{P}^{\scriptscriptstyle L}}\,, \hspace{1cm} N_{\mathbb{P}_\theta} \equiv \mathbb{P}_\theta^i \partial_{\mathbb{P}_\theta^i}\,.\qquad \end{eqnarray} \noindent {\bf $J^{{\scriptscriptstyle R}{\scriptscriptstyle L}}$-symmetry equations}: Using \rf{04092019-man-06}, we find that, for $n=3$, equations \rf{03092019-man-22}-\rf{03092019-man-27} can be represented as \begin{eqnarray} \label{04092019-man-12} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{1}{4}{\cal N}\big) p_{\scriptscriptstyle [3]}^- = 0\,, \\ \label{04092019-man-13} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{1}{4}({\cal N}-2)\big) q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}\, i} = 0\,, \\ \label{04092019-man-14} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{1}{4}({\cal N}+2)\big) q_{i\,{\scriptscriptstyle [3]}}^{-{\scriptscriptstyle L}} = 0\,, \\ \label{04092019-man-15} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{1}{4}({\cal N}-4)\big) j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}} = 0\,, \\ \label{04092019-man-16} && \big({\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{1}{4}({\cal N}+4)\big) j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle L}} = 0\,, \end{eqnarray} where the operator ${\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}}$ is defined as \begin{equation} \label{04092019-man-17} {\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} \equiv N_{\mathbb{P}^{\scriptscriptstyle R}} - N_{\mathbb{P}^{\scriptscriptstyle L}} - \frac{1}{2} N_{\mathbb{P}_\theta} +{\bf M}_\lambda\,, \qquad {\bf M}_\lambda \equiv \sum_{a=1,2,3} \lambda_a\,, \end{equation} and we use the notation in \rf{04092019-man-11}. \noindent {\bf $J^i{}_j$-symmetry equations}.
Using \rf{04092019-man-06}, we find that, for $n=3$, equations \rf{03092019-man-28}-\rf{03092019-man-31} can be represented as \begin{eqnarray} \label{04092019-man-18} && {\bf J}^i{}_j g_{\scriptscriptstyle [3]} = 0\,, \hspace{1cm} \hbox{ for } \quad g_{\scriptscriptstyle [3]} = p_{\scriptscriptstyle [3]}^-\,, \,\, j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L} }\,, \\ \label{04092019-man-19} && {\bf J}^i{}_j q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}\, l} = \delta_j^l q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}\, i} -\frac{1}{{\cal N}}\delta_j^i q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}\, l}\,, \\ \label{04092019-man-20} && {\bf J}^i{}_j q_{l\,{\scriptscriptstyle [3]}}^{-{\scriptscriptstyle L}} = - \delta_l^i q_{j\,{\scriptscriptstyle [3]}}^{-{\scriptscriptstyle L}} + \frac{1}{{\cal N}}\delta_j^i q_{l\,{\scriptscriptstyle [3]} }^{-{\scriptscriptstyle L}}\,, \end{eqnarray} where the operator ${\bf J}^i{}_j$ is defined as \begin{equation} \label{04092019-man-21} {\bf J}^i{}_j \equiv \mathbb{P}_\theta^i \partial_{\mathbb{P}_\theta^j} - \frac{1}{{\cal N}} \delta_j^i \mathbb{P}_\theta^k \partial_{\mathbb{P}_\theta^k}\,. \end{equation} We now proceed with studying the restrictions imposed by the dynamical symmetries. \noindent {\bf Dynamical symmetries of cubic densities}. Constraints on the cubic densities imposed by (anti)commutators between the dynamical generators are referred to as dynamical symmetry constraints. This is to say that the (anti)commutators to be considered are given by \begin{eqnarray} \label{04092019-man-22} && [P^-,J^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}]=0\,, \hspace{2.3cm} [P^-,Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}]=0\,, \\ \label{04092019-man-23} && [J^{-{\scriptscriptstyle R}},J^{-{\scriptscriptstyle L}}]=0\,, \hspace{2.4cm} [Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}},J^{-{\scriptscriptstyle L}}]=0\,, \hspace{1.2cm} [Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}},J^{-{\scriptscriptstyle R}}]=0\,, \\ \label{04092019-man-24} && \{Q^{-{\scriptscriptstyle R} i},Q_j^{-{\scriptscriptstyle L}} \} = - \delta_j^i P^-\,,\hspace{1cm} \{Q^{-{\scriptscriptstyle R} i},Q^{-{\scriptscriptstyle R} j} \} = 0\,, \hspace{1cm} \{ Q_i^{-{\scriptscriptstyle L}},Q_j^{-{\scriptscriptstyle L}} \} = 0\,.\qquad \end{eqnarray} First, we consider the commutators in \rf{04092019-man-22}. In the cubic approximation, the commutators \rf{04092019-man-22} take the form \begin{equation} \label{04092019-man-25} [P_{\scriptscriptstyle [2]}^- ,J_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}}] + [P_{\scriptscriptstyle [3]}^-,J_{\scriptscriptstyle [2]}^{-{\scriptscriptstyle R}}]=0\,, \qquad [P_{\scriptscriptstyle [2]}^-,Q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}] + [P_{\scriptscriptstyle [3]}^-, Q_{\scriptscriptstyle [2]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}]=0\,.
\end{equation} We verify that equations \rf{04092019-man-25} allow us to express the densities $q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ and $j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ in terms of the cubic vertex $p_{\scriptscriptstyle [3]}^-$ in the following way: \begin{eqnarray} \label{04092019-man-26} && q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R} i} = - \frac{\mathbb{P}_\theta^i}{\mathbb{P}^{\scriptscriptstyle L}} p_{\scriptscriptstyle [3]}^- \,, \hspace{2.2cm} q_{i {\scriptscriptstyle [3]}}^{-{\scriptscriptstyle L}} = \frac{\beta}{\mathbb{P}^{\scriptscriptstyle R}} \partial_{\mathbb{P}_\theta^i} p_{\scriptscriptstyle [3]}^- \,, \\ \label{04092019-man-27} && j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}} = -\frac{\beta}{ \mathbb{P}^{\scriptscriptstyle R} \mathbb{P}^{\scriptscriptstyle L} } {\bf J}^{-{\scriptscriptstyle R}} p_{\scriptscriptstyle [3]}^- \,, \hspace{1cm} j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle L}} = -\frac{\beta}{ \mathbb{P}^{\scriptscriptstyle R} \mathbb{P}^{\scriptscriptstyle L} } {\bf J}^{-{\scriptscriptstyle L}} p_{\scriptscriptstyle [3]}^- \,, \end{eqnarray} where the operators ${\bf J}^{-{\scriptscriptstyle R}}$, ${\bf J}^{-{\scriptscriptstyle L}}$ are defined as \begin{eqnarray} \label{04092019-man-28} && {\bf J}^{-{\scriptscriptstyle R}} = \frac{\mathbb{P}^{\scriptscriptstyle R}}{\beta} \big( -\mathbb{N}_\beta + \mathbb{M}_\lambda - \frac{1}{2} \mathbb{E}_\lambda\big)\,, \\ \label{04092019-man-29} && {\bf J}^{-{\scriptscriptstyle L}} = \frac{\mathbb{P}^{\scriptscriptstyle L}}{\beta} \big(-\mathbb{N}_\beta - \mathbb{M}_\lambda - \frac{1}{2} \mathbb{E}_\lambda \big)\,, \\ \label{04092019-man-30} && \hspace{1.3cm} \mathbb{N}_\beta = \frac{1}{3}\sum_{a=1,2,3}\check{\beta}_a \beta_a\partial_{\beta_a}\,, \hspace{1cm} \beta \equiv \beta_1\beta_2\beta_3\,, \\ \label{04092019-man-31} && \hspace{1.3cm} \mathbb{M}_\lambda = \frac{1}{3}\sum_{a=1,2,3}\check{\beta}_a\lambda_a\,,\hspace{1cm} \mathbb{E}_\lambda = \frac{1}{3}\sum_{a=1,2,3}\check{\beta}_a e_{\lambda_a}\,, \end{eqnarray} while the symbol $e_\lambda$ entering \rf{04092019-man-31} is given in \rf{02092019-man-07}. Second, we verify that, if the dynamical symmetry equations for the densities \rf{04092019-man-26},\rf{04092019-man-27} and the kinematical symmetry equations for the cubic vertex $p_{\scriptscriptstyle [3]}^-$ \rf{04092019-man-07},\rf{04092019-man-12},\rf{04092019-man-18} are satisfied, then all kinematical symmetry equations for the densities $q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ are satisfied automatically. Third, we verify that, if the dynamical symmetry equations for the densities \rf{04092019-man-26},\rf{04092019-man-27} are satisfied, then all the dynamical symmetry equations obtained from (anti)commutators \rf{04092019-man-23},\rf{04092019-man-24} are satisfied automatically. Thus, we see that the kinematical and dynamical symmetry constraints for the cubic densities amount to the equations for the densities \rf{04092019-man-26},\rf{04092019-man-27} and the equations for the cubic vertex $p_{\scriptscriptstyle [3]}^-$ \rf{04092019-man-07},\rf{04092019-man-12},\rf{04092019-man-18}. Equations \rf{04092019-man-26},\rf{04092019-man-27} and \rf{04092019-man-07},\rf{04092019-man-12},\rf{04092019-man-18} do not allow us to fix the cubic densities unambiguously.
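To see the residual ambiguity explicitly, we note (as a simple illustration of our own) that the kinematical equations \rf{04092019-man-07},\rf{04092019-man-12},\rf{04092019-man-18} are homogeneity conditions and therefore do not distinguish a given solution $p_{\scriptscriptstyle [3]}^-$ from, say, \begin{equation} p_{\scriptscriptstyle [3]}^{-\,\prime} = \frac{\mathbb{P}^{\scriptscriptstyle R}\mathbb{P}^{\scriptscriptstyle L}}{\beta_1^2}\, p_{\scriptscriptstyle [3]}^-\,, \end{equation} because the prefactor carries vanishing ${\bf J}^{+-}$, ${\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}}$, and ${\bf J}^i{}_j$ weights. As discussed below, such $\mathbb{P}^{\scriptscriptstyle R}\mathbb{P}^{\scriptscriptstyle L}$-proportional contributions are precisely the ones generated by field redefinitions.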
To determine the cubic densities unambiguously, we need an additional requirement. We refer to such a requirement as the light-cone gauge dynamical principle. \noindent {\bf Light-cone gauge dynamical principle}. We formulate the light-cone gauge dynamical principle in the following way: \noindent {\bf i}) Cubic densities $p_{\scriptscriptstyle [3]}^-$, $q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ should be polynomial in the momenta $\mathbb{P}^{\scriptscriptstyle R}$, $\mathbb{P}^{\scriptscriptstyle L}$; \noindent {\bf ii}) Cubic vertex $p_{\scriptscriptstyle [3]}^-$ should respect the following constraint: \begin{equation} \label{04092019-man-32} p_{\scriptscriptstyle [3]}^- \ne \mathbb{P}^{\scriptscriptstyle R}\mathbb{P}^{\scriptscriptstyle L} W\,, \quad W \ \hbox{is polynomial in } \mathbb{P}^{\scriptscriptstyle R},\mathbb{P}^{\scriptscriptstyle L}\,. \end{equation} For the reader's convenience, we note that the requirement in {\bf i}) is simply the light-cone counterpart of the locality condition which is commonly used in Lorentz covariant formulations. We now comment on the constraint \rf{04092019-man-32}. As is well known, upon field redefinitions, the cubic vertex $p_{\scriptscriptstyle [3]}^-$ for massless fields is changed by terms proportional to $\mathbb{P}^{\scriptscriptstyle R} \mathbb{P}^{\scriptscriptstyle L}$ (see, e.g., the discussion in Appendix B of Ref.\cite{Metsaev:2005ar}). This implies that all cubic vertices that are proportional to $\mathbb{P}^{\scriptscriptstyle R} \mathbb{P}^{\scriptscriptstyle L}$ can be removed by using field redefinitions. As cubic vertices $p_{\scriptscriptstyle [3]}^-$ that can be removed by exploiting field redefinitions are of no interest to us, we impose the constraint \rf{04092019-man-32}. \noindent {\bf Complete system of equations for cubic vertex}. We now present all equations we obtained for the cubic vertex.
Namely, for the cubic vertex \begin{equation} \label{04092019-man-33} p_{\scriptscriptstyle [3]}^- = p_{\lambda_1\lambda_2\lambda_3}^-(\mathbb{P}^{\scriptscriptstyle R},\mathbb{P}^{\scriptscriptstyle L},\mathbb{P}_\theta, \beta_a)\,, \end{equation} we found the following complete system of equations: \begin{eqnarray} && \hbox{\it Kinematical} \quad J^{+-}, \quad J^{{\scriptscriptstyle R}{\scriptscriptstyle L}},\ \hbox{\it and} \ J^i{}_j \ \hbox{\it symmetries}: \nonumber\\ \label{04092019-man-34} && \big( {\bf J}^{+-} - \frac{{\cal N}}{4} \big) p_{\scriptscriptstyle [3]}^- =0 \,, \hspace{2.5cm} \big( {\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}} + \frac{{\cal N}}{4} \big)p_{\scriptscriptstyle [3]}^- = 0\,, \hspace{1cm} {\bf J}^i{}_j p_{\scriptscriptstyle [3]}^- = 0\,, \\ && \hbox{\it Dynamical} \quad P^-,\quad Q^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}, \ \hbox{\it and} \ J^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} \ \hbox{\it symmetries}: \nonumber\\ \label{04092019-man-35} && q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R}\,i} = - \frac{\mathbb{P}_\theta^i}{\mathbb{P}^{\scriptscriptstyle L}} p_{\scriptscriptstyle [3]}^-\,, \hspace{1.8cm} q_{i{\scriptscriptstyle [3]}}^{-{\scriptscriptstyle L}} = \frac{\beta}{\mathbb{P}^{\scriptscriptstyle R}} \partial_{\mathbb{P}_\theta^i} p_{\scriptscriptstyle [3]}^-\,, \hspace{1.8cm} j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} = - \frac{\beta}{\mathbb{P}^{\scriptscriptstyle R}\mathbb{P}^{\scriptscriptstyle L}}{\bf J}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} p_{\scriptscriptstyle [3]}^- \,,\hspace{2cm} \\ &&\hbox{ \it Light-cone gauge dynamical principle:} \nonumber\\ \label{04092019-man-36} && p_{\scriptscriptstyle [3]}^-\,, \ q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}\,, \ j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}} \hspace{0.5cm} \hbox{ are polynomial in } \mathbb{P}^{\scriptscriptstyle R}, \mathbb{P}^{\scriptscriptstyle L}; \\ \label{04092019-man-37} && p_{\scriptscriptstyle [3]}^- \ne \mathbb{P}^{\scriptscriptstyle R}\mathbb{P}^{\scriptscriptstyle L} W, \hspace{1cm} W \hbox{ is polynomial in } \mathbb{P}^{\scriptscriptstyle R}, \mathbb{P}^{\scriptscriptstyle L}; \qquad \end{eqnarray} where the operators ${\bf J}^{+-}$, ${\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}}$, ${\bf J}^i{}_j$, and ${\bf J}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ are given in \rf{04092019-man-10},\rf{04092019-man-17},\rf{04092019-man-21} and \rf{04092019-man-28},\rf{04092019-man-29} respectively. To conclude this Section, we emphasize that it is the equations given in \rf{04092019-man-34}-\rf{04092019-man-37} that constitute the complete system of equations which allows us to fix the cubic densities $p_{\scriptscriptstyle [3]}^-$, $q_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $j_{\scriptscriptstyle [3]}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ unambiguously. As a side remark, we note that, by applying our complete system of equations to supersymmetric Yang-Mills and supergravity theories with extended supersymmetries, we verify that the complete system of equations allows us to fix the cubic interactions of those supersymmetric theories unambiguously (up to coupling constants). We think therefore that it is worthwhile to apply our complete system of equations to study the cubic vertices of arbitrary spin ${\cal N}$-extended supersymmetric theories.
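To illustrate how the kinematical equations in \rf{04092019-man-34} constrain the vertex, we present an illustrative counting of our own which anticipates the solution of the next Section. Consider a $\mathbb{P}_\theta$-independent monomial ansatz \begin{equation} p_{\scriptscriptstyle [3]}^- = C\, (\mathbb{P}^{\scriptscriptstyle L})^K \prod_{a=1,2,3} \beta_a^{\nu_a}\,, \end{equation} where $C$, $K$, and $\nu_a$ are free parameters of the ansatz. The ${\bf J}^i{}_j$ equation holds trivially, while the ${\bf J}^{{\scriptscriptstyle R}{\scriptscriptstyle L}}$ and ${\bf J}^{+-}$ equations give, respectively, \begin{equation} -K + {\bf M}_\lambda = -\frac{1}{4}{\cal N}\,, \qquad K + \sum_{a=1,2,3} \nu_a + \frac{1}{2}\sum_{a=1,2,3} e_{\lambda_a} = \frac{1}{4}{\cal N}\,, \end{equation} i.e., $K = \frac{1}{4}{\cal N} + {\bf M}_\lambda$ and $\sum_a \nu_a = -{\bf M}_\lambda - \frac{1}{2}\sum_a e_{\lambda_a}$. The individual $\nu_a$ are then fixed by the remaining equations \rf{04092019-man-35},\rf{04092019-man-36}, and the result is the vertex $V_{\lambda_1\lambda_2\lambda_3}$ of the next Section.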
\newsection{ \large Cubic interaction vertices } \label{sec-05} We now present the solution to the complete system of equations given in \rf{04092019-man-34}-\rf{04092019-man-37}. Some details of solving these equations may be found in Appendix C. This is to say that the general solution for the cubic vertex $p_{\lambda_1\lambda_2\lambda_3}^-$, the supercharges $q_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$, and the angular momenta $j_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R},{\scriptscriptstyle L}}$ is given by \begin{eqnarray} \label{05092019-man-01} && p_{\lambda_1\lambda_2\lambda_3}^- = V_{\lambda_1\lambda_2\lambda_3} + {\bar{V}}_{\lambda_1\lambda_2\lambda_3}\,, \\ \label{05092019-man-02} && \hspace{1.5cm} V_{\lambda_1\lambda_2\lambda_3} = C^{\lambda_1\lambda_2\lambda_3} (\mathbb{P}^{\scriptscriptstyle L})^{\frac{1}{4}{\cal N} + {\bf M}_\lambda } \prod_{a=1,2,3} \beta_a^{-\lambda_a - \frac{1}{2} e_{\lambda_a}}\,, \\ \label{05092019-man-03} && \hspace{1.5cm} {\bar{V}}_{\lambda_1\lambda_2\lambda_3} = {\bar{C}}^{\lambda_1\lambda_2\lambda_3} (\mathbb{P}^{\scriptscriptstyle R})^{ \frac{1}{4}{\cal N} - {\bf M}_\lambda }\, (\varepsilon \mathbb{P}_\theta^{\cal N}) \prod_{a=1,2,3} \beta_a^{ \lambda_a - \frac{1}{2} {\cal N} - \frac{1}{2} e_{\lambda_a}}\,, \\ \label{05092019-man-04} && q_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R}\, i} = - C^{\lambda_1\lambda_2\lambda_3} (\mathbb{P}^{\scriptscriptstyle L})^{\frac{1}{4}{\cal N} + {\bf M}_\lambda -1}\, \mathbb{P}_\theta^i \prod_{a=1,2,3} \beta_a^{-\lambda_a - \frac{1}{2} e_{\lambda_a} }\,, \\ \label{05092019-man-05} && q_{i;\,\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle L}} = {\bar{C}}^{\lambda_1\lambda_2\lambda_3} (\mathbb{P}^{\scriptscriptstyle R})^{\frac{1}{4}{\cal N} - {\bf M}_\lambda - 1} (\varepsilon \mathbb{P}_\theta^{{\cal N}-1})_i \prod_{a=1,2,3} \beta_a^{\lambda_a + 1 - \frac{1}{2} {\cal N} - \frac{1}{2} e_{\lambda_a}}\,, \\ \label{05092019-man-06} && j_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle R}} = - 2 C^{\lambda_1\lambda_2\lambda_3} \mathbb{M}_\lambda (\mathbb{P}^{\scriptscriptstyle L})^{\frac{1}{4}{\cal N} + {\bf M}_\lambda-1} \prod_{a=1,2,3} \beta_a^{-\lambda_a - \frac{1}{2} e_{\lambda_a}}\,, \\ \label{05092019-man-07} && j_{\lambda_1\lambda_2\lambda_3}^{-{\scriptscriptstyle L}} = 2{\bar{C}}^{\lambda_1\lambda_2\lambda_3} \mathbb{M}_\lambda (\mathbb{P}^{\scriptscriptstyle R})^{\frac{1}{4}{\cal N} - {\bf M}_\lambda- 1 }\, (\varepsilon\mathbb{P}_\theta^{\cal N}) \prod_{a=1,2,3} \beta_a^{\lambda_a - \frac{1}{2} {\cal N} - \frac{1}{2} e_{\lambda_a}}\,,\qquad \end{eqnarray} where we use the notation \begin{eqnarray} \label{05092019-man-08} && {\bf M}_\lambda = \sum_{a=1,2,3}\lambda_a\,, \hspace{1cm} \mathbb{M}_\lambda = \frac{1}{3}\sum_{a=1,2,3}\check{\beta}_a \lambda_a\,, \\ \label{05092019-man-09} && (\varepsilon \mathbb{P}_\theta^{\cal N}) \equiv \frac{1}{{\cal N}!} \varepsilon_{i_1\ldots i_{\cal N}} \mathbb{P}_\theta^{i_1} \ldots \mathbb{P}_\theta^{i_{\cal N}}\,, \hspace{1cm} (\varepsilon \mathbb{P}_\theta^{{\cal N}-1})_i \equiv \frac{1}{({\cal N}-1)!} \varepsilon_{i i_2\ldots i_{\cal N}} \mathbb{P}_\theta^{i_2} \ldots \mathbb{P}_\theta^{i_{\cal N}}\,.
\qquad \end{eqnarray} The definitions of the symbol $e_\lambda$ and of the momenta $\mathbb{P}^{{\scriptscriptstyle R},{\scriptscriptstyle L}}$, $\mathbb{P}_\theta^i$, $\check{\beta}_a$ may be found in \rf{02092019-man-07} and \rf{04092019-man-04} respectively, while the quantity $\varepsilon_{i_1\ldots i_{\cal N}}$ is the Levi-Civita symbol of the $su({\cal N})$ algebra, $\varepsilon_{1\ldots {\cal N}}=1$. The quantities $C^{\lambda_1\lambda_2\lambda_3}$, ${\bar{C}}^{\lambda_1\lambda_2\lambda_3}$ entering our solution \rf{05092019-man-01}-\rf{05092019-man-07} are coupling constants. In general, these coupling constants depend on the helicities $\lambda_1$, $\lambda_2$, $\lambda_3$. The coupling constants are nontrivial for the following values of ${\cal N}$ and the superfield helicities $\lambda_1$, $\lambda_2$, $\lambda_3$: \begin{eqnarray} \label{05092019-man-10} && C^{\lambda_1\lambda_2\lambda_3} \ne 0\,, \hspace{ 1cm } \hbox{ for } \quad \frac{1}{4}{\cal N}+ {\bf M}_\lambda -1 \geq 0 \,, \hspace{1cm} {\bf M}_\lambda \in \mathbb{Z}\,; \\ \label{05092019-man-11} && {\bar{C}}^{\lambda_1\lambda_2\lambda_3 } \ne 0 \,, \hspace{ 1cm } \hbox{ for }\quad \frac{1}{4}{\cal N} - {\bf M}_\lambda - 1 \geq 0\,, \hspace{1cm} {\bf M}_\lambda \in \mathbb{Z}\,; \\ \label{05092019-man-12} && C^{\lambda_1\lambda_2\lambda_3 *} = (-)^{{\bf M}_\lambda} {\bar{C}}^{-\lambda_1-\lambda_2-\lambda_3} \,, \end{eqnarray} where, in \rf{05092019-man-12}, the asterisk implies complex conjugation. We now comment on the constraints for the coupling constants presented in \rf{05092019-man-10}-\rf{05092019-man-12}. \noindent {\bf i}) The constraint on $C^{\lambda_1\lambda_2\lambda_3}$ and the first constraint on ${\bf M}_\lambda$ and ${\cal N}$ in \rf{05092019-man-10} are obtained from the requirement that the densities \rf{05092019-man-02},\rf{05092019-man-04}, and \rf{05092019-man-06} be polynomial in the momentum $\mathbb{P}^{\scriptscriptstyle L}$. Accordingly, the constraint on ${\bar{C}}^{\lambda_1\lambda_2\lambda_3}$ and the first constraint on ${\bf M}_\lambda$ and ${\cal N}$ in \rf{05092019-man-11} are obtained from the requirement that the densities \rf{05092019-man-03},\rf{05092019-man-05}, and \rf{05092019-man-07} be polynomial in the momentum $\mathbb{P}^{\scriptscriptstyle R}$. \noindent {\bf ii}) The constraint ${\bf M}_\lambda\in \mathbb{Z}$ in \rf{05092019-man-10},\rf{05092019-man-11} is simply obtained by setting $n=3$ in \rf{03092019-man-34}. \noindent {\bf iii}) The constraint on the coupling constants in \rf{05092019-man-12} is obtained from the requirement that the cubic Hamiltonian $P_{\scriptscriptstyle [3]}^-$ be hermitian. This constraint can straightforwardly be derived by using relation \rf{07092019-man-13} in Appendix B. To summarize, relations \rf{05092019-man-10}-\rf{05092019-man-12} give the classification of cubic interaction vertices for ${\cal N}$-extended massless arbitrary spin supermultiplets, while expressions \rf{05092019-man-01}-\rf{05092019-man-03} give the momentum superspace representation for these vertices. \noindent {\bf Cubic interaction vertices in terms of component fields}. For the reader's convenience, we now present the cubic vertices in terms of the component fields.
To this end, we focus on the interaction of three superfields $\Phi_{\lambda_1}^*$, $\Phi_{\lambda_2}^*$, $\Phi_{\lambda_3}^*$ and represent the cubic Hamiltonian in the following way: \begin{eqnarray} \label{05092019-man-12-ad01} && P_{\scriptscriptstyle [3]}^-(\Phi_{\lambda_1},\Phi_{\lambda_2},\Phi_{\lambda_3}) = \int d\Gamma_{\scriptscriptstyle [3]}^p\,\, C^{\lambda_1\lambda_2\lambda_3} {\bf V}^{ \Phi_{\lambda_1}\Phi_{\lambda_2}\Phi_{\lambda_3} } + h.c.\,, \\ \label{05092019-man-12-ad02} && C^{\lambda_1\lambda_2\lambda_3} {\bf V}^{\Phi_{\lambda_1}\Phi_{\lambda_2}\Phi_{\lambda_3} } \equiv \int d\Gamma_{\scriptscriptstyle [3]}^{p_\theta}\,\, \Phi_{\lambda_1\lambda_2\lambda_3}^* V_{\lambda_1\lambda_2\lambda_3}\,, \end{eqnarray} where the expressions for $d\Gamma_{\scriptscriptstyle [3]}^p$, $d\Gamma_{\scriptscriptstyle [3]}^{p_\theta}$ are obtainable by setting $n=3$ in \rf{03092019-man-08},\rf{03092019-man-09}. It is the vertex ${\bf V}^{\scriptscriptstyle\Phi_{\lambda_1}\Phi_{\lambda_2}\Phi_{\lambda_3} }$ \rf{05092019-man-12-ad02} that provides us with the representation in terms of the component fields. To get an explicit representation of ${\bf V}^{\scriptscriptstyle\Phi_{\lambda_1}\Phi_{\lambda_2}\Phi_{\lambda_3} }$ in terms of the component fields \rf{02092019-man-05}, we plug \rf{05092019-man-02} into \rf{05092019-man-12-ad02} and use the representation for $\Phi_\lambda^*$ in terms of the component fields given in \rf{07092019-man-01-ad01}. Doing so, we get \begin{eqnarray} \label{05092019-man-12-ad03} && {\bf V}^{\Phi_{\lambda_1}\Phi_{\lambda_2}\Phi_{\lambda_3} } = \sum_{q_1,q_2,q_3=0\atop q_1+q_2+q_3={\cal N}}^{\cal N} C_{i(q_1)i(q_2)i(q_3)} V_{i(q_1)i(q_2)i(q_3)}^{\Lambda_1\Lambda_2\Lambda_3 }\,, \qquad \\ \label{05092019-man-12-ad03-x1} && \hspace{3cm} V_{i(q_1)i(q_2)i(q_3)}^{\Lambda_1\Lambda_2\Lambda_3 } \equiv (\mathbb{P}^{\scriptscriptstyle L})^{\Lambda_1+ \Lambda_2+ \Lambda_3} \prod_{a=1,2,3} \phi_{\Lambda_a; i(q_a)}^\dagger(p_a) \beta_a^{-\Lambda_a - \frac{1}{2} e_{\Lambda_a}}\,, \qquad \end{eqnarray} where we use the notation \begin{eqnarray} \label{05092019-man-12-ad04} && \Lambda_a \equiv \lambda_a -\frac{q_a}{2} + \frac{{\cal N}}{4}\,, \hspace{1cm} a=1,2,3\,, \\ \label{05092019-man-12-ad05} && C_{ i(q_1)i(q_2)i(q_3)} \equiv \frac{\omega_{q_1q_2q_3}}{q_1!q_2!q_3!}\int d\Gamma_{\scriptscriptstyle [3]}^{p_\theta} (\varepsilon p_{\theta_1}^{{\cal N}-q_1})_{i(q_1)} (\varepsilon p_{\theta_2}^{{\cal N}-q_2})_{i(q_2)} (\varepsilon p_{\theta_3}^{{\cal N}-q_3})_{i(q_3)}\,, \\ \label{05092019-man-12-ad06} && \hspace{2.2cm} \omega_{q_1q_2q_3} \equiv (-)^{e_{\lambda_1-\frac{q_1}{2} } e_{\frac{q_1}{2}} + \, e_{\lambda_3-\frac{q_3}{2} } e_{\frac{q_2}{2}} } \,. \end{eqnarray} In \rf{05092019-man-12-ad03}-\rf{05092019-man-12-ad05}, the shortcut $i(q_a)$ stands for the $su({\cal N})$ algebra tensor indices $i_1^a\ldots i_{q_a}^a$, while the quantities $(\varepsilon p_\theta^{{\cal N}-q})_{i(q)}$ appearing in \rf{05092019-man-12-ad05} are defined in \rf{06092019-man-18-ad01}. Also note that, in \rf{05092019-man-12-ad03}, the summation runs over those values of $q_1,q_2,q_3=0,1,\ldots,{\cal N}$ which satisfy the restriction $q_1+q_2+q_3={\cal N}$. Such a restriction appears in view of \begin{equation} \label{05092019-man-12-ad07} C_{ i(q_1)i(q_2)i(q_3)} \ne 0 \ \ \ \hbox{ only for } \ \ \ \ q_1 + q_2+ q_3 = {\cal N}\,,\qquad 0 \leq q_a \leq {\cal N}\,, \quad a=1,2,3\,.
\end{equation} From \rf{05092019-man-12-ad03}, we see that our generic vertex ${\bf V}^{\scriptscriptstyle\Phi_{\lambda_1}\Phi_{\lambda_2}\Phi_{\lambda_3} }$ is decomposed into elementary vertices denoted by $V_{\scriptscriptstyle i(q_1)i(q_2)i(q_3)}^{\scriptscriptstyle\Lambda_1\Lambda_2\Lambda_3 }$ \rf{05092019-man-12-ad03-x1}. We note that the elementary vertex $V_{\scriptscriptstyle i(q_1)i(q_2)i(q_3)}^{\scriptscriptstyle\Lambda_1\Lambda_2\Lambda_3 }$ describes the interaction of three component fields $\phi_{\scriptscriptstyle\Lambda_a; i(q_a)}^\dagger$, $a=1,2,3$, having the respective helicities $\Lambda_1$, $\Lambda_2$, and $\Lambda_3$. \noindent {\bf Internal symmetry}. Let us demonstrate the incorporation of internal symmetry in our model by considering the algebra $o({\sf N})$ as the internal symmetry algebra. The internal symmetry can then be incorporated into our model as follows. First, in place of the superfields $\Phi_\lambda$, $\Phi_\lambda^*$, we use matrix-valued superfields $\Phi_\lambda^{{\sf a}{\sf b}}$, $\Phi_\lambda^{*{\sf a}{\sf b}}$, where the indices ${\sf a},{\sf b}$ stand for matrix indices of the $o({\sf N})$ algebra, ${\sf a},{\sf b}=1,\ldots,{\sf N}$. By definition, our new matrix-valued superfields satisfy the following algebraic constraints \begin{equation} \label{05092019-man-13} \Phi_\lambda^{{\sf a}{\sf b}} = (-)^{\lambda + \frac{{\cal N}}{4} +\frac{1}{2} \eta_\lambda e_\lambda} \Phi_\lambda^{{\sf b}{\sf a}} \,, \qquad \Phi_\lambda^{*{\sf a}{\sf b}} = (-)^{\lambda + \frac{{\cal N}}{4} + \frac{1}{2} \eta_\lambda e_\lambda} \Phi_\lambda^{*{\sf b}{\sf a}} \,, \qquad \eta_\lambda^2 = 1\,,\quad \eta_{-\lambda} = - \eta_\lambda\,, \end{equation} where $e_\lambda$ is given in \rf{02092019-man-07}. It is easy to check that the constraints \rf{05092019-man-13} are consistent in view of the relation $(-)^{2\lambda +\frac{1}{2} {\cal N} + \eta_\lambda e_\lambda}=1$. Note that, in general, the sign of $\eta_\lambda$ may depend on ${\cal N}$. As in the case of the singlet superfields \rf{02092019-man-23}, the superfields $\Phi_\lambda^{{\sf a}{\sf b}}$ and $\Phi_\lambda^{*{\sf a}{\sf b}}$ are related as \begin{equation} \label{05092019-man-14} \Phi_{-\lambda}^{*{\sf a}{\sf b}}(-p,-p_\theta) = \Phi_\lambda^{{\sf a}{\sf b}}(p,p_\theta)\,. \end{equation} Second, in the formulas for the generators and the action \rf{02092019-man-24},\rf{02092019-man-27}, in place of $\Phi_\lambda^* \Phi_\lambda$, we use $\Phi_\lambda^{*{\sf a}{\sf b}} \Phi_\lambda^{{\sf a}{\sf b}}$, while, in the cubic vertices, in place of $\Phi_{\lambda_1}^* \Phi_{\lambda_2}^* \Phi_{\lambda_3}^*$, we use the expressions $\Phi_{\lambda_1}^{*{\sf a}{\sf b}} \Phi_{\lambda_2}^{*{\sf b}{\sf c}} \Phi_{\lambda_3}^{*{\sf c}{\sf a}}$. Third, (anti)commutator \rf{02092019-man-25} is represented as \begin{eqnarray} \label{05092019-man-15} && [\Phi_\lambda^{{\sf a}{\sf b}}(p,p_\theta), \Phi_{\lambda'}^{*{\sf a}'{\sf b}'}(p',p_\theta')]_\pm = \frac{1}{2} \beta^{- e_{\lambda+\frac{1}{2}}} \Pi_\lambda^{{\sf a}{\sf b},{\sf a}'{\sf b}'} \delta^3(p-p')\delta^{\cal N}(p_\theta-p_\theta') \delta_{\lambda,\lambda'}\,, \\ \label{05092019-man-16} && \Pi_\lambda^{{\sf a}{\sf b},{\sf a}'{\sf b}'} \equiv \frac{1}{2}\big( \delta^{{\sf a}{\sf a}'} \delta^{{\sf b}{\sf b}'} + (-)^{\lambda + \frac{{\cal N}}{4} + \frac{1}{2} \eta_\lambda e_\lambda } \delta^{{\sf a}{\sf b}'} \delta^{{\sf b}{\sf a}'} \big)\,, \qquad \Pi_\lambda^{{\sf a}{\sf b},{\sf a}'{\sf b}'} \Pi_\lambda^{{\sf a}'{\sf b}',{\sf c}{\sf e}} = \Pi_\lambda^{{\sf a}{\sf b},{\sf c}{\sf e}}\,.
\end{eqnarray} The following remarks are in order. \noindent {\bf i}) For $\lambda_1=0$, $\lambda_2=0$, $\lambda_3=0$, the vertex given in \rf{05092019-man-01} describes the self-interacting scalar superfield $\Phi_0$; such a vertex has already been obtained in Ref.\cite{Bengtsson:1983pg}. Thus, our result for the cubic vertex $p_{\lambda_1\lambda_2\lambda_3}^-$ given in \rf{05092019-man-01} agrees with the previously reported result for the particular values $\lambda_1=0$, $\lambda_2=0$, $\lambda_3=0$, and provides the expression for the cubic vertex $p_{\lambda_1\lambda_2\lambda_3}^-$ for arbitrary values of the superfield helicities $\lambda_1$, $\lambda_2$, $\lambda_3$. \noindent {\bf ii}) Our vertices ${\bf V}^{\scriptscriptstyle \Phi_{\lambda_1}\Phi_{\lambda_2}\Phi_{\lambda_3} }$ \rf{05092019-man-12-ad03} can be considered as a supersymmetric completion of the cubic vertices for bosonic massless fields in the $4d$ flat space found in Ref.\cite{Bengtsson:1986kh}. We note, however, that a manifestly Lorentz covariant description of some light-cone gauge vertices presented in Ref.\cite{Bengtsson:1986kh} is not available so far. In Sec.6 of Ref.\cite{Metsaev:2019dqt}, we provided a detailed discussion of the vertices in Ref.\cite{Bengtsson:1986kh} that can be translated into manifestly Lorentz covariant form. The reader interested in the Lorentz covariant formulation of light-cone gauge vertices is invited to read Sec.6 of Ref.\cite{Metsaev:2019dqt}. \noindent {\bf iii}) Taking into account relations \rf{05092019-man-12-ad04}, the restrictions on $q_a$ in \rf{05092019-man-12-ad07} can entirely be represented in terms of $\lambda_a$ and $\Lambda_a$ as \begin{eqnarray} \label{05092019-man-17} && \lambda_1+\lambda_2 +\lambda_3 = \Lambda_1+\Lambda_2 +\Lambda_3 - \frac{1}{4}{\cal N}\,, \\ \label{05092019-man-17-ad01} && \Lambda_a - \frac{1}{4}{\cal N} \leq \lambda_a \leq \Lambda_a + \frac{1}{4}{\cal N} \,, \qquad a =1,2,3\,. \end{eqnarray} Restrictions \rf{05092019-man-17},\rf{05092019-man-17-ad01} provide the classification of cubic interactions of the component fields which admit a supersymmetric completion. Namely, the cubic interactions of the three component fields having the helicities $\Lambda_1$, $\Lambda_2$, $\Lambda_3$ are described by the vertex in \rf{05092019-man-12-ad03-x1}. Restrictions \rf{05092019-man-17},\rf{05092019-man-17-ad01} then tell us which superfields $\Phi_\lambda$ are required for the supersymmetric completion of the vertex in \rf{05092019-man-12-ad03-x1}. Also, from restrictions \rf{05092019-man-17},\rf{05092019-man-17-ad01}, we can learn which vertices in \rf{05092019-man-12-ad03-x1} do not admit a supersymmetric completion. For the reader's convenience, we now illustrate the use of restrictions \rf{05092019-man-17},\rf{05092019-man-17-ad01}. To this end, for three spin-2 component fields, we consider cubic vertices of power $(\mathbb{P}^{\scriptscriptstyle L})^6$ in \rf{05092019-man-12-ad03-x1}. For spin-2 component fields, the helicities take the values $\Lambda_a=\pm2$, $a=1,2,3$. From \rf{05092019-man-12-ad03-x1}, we see that, in order to get vertices of power $(\mathbb{P}^{\scriptscriptstyle L})^6$, we should choose $\Lambda_1=\Lambda_2=\Lambda_3=2$.
Plugging $\Lambda_a=2$, $a=1,2,3$, into \rf{05092019-man-17},\rf{05092019-man-17-ad01}, we obtain the restrictions \begin{eqnarray} \label{05092019-man-17-ad02} && \lambda_1+\lambda_2 +\lambda_3 = 6 - \frac{1}{4}{\cal N}\,, \\ \label{05092019-man-17-ad03} && 2 - \frac{1}{4}{\cal N} \leq \lambda_a \leq 2 + \frac{1}{4}{\cal N} \,, \qquad a =1,2,3\,. \end{eqnarray} To explore our illustrative example further, we apply the restrictions \rf{05092019-man-17-ad02},\rf{05092019-man-17-ad03} to ${\cal N}=8$ supergravity. We recall that ${\cal N}=8$ supergravity is described by the superfield $\Phi_\lambda$ with $\lambda=0$. Plugging ${\cal N}=8$, $\lambda_1=\lambda_2=\lambda_3=0$ into \rf{05092019-man-17-ad02}, we see that the restriction \rf{05092019-man-17-ad02} is not satisfied. So, on the one hand, using \rf{05092019-man-17-ad02},\rf{05092019-man-17-ad03}, we are led to the well-known statement: supersymmetries of ${\cal N}=8$ supergravity do not admit a supersymmetric completion of the bosonic $R^3$-terms, where $R$ stands for the Riemann curvature tensor. On the other hand, using \rf{05092019-man-17-ad02},\rf{05092019-man-17-ad03}, we can find the superfields $\Phi_\lambda$ required for a supersymmetric completion of the vertex of power $(\mathbb{P}^{\scriptscriptstyle L})^6$ for the three spin-2 fields. Obviously, to this end we should go beyond ${\cal N}=8$ supergravity. Namely, considering, for example, the particular case of the superfields $\Phi_{\lambda_a}$, with $\lambda_1=6-\frac{1}{4}{\cal N}$, $\lambda_2=\lambda_3=0$, and ${\cal N}\geq 8$, we verify that the restrictions \rf{05092019-man-17-ad02},\rf{05092019-man-17-ad03} are satisfied.

\noindent {\bf Conjecture for coupling constants of ${\cal N}$-extended supersymmetric theory}. Let us set $\Phi_\lambda=0$ for $\lambda\in \mathbb{Z}+\frac{1}{2}$ in \rf{02092019-man-12},\rf{02092019-man-12-a01} and consider the ${\cal N}$-extended supersymmetric model described by superfields $\Phi_\lambda$ with all $\lambda \in \mathbb{Z}$. Using \rf{05092019-man-17}, we note that, if we choose the following solution for the cubic coupling constants: \begin{equation} \label{05092019-man-18} C^{\lambda_1\lambda_2\lambda_3} = gk^{\lambda_1+\lambda_2 +\lambda_3 +\frac{1}{4}{\cal N}} \big/(\lambda_1+\lambda_2 +\lambda_3 +\frac{1}{4}{\cal N} - 1)!\,, \end{equation} then, in terms of the helicities $\Lambda_a$ of the component fields appearing in \rf{05092019-man-12-ad03}, we get the relation \begin{equation} \label{05092019-man-19} C^{\lambda_1\lambda_2\lambda_3} = gk^{\Lambda_1+\Lambda_2+\Lambda_3} \big/(\Lambda_1+\Lambda_2 +\Lambda_3 -1)!\,\,. \end{equation} In \rf{05092019-man-18},\rf{05092019-man-19}, $g$ is a dimensionless coupling constant, while $k$ is a dimensionful parameter. The relation for the coupling constants \rf{05092019-man-19} coincides with the one found in Refs.\cite{Metsaev:1991mt,Metsaev:1991nb} for bosonic theories of higher-spin fields. Thus, we see that, for the bosonic truncation of our ${\cal N}$-extended supersymmetric model, the solution given in \rf{05092019-man-18} coincides with the one in Refs.\cite{Metsaev:1991mt,Metsaev:1991nb}.
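Indeed, the passage from \rf{05092019-man-18} to \rf{05092019-man-19} is seen by direct substitution: the constraint \rf{05092019-man-17} gives
\begin{equation*}
\lambda_1+\lambda_2 +\lambda_3 + \frac{1}{4}{\cal N} = \Lambda_1+\Lambda_2 +\Lambda_3\,,
\end{equation*}
and replacing the exponent of $k$ and the argument of the factorial in \rf{05092019-man-18} by the right-hand side reproduces \rf{05092019-man-19} term by term.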
Taking this into account, we then conjecture that the generalization of the solution for the coupling constants in Refs.\cite{Metsaev:1991mt,Metsaev:1991nb} to the case of the ${\cal N}$-extended supersymmetric model is given by the relation in \rf{05092019-man-18}.% \footnote{ The solution for the cubic coupling constants \rf{05092019-man-19} of bosonic higher-spin theories was found in Refs.\cite{Metsaev:1991mt,Metsaev:1991nb} by analyzing the quartic approximation. In order to prove our conjecture for the cubic coupling constants \rf{05092019-man-18} one needs to extend the analysis of the cubic approximation in this paper to the quartic approximation for the ${\cal N}$-extended supersymmetric higher-spin theories. As a side remark we note that, taking into account relation \rf{05092019-man-17}, it is easy to see that the solution \rf{05092019-man-18} is the unique solution that leads to \rf{05092019-man-19}.} Also one can conjecture that the solution \rf{05092019-man-18} supplemented by the constraint ${\bar{C}}^{\lambda_1\lambda_2\lambda_3}=0$ provides the ${\cal N}$-extended supersymmetric generalization of the bosonic higher-spin chiral model in Ref.\cite{Ponomarev:2016lrm}.

\newsection{ \large Conclusions}\label{concl}

In this paper, we generalized our previous study of ${\cal N}=1$ massless arbitrary spin supermultiplets in the flat $4d$ space in Ref.\cite{Metsaev:2019dqt} to the case of ${\cal N}$-extended massless arbitrary spin supermultiplets, ${\cal N}=4\mathbb{N}$. For the ${\cal N}$-extended massless supermultiplets, we built unconstrained superfields and used such superfields to develop the light-cone gauge superspace formulation. We used our light-cone gauge superfield formulation to obtain the full list of cubic interaction vertices for ${\cal N}$-extended massless arbitrary spin (integer and half-integer) supermultiplets. We obtained the restrictions on the values of ${\cal N}$ and the helicities of the superfields which provide the complete classification of cubic vertices for the ${\cal N}$-extended massless supermultiplets studied in this paper. We note also that our treatment of light-cone gauge superfields provides us with an attractively simple superspace representation for the cubic interaction vertices. Now we would like to discuss potentially interesting generalizations and applications of our study. \medskip

\noindent {\bf i}) Perhaps the most interesting generalization of our results in this paper is related to the light-cone gauge higher-spin field theory in AdS. A light-cone gauge formulation of interacting higher-spin massless fields in $AdS_4$ space has recently been developed in Ref.\cite{Metsaev:2018xip}. Namely, in Ref.\cite{Metsaev:2018xip}, we demonstrated that the flat space cubic bosonic vertices obtained in Ref.\cite{Bengtsson:1986kh} enter as building blocks into the AdS cubic bosonic vertices. We expect therefore that the results, methods, and approaches in this paper and in Ref.\cite{Metsaev:2018xip} will have interesting applications for studying light-cone gauge ${\cal N}$-extended supersymmetric theories in $AdS_4$ space. For example, in Ref.\cite{Metsaev:2018xip}, we showed that the flat light-cone gauge bosonic vertices are in one-to-one correspondence with the AdS light-cone gauge bosonic vertices. For supersymmetric light-cone gauge flat and AdS cubic vertices, we also expect a one-to-one correspondence.
This implies then that our classification of the ${\cal N}$-extended flat cubic vertices obtained in this paper immediately provides the classification of the ${\cal N}$-extended AdS cubic vertices.% \footnote{ We think that the results in this paper might also have interesting applications for studying the supersymmetric extension of the conjectured non-local higher-spin field theories in flat space discussed in Ref.\cite{Roiban:2017iqg}.} Here, for the reader's convenience, we note that Vasiliev's equations for higher-spin gauge fields in $AdS_4$ were obtained in Ref.\cite{Vasiliev:1990en}. The complete cubic coupling was found in Ref.\cite{Sleight:2016dba} and the quartic interaction was reconstructed in Refs.\cite{Bekaert:2015tva,Sleight:2017fpc}. Recent developments of the approach of Ref.\cite{Vasiliev:1990en} may be found in Refs.\cite{Didenko:2018fgx}. In the framework of the approach of Ref.\cite{Vasiliev:1990en}, various ${\cal N}$-extended supersymmetric higher-spin gauge field theories in AdS space were studied in Refs.\cite{Konstein:1989ij}-\cite{Alkalaev:2002rq}. Also we mention the metric-like approaches in Refs.\cite{Joung:2012fv}-\cite{Karapetyan:2019psg} which might be helpful for studying higher-spin supersymmetric theories in AdS. Applications of the collective dipole approach to the investigation of higher-spin interactions may be found in Refs.\cite{Koch:2010cy}. We expect that the light-cone gauge approach will be helpful for the investigation of the problem of the bulk definition of higher-spin theories identified in Ref.\cite{Sleight:2017pcz}.

\noindent {\bf ii}) In this paper, we restricted our study to massless supermultiplets in four dimensions. A generalization of our study to the case of massless supermultiplets in higher dimensions could be of interest. In this respect, we note that all parity invariant cubic vertices for massless arbitrary spin light-cone gauge bosonic and fermionic fields in higher dimensions were built in Refs.\cite{Metsaev:2005ar,Metsaev:2007rn}, while a discussion of massless supermultiplets in higher dimensions may be found, e.g., in Ref.\cite{Sorokin:2018djm}.% \footnote{ In the framework of the BRST-BV approach and various metric-like Lorentz covariant approaches, cubic interactions for massless higher-spin fields were investigated in the respective Refs.\cite{Bekaert:2005jf}-\cite{Henneaux:2012wg} and Refs.\cite{Manvelyan:2010jr}. Lorentz covariant parity-odd cubic interactions for higher-spin massless fields in $R^{3,1}$ are studied in Ref.\cite{Conde:2016izb}. Recent interesting studies of fermionic fields may be found in Ref.\cite{Khabarov:2019dvi}.} We expect therefore that the studies in Refs.\cite{Metsaev:2005ar,Metsaev:2007rn,Sorokin:2018djm} might be helpful for the investigation of interacting supermultiplets in higher dimensions.% \footnote{ Twistor methods addressed, e.g., in Refs.\cite{Bandos:2019zqp,Uvarov:2018ose}, could also be helpful for studying interactions of massless supermultiplets in higher dimensions.}

\noindent {\bf iii}) We expect that our results for supersymmetric {\it massless} higher-spin light-cone gauge fields obtained in this paper might be helpful for the extension of our study to the case of supersymmetric {\it massive} fields. In the light-cone gauge approach, interaction vertices for massive arbitrary spin bosonic and fermionic fields in flat space were studied in Refs.\cite{Metsaev:2005ar,Metsaev:2007rn}.
We think that the light-cone gauge cubic vertices in Refs.\cite{Metsaev:2005ar,Metsaev:2007rn} will be helpful for studying supersymmetric theories of massless and massive fields. For the reader's convenience, we note that, in the Lorentz covariant approach, ${\cal N}=1$ higher-spin massless supermultiplets were studied by using the BRST method in Ref.\cite{Buchbinder:2015kca}, while ${\cal N}=1$ massive supermultiplets are considered in Ref.\cite{Zinoviev:2007js}. Cubic self-interactions of massive fields and couplings of massive fields to massless fields were studied by using the BRST approach in Ref.\cite{Metsaev:2012uy}.

\noindent {\bf iv}) In recent years, higher-spin theories in three-dimensional flat and AdS spaces have been studied extensively in the literature. Namely, we mention that interacting massless higher-spin gauge fields in $3d$ flat space have been studied in Ref.\cite{Mkrtchyan:2017ixk}, while massive higher-spin fields have been investigated in Refs.\cite{Buchbinder:2017izy}-\cite{Kuzenko:2016qwo}. Recent applications of conformal geometry for studying $3d$ conformal higher-spin fields may be found in Refs.\cite{Henneaux:2018agj,Kuzenko:2019ill}, while the unfolded formulation of $3d$ conformal fields is considered in Refs.\cite{Nilsson:2015pua}. We note that, because massless light-cone gauge higher-spin fields are trivial in $3d$ space, the usefulness of the light-cone formalism for studying such fields is not obvious. However, for the case of massive fields and conformal fields, we expect that the light-cone gauge approach might be helpful for a better understanding of various aspects of massive and conformal field theories in three dimensions. For the reader's convenience, we note that the light-cone formulation of higher-spin massive fields in $3d$ flat space is well known, while the light-cone gauge formulation of higher-spin massive fields in $AdS_3$ was obtained in Refs.\cite{Metsaev:1999ui,Metsaev:2000qb}. In the framework of the ordinary-derivative (2nd-derivative) light-cone gauge formalism, higher-spin conformal fields were studied in Ref.\cite{Metsaev:2016rpa}.

\noindent {\bf v}) Quantum properties of bosonic higher-spin gauge field theories were studied in Refs.\cite{Ponomarev:2016jqk,Skvortsov:2018jea}. In Ref.\cite{Skvortsov:2018jea}, arguments were given for the UV finiteness of the bosonic chiral higher-spin theory. We note also that, in the framework of the light-cone approach, a recent discussion of quantum properties of ${\cal N}=8$ supergravity may be found in Ref.\cite{Kallosh:2009db}. We believe that our results for cubic interactions of ${\cal N}$-extended arbitrary spin supermultiplets and the methods in Refs.\cite{Kallosh:2009db,Skvortsov:2018jea} might be helpful for the study of quantum properties of ${\cal N}$-extended supersymmetric higher-spin field theories. As noted in the literature, the extended ${\cal N}=8$ supergravity theory is a candidate for a UV finite theory (see, e.g., Ref.\cite{Kallosh:2009db} and references therein). We think therefore that supersymmetric (chiral and non-chiral) higher-spin theories are also candidates for UV finite theories. A last, but not least, motivation for our interest in supersymmetric higher-spin theories is that supersymmetry makes the study of four-point vertices easier. We expect that, as compared to bosonic higher-spin theories, interesting features of supersymmetric higher-spin theories will be seen upon consideration of four-point vertices.
For the case of 11d supergravity, an example of the application of supersymmetry to the study of four-point vertices can be found in Sec.~5 of Ref.\cite{Metsaev:2004wv}.

\noindent {\bf vi}) Applications of the light-cone gauge approach to the study of an interacting continuous-spin bosonic field may be found in Refs.\cite{Metsaev:2017cuz,Metsaev:2018moa}. We expect that the methods developed in this paper might be helpful for studying interactions of supersymmetric continuous-spin fields. In the Lorentz covariant framework, studies of interactions of a bosonic continuous-spin field may be found in Refs.\cite{Bekaert:2017xin}. A discussion of light-cone gauge continuous-spin fields in AdS is given in Refs.\cite{Metsaev:2017myp,Metsaev:2019opn}. \medskip

{\bf Acknowledgments}. This work was supported by the RFBR Grant No.17-02-00546. \setcounter{section}{0}\setcounter{subsection}{0}
1,116,691,501,373
arxiv
\section{Introduction and statement of results} Point sets characterized by means of minimizing a suitably defined potential energy function have applications in a surprising number of problems in various fields of science and engineering, ranging from physics and chemistry to geodesy and mathematics. We refer the reader to \cite{KuSa1997,HaSa2004,CoKu2007,BrDi2013,BrGr2015,Le2016,SaSe2015,SaSe2015b,RoSe2016,PeSe2015,BeMaOr2016,Be2015,BoDrHaSaSt2017,CoKuMiRaVi2017,Vi2017,Sk2019,BeDrOr2020,BeKnNo2020,PeSe2020,Ba2021,Pau2021,BeEtMaOr2021,BiGlMaPaVl2021,KivanMe2021} and the book \cite{BoHaSaBook2019}. A fundamental question concerns the asymptotic expansion of the minimal energy as the number of points tends to infinity. In general, at best only one or two terms are known; cf. \cite{LoSa2010,BrHaSa2012b,BeEt2020,BeSa2018,LoMcC2021} in the case of the sphere and \cite{MaMaRa2004,Bo2012} for curves. A notable exception is given by the minimal energy asymptotics for the unit circle for a whole class of energy functionals for which equally spaced points are optimal configurations. In these cases the energy formula can be written in a form that provides a complete asymptotic expansion in terms of powers of the number of points (see \cite{Br2016,BrHaSa2012,BrHaSa2009}): for $s \in (-2,\infty)$ with $s \neq 0, 1, 3, 5, \dots$ and for every $p = 1, 2, 3, \dots$, one has for the optimal \emph{Riesz $s$-energy} the asymptotic expansion \begin{equation*} \begin{split} \mathcal{L}_s(N) &= W_s \, N^2 + \frac{2\zetafcn(s)}{(2\pi)^s} \, N^{1+s} + \sum_{n=1}^p \alpha_n(s) \frac{2\zetafcn(s-2n)}{(2\pi)^s} \, N^{1+s-2n} + \BigOh_{s,p}(N^{-1+ s-2p}) \end{split} \end{equation*} as $N \to \infty$, where the constant $W_s$ is explicitly known, $\zetafcn(s)$ is the classical Riemann zeta function, and the explicitly computable coefficients $\alpha_n(s)$, $n\geq0$, satisfy the generating function relation \begin{equation*} \label{sinc.power.0} \left( \frac{\sin \pi z}{\pi z} \right)^{-s} = \sum_{n=0}^\infty \alpha_n(s) \, z^{2n}, \quad |z|<1, \ s\in \mathbb{C}. \end{equation*} The logarithmic energy of $N$ equally spaced points, which provide minimizing configurations, simply is \begin{equation*} \mathcal{L}_0(N) = - N \, \log N. \end{equation*} We remark that for general curves much less is known. We refer to \cite{MaMaRa2004,Bo2012}. In the following we shall utilize the fact that zeros of classical orthogonal polynomials can be characterized as minimizing configurations of certain potential energy functions for logarithmic point interactions. This approach enables us to derive complete asymptotic expansions. Let $A$ be an infinite compact subset of the complex plane $\mathbb{C}$. A configuration of $N$ points $\zeta_1,\dots,\zeta_N \in A$, $N\geq2$, that maximizes the product of all mutual distances $\prod_{j\neq k}|z_j-z_k|$ among all $N$-point systems $z_1,\dots,z_N \in A$ is called an {\em $N$-th system of Fekete points of $A$}. The maximum \begin{equation} \label{eq:Nth.discriminant.of.interval} \Delta_N(A) {:=} \max_{z_1,\dots,z_N \in A} \mathop{\prod_{j=1}^N \prod_{k=1}^N}_{j \neq k} \left| z_j - z_k \right| \end{equation} is the {\em $N$-th discriminant of $A$}. A fundamental potential-theoretic result for the {\em transfinite diameter or logarithmic capacity} $\CAP A$ of $A$ is \begin{equation*} \CAP A = \lim_{N\to\infty} \left[ \Delta_N(A) \right]^{1/[N(N-1)]}. \end{equation*} Fekete points, by definition, are points that maximize the absolute value of the Vandermonde determinant that appears in the polynomial Lagrange interpolation formula.
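For orientation, we record the two smallest instances for the interval $A=[-1,1]$ (a short check against the definitions; both configurations are classical): for $N=2$ the Fekete points are $\pm1$, so that
\begin{equation*}
\Delta_2([-1,1]) = \left| z_1 - z_2 \right|^2 = 4,
\end{equation*}
while for $N=3$ they are $\{-1,0,1\}$, giving $\Delta_3([-1,1]) = \left( 1 \cdot 1 \cdot 2 \right)^2 = 4$. Since $\CAP [-1,1] = 1/2$, the sequence $[\Delta_N([-1,1])]^{1/[N(N-1)]}$, which equals $2$ and $4^{1/6}\approx 1.26$ in these two cases, decreases slowly to $1/2$ as $N\to\infty$.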
It was Fekete~\cite{Fe1926} who investigated the connection between polynomial interpolation and the discrete logarithmic energy problem, which for given $N$ consists of finding those $N$-point configurations with minimal discrete {\em logarithmic energy} \begin{equation} \label{eq:log.energy} E_0(z_1,\dots,z_N) {:=} \mathop{\sum_{j=1}^N \sum_{k=1}^N}_{j \neq k} \log \frac{1}{\left| z_j - z_k \right|}, \qquad z_1,\dots,z_N\in A. \end{equation} We define the {\em logarithmic $N$-point energy of $A$} to be \begin{equation} \label{eq:min.log.energy} \mathcal{E}_0(A; N) {:=} \inf \left\{ E_0(z_1,\dots,z_N) : z_1,\dots,z_N\in A \right\} = - \log \Delta_N(A). \end{equation} One main goal of this paper is to derive the complete asymptotic expansion of $\mathcal{E}_0(A; N)$ as $N\to\infty$ when $A$ is the interval $[-1,1]$; see Theorem~\ref{thm:log.n.point.energy.asymptotics}. Indeed, regarding line-segments, it suffices to consider the interval $[-1,1]$, since the $N$-th discriminant of the rotated, dilated, and translated set $A^\prime=a + \eta e^{i \phi} A$ is given by $\Delta_N(A^\prime) = \eta^{N(N-1)} \Delta_N(A)$ and, therefore, $\mathcal{E}_0(A^\prime; N) - \mathcal{E}_0(A; N) = - ( \log \eta ) N ( N - 1 )$. Let $q > 0$ and $p > 0$ be numbers representing charges at the left endpoint and right endpoint, respectively, of the interval $[-1,1]$. The problem of finding $n$ points $x_1^{(n)},\dots,x_n^{(n)}$, the locations of unit point charges, in the interior of $[-1,1]$ such that the expression \begin{equation} \label{Jacobi.expr} T_n(x_1,\dots,x_n) {:=} \prod_{i=1}^n \left( 1 - x_i \right)^p \, \prod_{j < k} \left| x_j - x_k \right| \, \prod_{\ell=1}^n \left( 1 + x_{\ell} \right)^q \end{equation} is maximized, or equivalently, $\log(1/T_n)$ is minimized over all $n$-point systems $x_1,\dots,x_n$ in $[-1,1]$, is a classical problem that owes its solution to Stieltjes \cite{St1884,St1885} (also see Schur~\cite{Sch1918}). In analogy to the $N$-th discriminant of a compact set $A$ we may define the \emph{$n$-th $(p,q)$-discriminant of $[-1,1]$} as \begin{equation} \label{eq:Nth.pq.discriminant.of.interval} \Delta_n^{(p,q)}([-1,1]) {:=} \max_{x_1,\dots,x_n \in [-1,1]} \left( T_n(x_1,\dots,x_n) \right)^2. \end{equation} The quantity $\log(1/T_n^2)$ can be interpreted as the potential energy of the point charges at $x_1,\dots,x_n$ in an external field exerted by the charge $p$ at $x=1$ and the charge $q$ at $x=-1$, where the 'points' interact according to a logarithmic potential. We shall call such minimal potential energy points {\em {\bf elliptic} Fekete points} in order to distinguish them from the Fekete points defined previously. Stieltjes showed that the points $x_1^{(n)},\dots,x_n^{(n)}$ of minimal potential energy are, in fact, the zeros of the Jacobi polynomial $P_n^{(\alpha,\beta)}$, where $\alpha=2p-1$ and $\beta=2q-1$. A more modern approach is to have external fields in the form of appropriate weight functions instead of constraints. (See, e.g., \cite{Is2000b} for a discussion of this model.) We also refer the interested reader to the survey article \cite{MaMaMa2007}. Stieltjes' ingenious observation that the zeros of classical orthogonal polynomials have an electrostatic interpretation in terms of logarithmic potential enables us to find, for every $n\geq2$, the explicit elliptic Fekete $n$-point configuration for the discrete logarithmic energy problem associated with the given family of orthogonal polynomials.
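Stieltjes' characterisation is also easy to probe numerically. The following minimal sketch (the values of $p$, $q$, $n$ are illustrative choices of ours; it relies on SciPy's \texttt{roots\_jacobi} for the zeros of $P_n^{(\alpha,\beta)}$) checks that the zeros of $P_n^{(2p-1,2q-1)}$ maximize $T_n$ in \eqref{Jacobi.expr} against random admissible perturbations:
\begin{verbatim}
import numpy as np
from scipy.special import roots_jacobi

p, q, n = 1.5, 0.75, 8              # illustrative charges and system size
alpha, beta = 2*p - 1, 2*q - 1      # Stieltjes: alpha = 2p-1, beta = 2q-1

def log_T(x):
    # log T_n(x_1,...,x_n) for the external field problem
    pair = sum(np.log(abs(x[j] - x[k]))
               for j in range(n) for k in range(j + 1, n))
    return p*np.log(1 - x).sum() + q*np.log(1 + x).sum() + pair

zeros, _ = roots_jacobi(n, alpha, beta)   # zeros of P_n^(alpha,beta)
best = log_T(zeros)

rng = np.random.default_rng(1)
for _ in range(1000):                     # random admissible perturbations
    trial = np.clip(zeros + 1e-3*rng.standard_normal(n),
                    -1 + 1e-9, 1 - 1e-9)
    assert log_T(trial) < best            # any perturbation lowers T_n
print("log T_n at the Jacobi zeros:", best)
\end{verbatim}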
Moreover, since the target functions of the respective maximum problems are closely related to the discriminants of the classical polynomials, the asymptotic expansion of the potential energy of elliptic Fekete $n$-point configurations as $n \to \infty$ can be obtained. Our goal is to derive the complete asymptotic expansion of the potential energy of elliptic Fekete $n$-point configurations associated with the external field problem induced by classical orthogonal polynomials. We remark that the approach used here can also be applied to point systems in $[0,\infty)$ and $\mathbb{R}$ with suitable constraints on the centroid or inertia of the point system, which leads to the study of zeros of Laguerre and Hermite polynomials, respectively. A generalization is provided by the so-called Menke systems for the real line studied in \cite{MaBrSa2009}; see also \cite{Me1972, Me1974}. Such systems consist of two interlaced sets of points which can be characterized as zeros or extrema of classical orthogonal polynomials. The asymptotic analysis of the associated discriminants is technically much more involved and we leave the presentation of these results to follow-up papers.

{\bf Outline of the paper:} In the remaining part of the introduction we present the asymptotic expansions for elliptic Fekete points in the interval $[-1,1]$ and compare the results with the expansion for Fekete points. In Section~\ref{sec:aux.res}, we gather asymptotic results for the discriminant of the Jacobi polynomial. The proofs of the main asymptotic results are presented in Section~\ref{sec:proofs.main.res}. The Appendix collects technical results that are frequently used in the asymptotic analysis. \subsection{Preliminaries} Our asymptotic expansions are of Poincar{\'e}-type and we adopt the convention of writing them as infinite series (even if an infinite series does not converge). We make use of the usual computational rules. The coefficients of the asymptotic expansions will be given in terms of the Riemann zeta function~$\zetafcn(s)$ and the Hurwitz zeta function $\zetafcn(s,a)$ and their (partial) derivatives with respect to $s$ evaluated at negative integers $s$. The well-known relation \begin{equation*} \zetafcn( -m, a ) = - \frac{\bernoulliB_{m+1}( a )}{m+1}, \qquad m \in \mathbb{N}_0, \end{equation*} enables us to use instead the Bernoulli polynomials $\bernoulliB_m$ and the Bernoulli numbers $B_m$. The {\em Glaisher-Kinkelin} constant (see \cite[p.~135]{Fi2003})\footnote{The established symbol for the Glaisher-Kinkelin constant is $A$ which we also use for a generic compact set. The use of the symbol $A$ should be clear from the context.} is defined by \begin{equation} \label{eq:Glaisher-Kinkelin} A {:=} \lim_{n\to \infty} \frac{1^1 2^2 \cdots n^n}{n^{n(n+1)/2+1/12} e^{-n^2/4}} = 1.28242712\dots \end{equation} and appears in our computations by means of the well-known relation $\zetafcn^\prime(-1) = 1/12 - \log A$. The {\em polygamma} function is defined by \cite[6.4.1]{AbSt1992} \begin{equation*} \digammafcn^{(n)}(z) = \frac{\,d^{n+1}}{\,d z^{n+1}} \log \gammafcn(z), \qquad n = 1,2,3, \dots. \end{equation*} Using Liouville's fractional integration and differentiation operator, one can also define polygamma functions of negative order (called ``negapolygammas'' in \cite{Go1995}) as (see \cite{Ad1998}) \begin{equation*} \digammafcn^{(-n)}(z) {:=} \frac{1}{(n-2)!} \int_0^z \left( z - t \right)^{n-2} \log \gammafcn(t) \,d t, \quad \mathop{\mathrm{Re}} z > 0, \qquad n=1,2,3,\dots.
\end{equation*} Since \begin{equation} \label{eq:negadigamma} \digammafcn^{(-2)}(x) = \int_0^x \log \gammafcn(t) \,d t = \frac{\left(1-x\right)x}{2} + \frac{x}{2} \log 2\pi - \zetafcn^\prime(-1) + \left. \frac{\partial}{\partial s} \zetafcn(s,x) \right|_{s=-1}, \end{equation} we rewrite $\zetafcn^\prime(-1,x) {:=} \frac{\partial}{\partial s} \zetafcn(s,x) \big|_{s=-1}$ in terms of $\digammafcn^{(-2)}(x)$. \subsection{Elliptic Fekete points in the interval $[-1,1]$} Regarding the external field problem associated with relation \eqref{Jacobi.expr}, we are interested in the asymptotic expansion of the minimum value of the {\em potential energy} \begin{equation} \label{eq:jacobi.potential.energy} \mathcal{L}([-1,1], q, p; x_1, \dots, x_n) {:=} 2 \log \frac{1}{T_n(x_1,\dots,x_n)}, \qquad x_1, \dots, x_n \in [-1,1], \end{equation} as $n \to \infty$. An $n$-point configuration $\{x_{1}^{(n)}, \dots, x_{n}^{(n)} \}$ minimizing \eqref{eq:jacobi.potential.energy}, or equivalently, maximizing \eqref{Jacobi.expr} over all $n$-point configurations in $[-1,1]$ is called an {\em elliptic $(p,q)$-Fekete $n$-point configuration} in $[-1,1]$ associated with the external field implied by \eqref{Jacobi.expr}. We remark that taking twice $\log(1/T_n)$ as the potential energy is consistent with the physicist's point of view that the potential energy contained in the electrostatic field of $N$ charges $q_1,\dots,q_N$ at positions $z_1,\dots,z_N$ in the plane, up to some constant factor arising from the used unit system, is given by $\sum_{j\neq k} q_j q_k \log(1/|z_j-z_k|)$; see, e.g., Jackson~\cite{Ja1998}. \begin{thm} \label{thm:Jacobi} Let $p > 0$ and $q > 0$. The potential energy of elliptic $(p,q)$-Fekete $n$-point configurations in the interval $[-1,1]$ has the Poincar{\'e}-type asymptotic expansion \begin{equation*} \begin{split} \mathcal{L}([-1,1], q, p; n) &= \left( \log 2 \right) n^2 - n \log n + 2 \left( \log 2 \right) \left( p + q - 1 \right) n - 2 \left( \left( p - \frac{1}{4} \right)^2 + \left( q - \frac{1}{4} \right)^2 \right) \log n \\ &\phantom{=}+ C_1(p,q) + \sum_{m=1}^{\infty} \frac{(-1)^{m-1}}{m\left(m+1\right)} \, \mathcal{H}_m(p,q) \, n^{-m}, \end{split} \end{equation*} where \begin{align*} C_1(p,q) &{:=} 2 \left( \left( p + q - 1 \right)^2 - \frac{11}{24} \right) \log 2 - \left( p + q \right) \log \pi - 3 \log A + \digammafcn^{(-2)}( 2p ) + \digammafcn^{(-2)}( 2q ), \\ \mathcal{H}_m(p,q) &{:=} \zetafcn( -m-1 ) + \zetafcn( -m-1, 2p ) + \zetafcn( -m-1, 2q ) + \left( 1 - 2^{-m} \right) \zetafcn( -m-1, 2p + 2q - 1 ). \end{align*} \end{thm} \begin{rmk} The potential energy of elliptic $(p,q)$-Fekete $n$-point configurations on the interval $[-1,1]$ is invariant under translation (and rotation) of the line-segment $[-1,1]$ in the complex plane. From \eqref{Jacobi.expr} it can be seen that for a scaling constant $\eta>0$ there holds \begin{equation*} \mathcal{L}(\eta [-1,1], q, p; n) = \mathcal{L}([-1,1], q, p; n) - \left( \log \eta \right) n^2 - \left( \log \eta \right) \left( 2 p + 2 q -1 \right) n. \end{equation*} Thus, only the $n^2$-term and the $n$-term are sensitive to a rescaling of the underlying interval. \end{rmk} \begin{rmk} The $n$-th $(p,q)$-discriminant of the interval $[-1,1]$ is given by (cf.
Proof of Theorem~\ref{thm:Jacobi}) \begin{equation*} \Delta_n^{(p,q)}([-1,1]) = 2^{n ( n + 2p + 2q - 1 )} \frac{\prod_{k=1}^n k^k \left( k + 2p - 1 \right)^{k+2p-1} \left( k + 2q - 1 \right)^{k+2q-1}}{\prod_{k=n-1}^{2(n-1)} \left( k + 2p + 2q \right)^{k+2p+2q}} \end{equation*} from which an explicit formula for $\mathcal{L}([-1,1], q, p; n)$ follows. An explicit formula in terms of quantities related to Jacobi polynomials is given in \eqref{eq:explicit.cal.L.interval.p.q.n}. \end{rmk} In the symmetric external field case $p = q$ we have the following result. \begin{cor} \label{cor:Jacobi} Let $p>0$. The potential energy of elliptic $(p,p)$-Fekete $n$-point configurations in the interval $[-1,1]$ has the Poincar{\'e}-type asymptotic expansion \begin{equation*} \begin{split} \mathcal{L}([-1,1], p, p; n) &= \left( \log 2 \right) n^2 - n \log n + 2 \left( \log 2 \right) \left( 2 p - 1 \right) n - 4 \left( p - \frac{1}{4} \right)^2 \log n + C_1(p) \\ &\phantom{=}+ \sum_{m=1}^{\infty} \frac{(-1)^{m-1}}{m \left( m + 1 \right)} \, \mathcal{H}_m(p) \, n^{-m}, \end{split} \end{equation*} where \begin{align*} C_1(p) &{:=} 2 \left( \left( 2 p - 1 \right)^2 - \frac{11}{24} \right) \log 2 - 2 p \log \pi - 3 \log A + 2 \digammafcn^{(-2)}( 2p ), \\ \mathcal{H}_m(p) &{:=} \zetafcn( -m-1 ) + 2 \zetafcn( -m-1, 2p ) + \left( 1 - 2^{-m} \right) \zetafcn( -m-1, 4p - 1 ). \end{align*} \end{cor} The asymptotic expansion of the {\bf logarithmic energy} of elliptic $(p,q)$-Fekete $n$-point configurations in $[-1,1]$ is given next. \begin{thm} \label{thm:Jacobi.log.energy} Let $p > 0$ and $q > 0$. The logarithmic energy of elliptic $(p,q)$-Fekete $n$-point configurations $\omega_n$ in $[-1,1]$ has the Poincar{\'e}-type asymptotic expansion \begin{equation*} \begin{split} E_0(\omega_n) &= \left( \log 2 \right) n^2 - n \log n - 2 \left( \log 2 \right) n + 2 \left( p^2 + q^2 - \frac{1}{8} \right) \log n + C_1^\prime(p,q) \\ &\phantom{=}+ \sum_{m=1}^{\infty} \frac{(-1)^{m-1}}{m} \, \mathcal{H}_{m}^\prime(p,q) \, n^{-m} \end{split} \end{equation*} as $n \to \infty$, where \begin{align*} C_1^\prime(p,q) &{:=} - 2 \left( \left( p + q \right)^2 - \frac{13}{24} \right) \log 2 - 3 \log A - 2 p \log \gammafcn(2p) + \digammafcn^{(-2)}(2p) - 2 q \log \gammafcn(2q) + \digammafcn^{(-2)}(2q), \\ \mathcal{H}_{m}^\prime(p,q) &{:=} \frac{\zetafcn( -m-1 ) + \zetafcn( -m-1, 2p ) + \zetafcn( -m-1, 2q ) + \left( 1 - 2^{-m} \right) \zetafcn( -m-1, 2p+2q-1)}{m+1} \\ &\phantom{=}- 2p \zetafcn( -m, 2p ) - 2q \zetafcn( -m, 2q ) - 2 \left( 1 - 2^{-m} \right) \left( p + q \right) \zetafcn( -m, 2p + 2q - 1 ). \end{align*} \end{thm} \begin{rmk} Note that the asymptotic expansions of the potential and the logarithmic energy of elliptic $(p,q)$-Fekete $n$-point configurations $\omega_n$ in $[-1,1]$ coincide in the first two leading terms if $p+q\neq 2$ and coincide in the first three leading terms if $p+q=2$. \end{rmk} In the case $p = q = 1$, maximizing relation \eqref{Jacobi.expr} for $n$-point configurations in the interval $[-1,1]$ is equivalent to maximizing the product of all mutual distances of $N=n+2$ points in $[-1,1]$: \begin{equation} \mathop{\prod_{j=0}^{n+1}\prod_{k=0}^{n+1}}_{j\neq k} | x_j - x_k |, \qquad -1 \leq x_0, x_1, \dots, x_n, x_{n+1} \leq 1. \end{equation} (Indeed, if an endpoint of the interval $[-1,1]$ is not in a configuration $\omega_N$, then the product of all mutual distances between points can be increased by rescaling the points in $\omega_N$.)
Hence, the elliptic $(1,1)$-Fekete $n$-point configuration in $[-1,1]$ together with the endpoints $\pm 1$ is also a Fekete $N$-point configuration $\omega_N^*$ on the interval $[-1,1]$ with $N = n + 2$ points. From the electrostatic interpretation of the zeros of classical orthogonal polynomials (cf. Theorem~\ref{thm:Jacobi.electrostatic.interpretation} and the remark after that theorem), we have that $\omega_N^*$ is the set of all extremal points (including the endpoint extrema) of the Legendre polynomial $\LegendreP_{n+1} = \LegendreP_{N-1}$. \begin{thm} \label{thm:log.n.point.energy.asymptotics} The logarithmic $N$-point energy of the interval $[-1,1]$ has the Poincar{\'e}-type asymptotic expansion \begin{equation*} \begin{split} \mathcal{E}_0([-1,1]; N) &= \left( \log 2 \right) N^2 - N \log N - 2 \left( \log 2 \right) N - \frac{1}{4} \log N + \frac{13 \log 2}{12} - 3 \log A \\ &\phantom{=}+ \sum_{m=1}^{\infty} \frac{1}{m(m+1)} \left( 1 - 2^{-m} + 4 \left( 1 - 2^{-(m+2)} \right) \frac{B_{m+2}}{m+2} \right) N^{-m} \end{split} \end{equation*} as $N\to\infty$. Here, $A$ denotes the Glaisher-Kinkelin constant given in \eqref{eq:Glaisher-Kinkelin}. \end{thm} \begin{rmk} The $N$-th discriminant of the interval $[-1,1]$ defined in \eqref{eq:Nth.discriminant.of.interval} can be written as (cf. Proof of Theorem~\ref{thm:log.n.point.energy.asymptotics}) \begin{equation*} \Delta_N([-1,1]) = 2^{N(N-1)} N^N \frac{\prod_{k=1}^{N-1} k^{3k}}{\prod_{k=N-1}^{2(N-1)} k^{k}} \end{equation*} and via \eqref{eq:min.log.energy} we get an explicit formula for $\mathcal{E}_0([-1,1]; N)$. An explicit formula in terms of quantities related to Jacobi polynomials is given in \eqref{eq:min.log.energy.of.interval}. \end{rmk} \subsection{Fekete points in the interval $[-2,2]$} This case has been treated analytically in \cite{Po1964}. More generally, Pommerenke obtained that for a convex compact planar set $A$ of transfinite diameter (logarithmic capacity) $\CAP A$, the $N$-th discriminant of $A$ satisfies \begin{equation*} N^N \left( \CAP A \right)^{N(N-1)} \leq \Delta_N(A) \leq 2^{2(N-1)} N^N \left( \CAP A \right)^{N(N-1)}. \end{equation*} Let $W(A) {:=} - \log ( \CAP A )$ denote the {\em logarithmic energy of $A$}. Then it follows that the logarithmic $N$-point energy of a convex compact planar set $A$ satisfies \begin{equation*} - \left( W(A) + \log 4 \right) N + \log 4 \leq \mathcal{E}_0(A; N) - \left( W(A) N^2 - N \log N \right) \leq - W(A) N. \end{equation*} Considering the star-shaped curves $S_m = \bigcup_{\nu=1}^m [ 0, 2^{2/m} \zeta^\nu]$ ($\zeta {:=} e^{2\pi i / m}$) of transfinite diameter $1$ defined by the conformal mapping $F(z) = z ( 1 + z^{-m} )^{2/m}$, where $m$ is the number of star branches, he showed that $\Delta_N(S_2) \geq 2^{2(N-1)} N^N$. Consequently, for $A=[-2,2]=S_2$ these results imply \begin{equation*} 2^{2(N-1)} N^N \leq \Delta_N(S_2) \leq 2^{2(N-1)} N^N. \end{equation*} In \cite{BeClDu2004} the electrostatic equilibria of $N$ discrete charges of size $1/N$ on a two-dimensional conductor (domain) are studied. Although \cite{BeClDu2004} is mostly concerned with the placement of charges, it provides an interpretation of the terms of the asymptotics of the ground-state energy, which we will follow here.
From Theorem~\ref{thm:log.n.point.energy.asymptotics} we have that (note that $\CAP[-2,2]=1$ and therefore $W([-2,2])=0$) \begin{align*} \frac{\mathcal{E}_0([-2,2];N)}{N^2} &= W([-2,2]) & &\text{(continuum correlation energy)} \\ &\phantom{=}- \frac{\log N}{N} & &\text{(self energy)} \\ &\phantom{=}- \frac{\log 2}{N} & &\text{(correlation energy)} \\ &\phantom{=}- \frac{1}{4} \frac{\log N}{N^2} & & \\ &\phantom{=}+ \left( \frac{13 \log 2}{12} - 3 \log A \right) \frac{1}{N^2} & & \\ &\phantom{=}+ \cdots, & & \end{align*} where $\log A$ is the logarithm of the Glaisher-Kinkelin constant, see \eqref{eq:Glaisher-Kinkelin}. In fact, Theorem~\ref{thm:log.n.point.energy.asymptotics} gives the complete asymptotic expansion of $\mathcal{E}_0([-1,1];N)$ as $N\to\infty$. Note that only the $N^2$-term and the $N$-term are affected by a change of the transfinite diameter; i.e., as $N \to \infty$: \begin{equation*} \begin{split} \mathcal{E}_0([a,b]; N) &= W([a,b]) \, N^2 - N \log N - \left( \log 2 + W([a,b]) \right) N - \frac{1}{4} \log N + \frac{13 \log 2}{12} - 3 \log A \\ &\phantom{=}+ \sum_{m=1}^{\infty} \frac{1}{m(m+1)} \left( 1 - 2^{-m} + 4 \left( 1 - 2^{-(m+2)} \right) \frac{B_{m+2}}{m+2} \right) N^{-m}. \end{split} \end{equation*} \section{Asymptotics of the discriminant of the Jacobi polynomial} \label{sec:aux.res} For the proof of Theorem~\ref{thm:Jacobi} we need an asymptotic expansion of the leading coefficient, the values at $\pm 1$, and the discriminant of the Jacobi polynomial. We recall the following facts. The Jacobi polynomials $P_n^{(\alpha, \beta)}(x)$ ($n\geq0$, $\alpha, \beta > -1$) are orthogonal on the interval $[-1,1]$ with the weight function $w(x) = ( 1 - x )^\alpha ( 1 + x )^\beta$ and normalized such that $P_n^{(\alpha,\beta)}(1) = \Pochhsymb{1+\alpha}{n} / n!$. Hence \begin{equation*} P_n^{(\alpha,\beta)}(x) = \lambda_n^{(\alpha,\beta)} x^n + \cdots, \qquad \lambda_n^{(\alpha,\beta)} = 2^{-n} \binom{2n+\alpha+\beta}{n}. \end{equation*} We note further that $P_n^{(\alpha,\beta)}(-x) = (-1)^n P_n^{(\beta,\alpha)}(x)$. Therefore, $P_n^{(\alpha,\beta)}(-1) = (-1)^n \Pochhsymb{1+\beta}{n} / n!$. We prove the following Poincar{\'e}-type asymptotic results expressed in terms of the zeta function and the Hurwitz zeta function. \begin{lem} \label{lem:Jacobi.asymptotics} Let $\alpha > -1$ and $\beta > -1$. Then \begin{align*} \log \lambda_n^{(\alpha,\beta)} &= \left( \log 2 \right) n - \frac{1}{2} \log n + \left( \alpha + \beta \right) \log 2 - \frac{1}{2} \log \pi \\ &\phantom{=}+ \sum_{m=1}^{\infty} \frac{(-1)^{m-1}}{m} \Big( \left( 1 - 2^{-m} \right) \zetafcn( -m, \alpha + \beta + 1 ) + \zetafcn( - m ) \Big) n^{-m}, \\ \log P_n^{(\alpha,\beta)}(1) &= \alpha \log n - \log \gammafcn(\alpha+1) + \sum_{m=1}^{\infty} \frac{(-1)^{m}}{m} \, \Big( \zetafcn( -m, \alpha + 1 ) - \zetafcn( -m ) \Big) \, n^{-m}. \end{align*} \end{lem} \begin{proof} Since \begin{equation*} \log \lambda_n^{(\alpha,\beta)} = - n \log 2 + \log \gammafcn( 2n + \alpha + \beta + 1 ) - \log \gammafcn( n + \alpha + \beta + 1 ) - \log \gammafcn(n + 1), \end{equation*} application of \eqref{eq:LogGamma.asymptotics} and simplification gives the first result. For the second part we have \begin{equation*} \log P_n^{(\alpha,\beta)}(1) = - \log \gammafcn(\alpha+1) + \log \gammafcn(n+\alpha+1) - \log \gammafcn(n+1) \end{equation*} and application of \eqref{eq:LogGamma.asymptotics} yields the second part. In either part we used $\zetafcn(-m,1) = \zetafcn(-m)$ for $m \geq 1$.
\end{proof} The connection between the energy optimization problem and the zeros of certain Jacobi polynomials is established in the following theorem. Uniqueness of the maximal configuration also follows from this fact. \begin{thm}[{\cite[Thm.~6.7.1]{Sz1939}}] \label{thm:Jacobi.electrostatic.interpretation} Let $p > 0$ and $q > 0$, and let $\{x_1, \dots, x_n\}$ be a system of real numbers in the interval $[-1,1]$ for which the expression \eqref{Jacobi.expr} becomes a maximum. Then $x_1, \dots, x_n$ are the zeros of the Jacobi polynomial $P_n^{(\alpha, \beta)}(x)$, where $\alpha = 2 p - 1$, $\beta = 2q - 1$. \end{thm} \begin{rmk} In the particular case of $p = q = 1$, it follows from the well-known relations (cf. \cite[Ch.~18]{DLMF2021.06.15}) \begin{equation*} P_n^{(1,1)}( x ) = \frac{2}{n+2} \, \GegenbauerC_n^{(3/2)}( x ) = \frac{2}{n+2} \, \frac{\,d \LegendreP_{n+1}}{\,d x}(x) \end{equation*} that the unique maximizing configuration for \eqref{Jacobi.expr} in the interval $[-1,1]$ can be characterized as the set of the zeros of the Jacobi polynomial $P_n^{(1,1)}$, the zeros of the Gegenbauer polynomial $\GegenbauerC_n^{(3/2)}$, or the extrema of the Legendre polynomial $\LegendreP_{n+1}$. \end{rmk} An explicit formula for the discriminant of $P_n^{(\alpha,\beta)}(x) = \lambda_n^{(\alpha,\beta)} ( x - x_{1,n} ) \cdots ( x - x_{n,n} )$, defined by \begin{equation} \label{eq:D.n.alpha.beta} D_n^{(\alpha,\beta)} {:=} \left[ \lambda_n^{(\alpha,\beta)} \right]^{2n-2} \mathop{\prod_{j=1}^n \prod_{k=1}^n}_{j<k} \left( x_{j,n} - x_{k,n} \right)^2, \end{equation} can be obtained without computing the zeros of Jacobi polynomials: \begin{thm}[{\cite[Thm.~6.71]{Sz1939}}] \label{thm:Jacobi.electrostatic.interpretation.discriminant} Let $\alpha>-1$ and $\beta>-1$. Then \begin{align*} D_n^{(\alpha,\beta)} &= 2^{-n(n-1)} \prod_{\nu=1}^n \nu^{\nu-2n+2} \left( \nu + \alpha \right)^{\nu-1} \left( \nu + \beta \right)^{\nu-1} \left( \nu + n + \alpha + \beta \right)^{n-\nu}. \end{align*} \end{thm} The logarithm of the discriminant of the Jacobi polynomials admits the following Poincar{\'e}-type asymptotic expansion. The Glaisher-Kinkelin constant $A$ is given in \eqref{eq:Glaisher-Kinkelin} and the negapolygamma function $\digammafcn^{(-2)}$ is given in \eqref{eq:negadigamma}. \begin{lem} \label{lem:Jacobi.discr.asymptotics} Let $\alpha > -1$ and $\beta > -1$.
Then there holds the Poincar{\'e}-type asymptotic expansion \begin{equation*} \begin{split} \log D_n^{(\alpha,\beta)} &= (\log 2 ) n^2 + \left( 2 \left( \alpha + \beta \right) \log 2 - \log \pi \right) n + \frac{1}{2} \left( \frac{5}{2} - \left( \alpha + 1 \right)^2 - \left( \beta + 1 \right)^2 \right) \log n + C(\alpha,\beta) \\ &\phantom{=}- \sum_{m=1}^{\infty} \frac{(-1)^{m-1}}{m} \, \Psi_m(\alpha, \beta) \, n^{-m}, \end{split} \end{equation*} where \begin{align*} C(\alpha,\beta) &{:=} - \frac{1}{8} - \frac{1}{2} \left( \alpha + \beta + \frac{1}{2} \right)^2 + \frac{1}{2} \left( \frac{11}{6} + \left( \alpha + \beta \right)^2 \right) \log 2 + \log \pi + 3 \log A \\ &\phantom{{:=}}+ \left( \alpha + 1 \right) \log \gammafcn(\alpha + 1) - \digammafcn^{(-2)}( \alpha + 1 ) + \left( \beta + 1 \right) \log \gammafcn(\beta + 1) - \digammafcn^{(-2)}( \beta + 1 ), \\ \Psi_m(\alpha, \beta) &{:=} - \frac{2m+1}{m+1} \zetafcn( -m - 1 ) - 2 \zetafcn( - m ) \\ &\phantom{{:=}}+ \left( \alpha + 1 \right) \zetafcn( -m, \alpha + 1 ) - \frac{\zetafcn( -m-1, \alpha + 1 )}{m+1} + \left( \beta + 1 \right) \zetafcn( -m, \beta + 1 ) - \frac{\zetafcn( -m-1, \beta + 1 )}{m+1} \\ &\phantom{{:=}}- \frac{\left( 2 - 2^{-m} \right) m + 1 - 2^{-m}}{m+1} \zetafcn( -m-1, \alpha + \beta + 1 ) + \left( \alpha + \beta \right) \left( 1 - 2^{-m} \right) \zetafcn( -m, \alpha + \beta + 1 ). \end{align*} \end{lem} \begin{proof} First, we observe that differentiating the identity \begin{equation*} \sum_{k=m+1}^n \left( k + x + a \right)^{-s} = \zetafcn(s, m + x + a + 1 ) - \zetafcn(s, n + x + a + 1), \qquad 0 \leq m < n, \end{equation*} with respect to $s$ and setting $s=-1$ gives the following formula (using $\zetafcn^\prime( -1, z ) := \frac{\partial}{\partial s} \zetafcn( s, z ) |_{s = - 1}$) \begin{equation} \label{eq:logsum2} \sum_{k=m+1}^n \left( k + x + a \right) \log ( k + x + a ) = \zetafcn^\prime(-1, n + x + a + 1 ) - \zetafcn^\prime(-1, m + x + a + 1 ), \qquad 0 \leq m < n. \end{equation} Hence \begin{equation*} \log D_n^{(\alpha,\beta)} = - n \left( n - 1 \right) \log 2 + \mathfrak{A}_n + \mathfrak{B}_n(\alpha) + \mathfrak{B}_n(\beta) + \mathfrak{C}_n(\alpha+\beta), \end{equation*} where for $\alpha>-1$ and $b>-2$: \begin{align*} \mathfrak{A}_n &{:=} \sum_{\nu=1}^n \left( \nu - 2 n + 2 \right) \log \nu = \zetafcn^\prime(-1, n + 1 ) - \zetafcn^\prime(-1) - 2 \left( n - 1 \right) \log \gammafcn(n+1), \\ \mathfrak{B}_n(\alpha) &{:=} \sum_{\nu=1}^n \left( \nu - 1 \right) \log (\nu + \alpha) = \zetafcn^\prime(-1, n + \alpha + 1 ) - \zetafcn^\prime(-1, \alpha + 1 ) - \left( \alpha + 1 \right) \log \Pochhsymb{\alpha+1}{n}, \\ \mathfrak{C}_n(b) &{:=} \sum_{\nu=1}^n \left( n - \nu \right) \log ( \nu + n + b ) = \left( 2 n + b \right) \log \Pochhsymb{n+b+1}{n} - \zetafcn^\prime(-1, 2 n + b + 1 ) + \zetafcn^\prime(-1, n + b + 1 ). \end{align*} The asymptotic forms follow from applying \eqref{eq:LogGamma.asymptotics} and \eqref{eq:1st.HurwitzZeta.s.derivative.at.minus.1}. Simplification is done with the help of Mathematica. First, we get the Poincar{\'e}-type asymptotics \begin{equation*} \begin{split} \mathfrak{A}_n &= - \frac{3}{2} n^2 \log n + \frac{7}{4} n^2 + \frac{3}{2} n \log n - \left( 2 + \log( 2 \pi ) \right) n + \frac{13}{12} \log n + \log A - \frac{1}{6} + \log( 2 \pi ) \\ &\phantom{=}+ \sum_{m=1}^\infty \frac{(-1)^{m}}{m} \left( 2 \zetafcn( -m ) + \frac{2m + 1}{m+1} \zetafcn( -m - 1 ) \right) n^{-m}, \end{split} \end{equation*} where $A$ is the {\em Glaisher-Kinkelin} constant.
We used $\zetafcn^\prime( - 1 ) = \frac{1}{12} - \log A$. Furthermore, \begin{equation*} \begin{split} \mathfrak{B}_n(\alpha) &= \frac{1}{2} n^2 \log n - \frac{1}{4} n^2 - \frac{1}{2} n \log n + \left( \alpha + 1 \right) n + \frac{1}{2} \left( \frac{1}{6} - \left( \alpha + 1 \right)^2 \right) \log n \\ &\phantom{=}+ \log A - \digammafcn^{(-2)}( \alpha + 1 ) + \left( \alpha + 1 \right) \log \gammafcn( \alpha + 1 ) \\ &\phantom{=}+ \sum_{m=1}^\infty \frac{(-1)^{m-1}}{m} \left( \left( \alpha + 1 \right) \zetafcn( -m, \alpha + 1 ) - \frac{\zetafcn( -m - 1, \alpha + 1 )}{m+1} \right) n^{-m}. \end{split} \end{equation*} Here, we used the negapolygamma function defined in \eqref{eq:negadigamma} to simplify the constant term. Furthermore, \begin{equation*} \begin{split} \mathfrak{C}_n(b) &= \frac{1}{2} n^2 \log n + \left( 2 \log 2 - \frac{5}{4} \right) n^2 - \frac{1}{2} n \log n + \left( 2 \log 2 - 1 \right) b \, n + \frac{1}{2} \left( b^2 - \frac{1}{6} \right) \log 2 - \frac{1}{2} \left( b \left( b + 1 \right) + \frac{1}{6} \right) \\ &\phantom{=}+ \sum_{m=1}^\infty \frac{(-1)^m}{m} \left( \frac{ \frac{2 - 2^{-m}}{1 - 2^{-m}} \, m + 1}{m+1} \zetafcn( -m - 1, b + 1 ) - b \zetafcn( -m, b + 1 ) \right) \left( 1 - 2^{-m} \right) n^{-m}. \end{split} \end{equation*} Putting everything together, we arrive at the desired result. \end{proof} \section{Proofs of main results} \label{sec:proofs.main.res} \begin{proof}[Proof of Theorem~\ref{thm:Jacobi}] By Theorem~\ref{thm:Jacobi.electrostatic.interpretation}, the elliptic $(p,q)$-Fekete $n$-point configuration in $[-1,1]$ is given by the zeros of the Jacobi polynomial $P_n^{(\alpha,\beta)}$, where we set $\alpha = 2 p - 1$ and $\beta = 2 q - 1$. Let $x_{1,n},\dots, x_{n,n}$ denote the $n$ zeros of $P_n^{(\alpha,\beta)}$. From \eqref{eq:D.n.alpha.beta} and Theorem~\ref{thm:Jacobi.electrostatic.interpretation.discriminant} it follows that \begin{equation} \label{eq:T.n.Jacobi} T_n(x_{1,n},\dots, x_{n,n}) = \frac{\left[ P_n^{(\alpha,\beta)}(1) \right]^p}{\left[ \lambda_n^{(\alpha,\beta)} \right]^p} \frac{\sqrt{D_n^{(\alpha,\beta)}}}{\left[ \lambda_n^{(\alpha,\beta)} \right]^{n-1}} \frac{\left[ (-1)^n P_n^{(\alpha,\beta)}(-1) \right]^q}{\left[ \lambda_n^{(\alpha,\beta)} \right]^q} \end{equation} and therefore (recall that $\alpha = 2p-1$ and $\beta = 2q-1$) \begin{equation} \label{eq:explicit.cal.L.interval.p.q.n} \begin{split} \mathcal{L}([-1,1], q, p; n) &= 2 \left( n + p + q - 1 \right) \log \lambda_n^{(\alpha,\beta)} - \log D_n^{(\alpha,\beta)} \\ &\phantom{=}- 2p \, \log P_n^{(\alpha,\beta)}(1) - 2q \, \log P_n^{(\beta,\alpha)}(1). \end{split} \end{equation} Utilizing Lemma~\ref{lem:Jacobi.asymptotics} and Lemma~\ref{lem:Jacobi.discr.asymptotics}, we get the desired result with the help of Mathematica. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:Jacobi.log.energy}] Recall that $\alpha=2p-1$ and $\beta=2q-1$. From \eqref{eq:D.n.alpha.beta} we obtain \begin{equation} \label{eq:E.0.Jacobi.aux} E_0(x_{1,n}, \dots, x_{n,n}) = 2 \left( n - 1 \right) \log \lambda_n^{(\alpha,\beta)} - \log D_n^{(\alpha,\beta)}. \end{equation} Utilizing Lemma~\ref{lem:Jacobi.asymptotics} and Lemma~\ref{lem:Jacobi.discr.asymptotics}, we get the desired result with the help of Mathematica. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:log.n.point.energy.asymptotics}] Suppose $p>0$ and $q>0$. Set $\alpha = 2p-1$ and $\beta = 2q-1$. Let $\omega_n=\{x_{1,n},\dots,x_{n,n}\}$ be an elliptic $(p,q)$-Fekete $n$-point configuration in $[-1,1]$.
Rewriting \eqref{eq:log.energy} and using Theorem~\ref{thm:Jacobi.electrostatic.interpretation}, we get \begin{align*} E_0(\omega_n \cup \{-1, +1\}) &= E_0(\omega_n) + 2 \sum_{k=1}^n \log \frac{1}{\left| -1 - x_{k,n} \right|} + 2 \sum_{k=1}^n \log \frac{1}{\left| 1 - x_{k,n} \right|} + 2 \log \frac{1}{\left| - 1 - 1 \right|} \\ &= E_0(\omega_n) - 2 \log \left| \prod_{k=1}^n \left( -1 -x_{k,n} \right) \right| - 2 \log \left| \prod_{k=1}^n \left( 1 -x_{k,n} \right) \right| - 2 \log 2 \\ &= E_0(\omega_n) - 2 \log \left| \frac{P_n^{(\alpha,\beta)}(-1)}{\lambda_n^{(\alpha,\beta)}} \right| - 2 \log \left| \frac{P_n^{(\alpha,\beta)}(1)}{\lambda_n^{(\alpha,\beta)}} \right| - 2 \log 2 \\ &= 2 \left( n + 1 \right) \log \lambda_n^{(\alpha,\beta)} - \log D_n^{(\alpha,\beta)} - 2 \log P_n^{(\beta,\alpha)}(1) - 2 \log P_n^{(\alpha,\beta)}(1) - 2 \log 2. \end{align*} The substitution for $E_0(\omega_n)$ follows from \eqref{eq:E.0.Jacobi.aux}. For $p = q = 1$ and $n = N - 2$, we get \begin{equation} \label{eq:min.log.energy.of.interval} \mathcal{E}_0([-1,1]; N) = 2 \left( N - 1 \right) \log \lambda_{N-2}^{(1,1)} - \log D_{N-2}^{(1,1)} - 4 \log P_{N-2}^{(1,1)}(1) - 2 \log 2. \end{equation} The asymptotic expansions of Lemma~\ref{lem:Jacobi.asymptotics} and Lemma~\ref{lem:Jacobi.discr.asymptotics} are not in terms of the new asymptotic variable $N$. Instead, we simplify the right-hand side above further and use ideas from the proof of Lemma~\ref{lem:Jacobi.discr.asymptotics}. Combining the logarithmic terms and simplification yields \begin{equation*} \mathcal{E}_0([-1,1]; N) = E_0(\omega_n \cup \{-1, +1\}) = - \log \Delta_N([-1,1]), \end{equation*} where \begin{equation*} \Delta_N([-1,1]) = 2^{N(N-1)} N^N \left( \prod_{k=1}^{N-1} k^{3k} \right) \left( \prod_{k=N-1}^{2(N-1)} k^{-k} \right). \end{equation*} Hence, using \eqref{eq:logsum2}, \begin{equation*} \begin{split} \mathcal{E}_0([-1,1]; N) &= - N ( N - 1 ) \log 2 - N \log N + 3 \zetafcn^\prime( -1, 1 ) \\ &\phantom{=}- 3 \zetafcn^\prime( -1, N ) - \zetafcn^\prime( -1, N - 1 ) + \zetafcn^\prime( -1, 2N-1 ). \end{split} \end{equation*} Application of \eqref{eq:1st.HurwitzZeta.s.derivative.at.minus.1} and simplification gives the desired result. We used the following simplification (taking into account that the Bernoulli numbers $B_k$ with odd integers $k \geq 3$ vanish): \begin{align*} \left(1 - 2^{-m} \right) \zetafcn( -m-1, -1 ) + 3 \zetafcn( -m-1, 0 ) &= - \left(1 - 2^{-m} \right) \frac{\bernoulliB_{m+2}(-1)}{m+2} - 3 \frac{B_{m+2}}{m+2} \\ &= (-1)^{m-1} \left( 1 - 2^{-m} + 4 \left( 1 - 2^{-(m+2)} \right) \frac{B_{m+2}}{m+2} \right). \end{align*} \end{proof}
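For a quick numerical cross-check of Theorem~\ref{thm:log.n.point.energy.asymptotics} (a minimal sketch; the truncation order, the hard-coded value of the Glaisher-Kinkelin constant, and the small table of Bernoulli numbers are our choices), one may compare $\mathcal{E}_0([-1,1];N)=-\log\Delta_N([-1,1])$, evaluated from the closed product formula above, with the truncated expansion:
\begin{verbatim}
import math

A = 1.2824271291006226                     # Glaisher-Kinkelin constant
B = {3: 0.0, 4: -1/30, 5: 0.0, 6: 1/42}    # Bernoulli numbers B_3..B_6

def E0_exact(N):
    # -log Delta_N([-1,1]) from the closed product formula
    s = -N*(N - 1)*math.log(2) - N*math.log(N)
    s -= 3*sum(k*math.log(k) for k in range(1, N))
    s += sum(k*math.log(k) for k in range(N - 1, 2*(N - 1) + 1))
    return s

def E0_asym(N, M=4):
    # the expansion of the theorem, truncated after the N^{-M} term
    s = (math.log(2)*N**2 - N*math.log(N) - 2*math.log(2)*N
         - 0.25*math.log(N) + 13*math.log(2)/12 - 3*math.log(A))
    for m in range(1, M + 1):
        c = 1 - 2**(-m) + 4*(1 - 2**(-(m + 2)))*B[m + 2]/(m + 2)
        s += c/(m*(m + 1))*N**(-m)
    return s

for N in (10, 50, 200):    # difference should decay like N^{-(M+1)}
    print(N, E0_exact(N) - E0_asym(N))
\end{verbatim}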
\section{Introduction} Predicting the behaviour of interacting electrons is a significant open problem. Most progress to date has been made at low energies where linearisation of the single-particle dispersion led to the construction of Fermi\cite{NozieresBook} and, in one dimension, Luttinger-liquid theories\cite{GiamarchiBook}, in which the natural excitations are fermionic quasiparticles and hydrodynamic modes, respectively. The only significant progress beyond the linear approximation has been achieved via the heavy impurity model, for Fermi\cite{Nozieres69,Nozieres69_2,NozieresDeDominicis} and Luttinger\cite{GlazmanReview12} liquids, showcasing threshold singularities drastically different from the low energy behaviour. In this paper we investigate one-dimensional (1D) fermions beyond the linear approximation where the natural many-body excitations form a hierarchical structure,\cite{TSFetal15} in sharp contrast with the Fermi quasiparticles or hydrodynamic modes. We obtain the dynamical structure factor, in addition to the already known spectral function, and construct an inductive proof for calculating the form factors that are necessary for the dynamical response functions of the spinless fermion model. Experimentally, we demonstrate control over the interaction energy in a 1D wire, manifested as a change of the ratio of the charge and spin velocities at low energy scales. We find a new structure resembling the second-level excitations, which dies rapidly away from the first-level mode in a manner consistent with a power law. \begin{figure}[h!] {\centering\includegraphics[width=0.87\columnwidth]{fig_1_scheme} } \protect\caption{\label{fig:fig_main} Regions of the energy-momentum plane dominated by two different principal regimes of the system (bottom): hydrodynamic modes of the Luttinger liquid (top-right) at low energies (marked with cyan colour in the bottom panel) and the hierarchy of modes (top-left) in the rest of the plane. } \end{figure} We analyse theoretically the dynamic response functions\textendash that probe the many-body excitations\textendash for spinless fermions with short-range interactions. Our approach is exact diagonalisation via Bethe ansatz methods: the eigenenergies are evaluated in the coordinate representation and the form factors\textendash for the corresponding eigenstates\textendash are derived in the algebraic representation, via Slavnov's formula.\cite{Slavnov89} On the microscopic level the excitations arrange themselves into a hierarchy via their spectral weights\textendash given by the form factors\textendash with different powers of $\mathcal{R}^{2}/L^{2}$, where $\mathcal{R}$ is the particle-particle interaction radius and $L$ is the length of the system. As a result, only a small number of states\textendash out of an exponentially large Fock space of the many-body system\textendash carries the principal spectral power in representative regions on the energy-momentum plane, see Fig.\@ \ref{fig:fig_main}, allowing an analytical evaluation of the observables. At small energy this hierarchy crosses over to hydrodynamic behaviour, see Fig.\@ \ref{fig:fig_main}, which we illustrate by calculating the local density of states. At low energy it is suppressed in a power-law fashion according to the Tomonaga-Luttinger theory. Away from the Fermi point, where the Lorentz invariance is reduced to Galilean invariance by the parabolicity of the spectrum, the local density of states is dominated by the first(leading)-level excitations of the hierarchy.
This produces a $1/\sqrt{\varepsilon}$ van Hove singularity, where $\varepsilon$ is the energy measured from the bottom of the conduction band. At even higher energies the second-level excitations produce another $1/\sqrt{\left|\varepsilon\right|}$ van Hove singularity on the other side of the band edge, in the region that is forbidden for the non-interacting system. Using this framework, we study in detail the response of the correlated system to adding/removing a particle, given by the spectral function. The first-level excitations form a parabolic dispersion, like a single particle, with a mass renormalised by the Luttinger parameter $K$.\cite{TS14} The continuous spectrum of the second-level excitations produces a power-law line-shape around the first-level mode with a singular exponent $-1$. Around the spectral edges the second-level excitations give a power-law behaviour of the spectral function. For the hole edge the exponent calculated microscopically reproduces the prediction of the phenomenological heavy impurity model in one dimension.\cite{GlazmanReview12} However, around the particle edge the second-level excitations give a power law of a new type. Experimentally, momentum-resolved tunneling of electrons confined to a 1D geometry has been used to probe spin-charge separation in a Luttinger liquid.\cite{Yacoby02,Jompol09,Auslaender05,Tserkovnyak} This separation was observed to persist far beyond the energy range for which the Luttinger approximation is valid,\cite{Jompol09} showing the need for more sophisticated theories.\cite{Imambekov092} Particle-hole asymmetry has also been detected in relaxation processes.\cite{Yacoby10} In this paper we measure momentum-resolved tunneling of electrons in the upper layer of a GaAs-AlGaAs double-quantum-well structure from/to a 2D electron gas in the lower layer. This set-up probes the spectral function for spinful fermions. We observe well-resolved spin-charge separation at low energy with appreciable interaction strength---a distinct effect of the spinful generalisation of the Luttinger liquid.\cite{GiamarchiBook} The ratio of charge and spin velocities is $v_{{\rm c}}/v_{{\rm s}}\approx1.8$.\cite{Jompol09} At high energy, in addition to the spin and charge curves, we can also resolve structure just above $k_{\rm F}$ that appears to be the edge of the second-level excitations. However, the amplitude decays rapidly and for higher $k$ we find no sign of the higher-level excitations, implying that their amplitude must have become at least three orders of magnitude weaker than for the parabola formed by the first-level excitations. The picture emerging from these experimental results can only be explained---though only qualitatively---by the hierarchy that we study for spinless fermions. The rest of the paper is organised as follows. In Section II we describe the one-dimensional model of interacting spinless fermions, introducing a short-range cut-off via a lattice. Section III contains a procedure for finding the many-body eigenenergies by means of the coordinate Bethe ansatz. In Section IV we evaluate the form factors needed for the dynamical response functions. We give a construction of the algebraic representation of the Bethe ansatz (Subsection IVa) and evaluate the scalar product in this representation (Subsection IVb). We present a calculation of the form factors for the spectral function and the dynamical structure factor for a finite chain (Subsection IVc).
We take the long-wavelength limit, deriving polynomial formulae for the form factors (Subsection IVd). Then, we analyse the obtained form factors, establishing the hierarchy of excitations (Subsection IVe). Finally, we calculate the spectral function around the spectral edges (Subsection IVf). In Section V we illustrate the crossover to the Luttinger liquid at low energy by evaluating the local density of states at all energy scales. Section VI describes experiments on momentum-conserving tunnelling of electrons in semiconductor wires. Section VII is dedicated to low energies, and in Section VIII we analyse the measurements at high energies, connecting the experiment with the theory of spinless fermions developed in this paper. Figures below are marked with spinless and spinful logos (such as those in Figs.\@ \ref{fig:SF_hierarchy_states} and \ref{fig:sc_separation}, respectively) to indicate the structure of the paper visually. Appendix A contains details of the derivation of the Bethe equations in the algebraic representation. In Appendix B we derive the expectation value of the local density operator. \section{Model of spinless fermions} We study theoretically the model of interacting Fermi particles without spin in 1D, \begin{equation} H=\int_{-\frac{L}{2}}^{^{\frac{L}{2}}}dx\left(-\frac{1}{2m}\psi^{\dagger}\left(x\right)\Delta\psi\left(x\right)+UL\rho\left(x\right)^{2}\right),\label{eq:H} \end{equation} where the field operators $\psi\left(x\right)$ satisfy the Fermi commutation relations, $\left\{ \psi\left(x\right),\psi^{\dagger}\left(x'\right)\right\} =\delta\left(x-x'\right)$, $\rho\left(x\right)=\psi^{\dagger}\left(x\right)\psi\left(x\right)$ is the particle density operator, and $m$ is the bare mass of a single particle. Below we consider periodic boundary conditions, $\psi\left(x+L\right)=\psi\left(x\right)$, restrict ourselves to repulsive interaction $U>0$ only, and take $\hbar=1$. Non-zero matrix elements of the interaction term in Eq.\@ (\ref{eq:H}) require a finite range of the potential profile for Fermi particles. Here, we introduce a lattice with next-neighbour interaction, whose lattice parameter and interaction radius are both $\mathcal{R}$. The model in Eq.\@ (\ref{eq:H}) becomes \begin{equation} H=\sum_{j=-\frac{\mathcal{L}}{2}}^{\frac{\mathcal{L}}{2}}\left[\frac{-1}{2m}\left(\psi_{j}^{\dagger}\psi_{j+1}+\psi_{j}^{\dagger}\psi_{j-1}\right)+U\rho_{j}\rho_{j+1}\right],\label{eq:H_lattice} \end{equation} where $j$ is the site index on the lattice, the dimensionless length of the system is $\mathcal{L}=L/\mathcal{R}$, the operators obey $\left\{ \psi_{i},\psi_{j}^{\dagger}\right\} =\delta_{ij}$, and $\rho_{j}=\psi_{j}^{\dagger}\psi_{j}$. The long-wavelength limit of the discrete model corresponds to the model in Eq.\@ (\ref{eq:H}), while the interaction radius $\mathcal{R}$ microscopically provides an ultraviolet cutoff in the continuum regime. For $N$-particle states of the lattice model we additionally impose the constraint of low particle density, $N/\mathcal{L}\ll1$, to stay within the conducting regime; a large occupancy $N\sim\mathcal{L}$ might lead to Wigner-crystal physics at sufficiently strong interactions that would localise the system. This procedure is analogous to the point-splitting regularisation technique,\cite{vonDelftSchoellerReview} which is usually introduced within the framework of the Luttinger-liquid model in the linear regime.
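As a cross-check of the lattice model, the following minimal Python sketch (our illustration, not part of the derivation) builds the Hamiltonian in Eq.\@ (\ref{eq:H_lattice}) in the $N$-particle Fock basis of a small ring and diagonalises it exactly; the resulting many-body energies can be compared with the Bethe ansatz values of the next Section, up to the trivial constant shift $N/m$, since Eq.\@ (\ref{eq:H_lattice}) keeps only the hopping and interaction terms.
\begin{verbatim}
import itertools
import numpy as np

def fock_basis(Lsites, N):
    """All N-particle occupation tuples on a ring of Lsites sites."""
    return {s: i for i, s in
            enumerate(itertools.combinations(range(Lsites), N))}

def hamiltonian(Lsites, N, m=1.0, U=2.0):
    """Dense matrix of the lattice model in the N-particle sector."""
    basis = fock_basis(Lsites, N)
    H = np.zeros((len(basis), len(basis)))
    t = 1.0 / (2.0 * m)                    # hopping amplitude
    for s, i in basis.items():
        occ = set(s)
        # diagonal part: next-neighbour interaction U * n_j * n_{j+1}
        H[i, i] = U * sum((j + 1) % Lsites in occ for j in occ)
        # hopping; the only Jordan-Wigner sign sits on the wrap-around bond
        for j in occ:
            for jp in ((j + 1) % Lsites, (j - 1) % Lsites):
                if jp in occ:
                    continue
                ns = tuple(sorted((occ - {j}) | {jp}))
                sign = (-1.0) ** (N - 1) if abs(j - jp) > 1 else 1.0
                H[basis[ns], i] -= t * sign
    evals, evecs = np.linalg.eigh(H)
    return evals, evecs, basis

evals, _, _ = hamiltonian(10, 2)
print(evals[:4])                           # lowest many-body levels
\end{verbatim}
The sign rule encodes the fermionic statistics: a hop between adjacent sites crosses no occupied sites in the Jordan-Wigner ordering, while a hop across the periodic boundary crosses the remaining $N-1$ particles.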
\section{Spectral properties} The model in Eq.\@ (\ref{eq:H_lattice}) can be diagonalised via the Bethe ansatz approach, which is based on the observation that the eigenstates are superpositions of plane waves. This method is also called the coordinate Bethe ansatz.\cite{KorepinBook} The eigenstates, following Ref.\@ \onlinecite{KorepinBook}, can be parameterised with sets of $N$ quasimomenta $k_{j}$, \begin{equation} \Psi=\sum_{\mathcal{P},j_{1}<\dots<j_{N}}e^{i\sum_{l}k_{P_{l}}j_{l}+i\sum_{l<l'}\varphi_{P_{l},P_{l'}}}\psi_{j_{1}}^{\dagger}\dots\psi_{j_{N}}^{\dagger}\left|\textrm{vac}\right\rangle.\label{eq:psiN_coordinates} \end{equation} Their corresponding eigenenergies, $H\Psi=E\Psi$, are $E=\sum_{j=1}^{N}\left(1-\cos k_{j}\right)/m.$ Here $\left|\textrm{vac}\right\rangle $ is the vacuum state, the scattering phases are fixed by the two-body scattering problem, \begin{equation} e^{i2\varphi_{ll'}}=-\frac{e^{i\left(k_{l}+k_{l'}\right)}+1+2mUe^{ik_{l}}}{e^{i\left(k_{l}+k_{l'}\right)}+1+2mUe^{ik_{l'}}}\label{eq:phi_llp} \end{equation} and $\sum_{\mathcal{P}}$ is a sum over all permutations of the quasimomenta. The periodic boundary condition quantises the whole set of $N$ quasimomenta simultaneously, \begin{equation} \mathcal{L}k_{j}-2\sum_{l\neq j}\varphi_{jl}=2\pi I_{j}\label{eq:BA} \end{equation} where the $I_{j}$ are sets of distinct integers. Generally, the system of equations in Eq.\@ (\ref{eq:BA}) has to be solved numerically to obtain the full spectral structure of the observables. However, in the long-wavelength regime the solutions can be evaluated explicitly. In this limit the scattering phases in Eq.\@ (\ref{eq:phi_llp}) are linear functions of the quasimomenta, $2\varphi_{ll'}=\left(k_{l}-k_{l'}\right)/\left(1+\left(mU\right)^{-1}\right)+\pi$, which makes the non-linear system of Bethe ansatz equations in Eq. (\ref{eq:BA}) a linear system.\cite{TS14} Then, solving the linear system for $\mathcal{L}\gg1$ via matrix perturbation theory up to the first subleading order in $1/\mathcal{L}$ we obtain \begin{equation} k_{j}=\frac{2\pi I_{j}}{\mathcal{L}-\frac{mUN}{mU+1}}-\frac{mU}{mU+1}\sum_{l\neq j}\frac{2\pi I_{l}}{\left(\mathcal{L}-\frac{mUN}{mU+1}\right)^{2}}.\label{eq:kj} \end{equation} Note that this calculation is valid for any interaction strength at low densities. The corresponding eigenenergy and total momentum (protected by the translational invariance of the system) are \begin{equation} E=\sum_{j}\frac{k_{j}^{2}}{2m}\label{eq:E_kj} \end{equation} and $P=\sum_{j}k_{j}$. The spectrum of the many-body states is governed by the first term in Eq.\@ (\ref{eq:kj}). The reduction of the quantisation length in the denominator of the first term in Eq.\@ (\ref{eq:kj}) is the exclusion volume taken up by the sum of the interaction radii of all particles. Thus all $N$-particle eigenstates at an arbitrary interaction strength are given straightforwardly by the same sets of integers $I_{j}$ as the free-fermion states, \emph{e.g.} the ground state corresponds to $I_{j}=-N/2\dots N/2$. For example, this result can be used to calculate explicitly the low-energy excitations that define the input parameters of the Luttinger-liquid model: the sound velocity $v$ and the Luttinger parameter $K$. The first pair of particle-like excitations, when an extra electron is added just above the Fermi energy, have $I_{N+1}=N/2+1$ and $I_{N+1}=N/2+2$, respectively.
The differences in their energies and momenta are $E_{2}-E_{1}=\left(2\pi\right)^{2}N/\left[2m\left(\mathcal{L}-\frac{mUN}{mU+1}\right)^{2}\right]$ and $P_{2}-P_{1}=2\pi/\mathcal{L}$. Evaluating the discrete derivative, which gives the slope of the dispersion around the Fermi energy, as $v=\left(E_{2}-E_{1}\right)/\left(P_{2}-P_{1}\right)$ we obtain \begin{equation} v=\frac{v_{\rm F}}{\left(1-\frac{NmU}{\mathcal{L}\left(1+mU\right)}\right)^{2}}\:\textrm{and}\:K=\left(1-\frac{NmU}{\mathcal{L}\left(1+mU\right)}\right)^{2},\label{eq:SF_Luttinger_params} \end{equation} where $v_{\rm F}=\pi N/\left(mL\right)$ is the Fermi velocity and the relation $vK=v_{\rm F}$ between the Luttinger parameters for Galilean-invariant systems\cite{Haldane81} was used. \section{Matrix elements} Now we turn to the calculation of matrix elements. First we need to select the operators that correspond to specific observables. Our interest lies in the dynamical response functions that correspond to adding/removing a single particle to/from a correlated system and to creating an electron-hole pair excitation out of the ground state of a correlated system. For example, the first type of dynamics can be realised in experiments using semiconductor nano-structures\cite{Yacoby02,Jompol09} where an electrical current, generated by electrons tunnelling into/from the nano-structure with their momentum and energy controlled, probes the system. The response of the many-body system to a single-particle excitation at momentum $k$ and energy $\varepsilon$ is described by the spectral function\cite{AGD} $A\left(k,\varepsilon\right)=-\textrm{Im}\left[\int dxdte^{i\left(kx-\varepsilon t\right)}G\left(x,0,t\right)\right]\textrm{sgn}\left(\varepsilon-\mu\right)/\pi$. Here $\mu$ is the chemical potential and $G\left(x,x',t\right)=-i\left\langle T\left(e^{-iHt}\psi\left(x\right)e^{iHt}\psi^{\dagger}\left(x'\right)\right)\right\rangle $ is the Green function at zero temperature. In terms of the eigenstates the spectral function reads \begin{multline} A\left(k,\varepsilon\right)=L\sum_{f}\left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}\delta\left(\varepsilon-E_{f}+E_{0}\right)\delta\left(k-P_{f}\right)\\ +L\sum_{f}\left|\left\langle 0|\psi\left(0\right)|f\right\rangle \right|^{2}\delta\left(\varepsilon+E_{f}-E_{0}\right)\delta\left(k+P_{f}\right),\label{eq:A_continuum} \end{multline} where $E_{0}$ is the energy of the ground state $\left|0\right\rangle $, and $P_{f}$ and $E_{f}$ are the momenta and the eigenenergies of the eigenstates $\left|f\right\rangle $; all eigenstates are assumed normalised. Creation of an electron-hole pair out of the correlated state at zero temperature at momentum $k$ and energy $\varepsilon$ is described by the dynamical structure factor\cite{AGD} $S\left(k,\varepsilon\right)=\int dxdte^{i\left(kx-\varepsilon t\right)}\left\langle \rho\left(x,t\right)\rho\left(0,0\right)\right\rangle $, where $\rho\left(x,t\right)=e^{-iHt}\rho\left(x\right)e^{iHt}$ is the density operator evolved under the Hamiltonian to time $t$ and the average $\left\langle \dots\right\rangle $ is taken over the ground state. In terms of the eigenstates the dynamical structure factor reads \begin{equation} S\left(k,\varepsilon\right)=L\sum_{f}\left|\left\langle f|\rho\left(0\right)|0\right\rangle \right|^{2}\delta\left(\varepsilon-E_{f}\right)\delta\left(k-P_{f}\right).\label{eq:S_continuum} \end{equation} Thus, we will be analysing the expectation values of the local operators $\psi\left(0\right)$ and $\rho\left(0\right)$.
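Before evaluating these matrix elements via the Bethe ansatz, the Lehmann sum in Eq.\@ (\ref{eq:A_continuum}) can be made concrete numerically. The hedged sketch below (ours) reuses \texttt{hamiltonian()} from the sketch in Section II: it builds the matrix of $\psi^{\dagger}$ at site $0$ between the $N$- and $\left(N+1\right)$-particle sectors and reads off the weights $\left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}$; resolving the momenta $P_{f}$ would additionally require sorting the eigenstates by their translation eigenvalue.
\begin{verbatim}
import numpy as np

def psi_dagger_0(basisN, basisN1):
    """Matrix of psi^dagger at site 0, N-particle -> (N+1)-particle."""
    M = np.zeros((len(basisN1), len(basisN)))
    for s, i in basisN.items():
        if 0 not in s:                       # site 0 must be empty
            M[basisN1[(0,) + s], i] = 1.0    # site 0 is first: no JW sign
    return M

Lsites, N = 10, 2
e0, v0, b0 = hamiltonian(Lsites, N)          # from the sketch of Sec. II
e1, v1, b1 = hamiltonian(Lsites, N + 1)
amp = v1.T @ psi_dagger_0(b0, b1) @ v0[:, 0] # <f|psi^dagger(0)|ground>
for Ef, w in zip(e1 - e0[0], amp ** 2):      # particle part of the sum
    if w > 1e-12:
        print(f"eps = {Ef:+.4f}   weight = {w:.3e}")
\end{verbatim}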
To proceed with this calculation we will borrow the results from Refs.\@ \onlinecite{Kitaine99, Kitaine00} for Heisenberg chains. Our strategy is to perform the full calculation for the discrete model in Eq.\@ (\ref{eq:H_lattice}), obtaining the matrix elements of $\psi_{j}$ and $\rho_{j}$ as determinants. Then we will take the long-wavelength limit to evaluate the form factors for the continuum model explicitly, which will be the main technical result in the theoretical part of this paper. Below we will construct the algebraic form of the Bethe ansatz, use Slavnov's formula\cite{Slavnov89} to express the scalar product and the normalisation factors in this representation, and finally calculate the matrix elements of the local operators. \subsection{Algebraic Bethe ansatz} The wave functions of the $N$-particle eigenstates are factorised in the algebraic representation, which allows the general calculation of various scalar products between them. Here we will follow the construction in Ref.\@ \onlinecite{KorepinBook} for XXZ spin chains, changing the basis from spin-1/2 operators to spinless fermions. The so-called $R$-matrix acts on a tensor-product space $V_{1}\otimes V_{2}$ and depends on an auxiliary parameter $u$, where $V_{1}$ and $V_{2}$ are two-state subspaces, each consisting of the states $\left|0\right\rangle _{j}$ and $\left|1\right\rangle _{j}$. It is a solution of the Yang-Baxter equation $R_{12}\left(u_{1}-u_{2}\right)R_{13}\left(u_{1}\right)R_{23}\left(u_{2}\right)=R_{23}\left(u_{2}\right)R_{13}\left(u_{1}\right)R_{12}\left(u_{1}-u_{2}\right)$. For the lattice model in Eq.\@ (\ref{eq:H_lattice}) the $R$-matrix reads \begin{multline} R_{12}=1-\left(1-b\left(u\right)\right)\left(c_{1}^{\dagger}c_{1}+c_{2}^{\dagger}c_{2}\right)\\ -2b\left(u\right)c_{1}^{\dagger}c_{1}c_{2}^{\dagger}c_{2}+c\left(u\right)\left(c_{1}^{\dagger}c_{2}+c_{2}^{\dagger}c_{1}\right) \end{multline} where \begin{equation} b\left(u\right)=\frac{\sinh\left(u\right)}{\sinh\left(u+2\eta\right)},\:c\left(u\right)=\frac{\sinh\left(2\eta\right)}{\sinh\left(u+2\eta\right)}.\label{eq:bc_def} \end{equation} Here $\eta$ is the interaction parameter, and the tensor-product space is defined using the fermionic basis $\left|0\right\rangle _{j}$ and $\left|1\right\rangle _{j}$ with corresponding fermionic operators $\left\{ c_{i},c_{j}^{\dagger}\right\} =\delta_{ij}$ that act in these bases as $c_{j}^{\dagger}\left|0\right\rangle _{j}=\left|1\right\rangle _{j}$. The latter will account for the anticommuting nature of the lattice fermions on different sites, in contrast to the commutation relations of the spin operators of a spin-chain model.\cite{Korepin00, Sakai08} All further calculations are identical to those for spin chains, with the anti-commutation relations of the Fermi particles automatically fulfilled. This approach is more convenient than a direct mapping of the results for spin chains using the Jordan-Wigner transformation.\cite{JordanWigner} A two-state subspace of the $R$-matrix can be identified with the two-state fermionic subspace of the lattice site $j$ of the model in Eq.\@ (\ref{eq:H_lattice}). Then, the quantum version of the Lax operator (the so-called $L$-matrix) can be defined as $L_{j}=R_{\xi j}$.
In the auxiliary subspace $\xi$ its matrix and operator forms are \begin{multline} L_{j}=\left(\begin{array}{cc} \frac{\cosh\left(u-\eta\left(2\rho_{j}-1\right)\right)}{\cosh\left(u-\eta\right)} & -i\frac{\sinh2\eta\, c_{j}}{\cosh\left(u-\eta\right)}\\ -i\frac{\sinh2\eta\, c_{j}^{\dagger}}{\cosh\left(u-\eta\right)} & -\frac{\cosh\left(u+\eta\left(2\rho_{j}-1\right)\right)}{\cosh\left(u-\eta\right)} \end{array}\right),\\ =A^{j}\left(1-c_{\xi}^{\dagger}c_{\xi}\right)+c_{\xi}^{\dagger}C^{j}+B^{j}c_{\xi}+D^{j}c_{\xi}^{\dagger}c_{\xi}.\label{eq:L-matrix} \end{multline} Here the top-left element of the matrix is a transition between the $\left|0\right\rangle _{\xi}$ and $\left\langle 0\right|_{\xi}$ states of the auxiliary subspace, $c_{j}$ and $\rho_{j}$ are the fermionic operators of the lattice model in Eq.\@ (\ref{eq:H_lattice}), and $A^{j},B^{j},C^{j},D^{j}$ label the matrix elements of $L_{j}$. The prefactor in front of $L_{j}$ was chosen such that for $u=i\pi/2-\eta$ it becomes a permutation matrix and for $\eta=0$ the $L$-operator is diagonal. By construction the $L$-operator satisfies the algebra generated by the Yang-Baxter equation, \begin{equation} R\left(u-v\right)\left(L_{j}\left(u\right)\otimes L_{j}\left(v\right)\right)=\left(L_{j}\left(v\right)\otimes L_{j}\left(u\right)\right)R\left(u-v\right).\label{eq:YB4L} \end{equation} The entries give commutation relations between the matrix elements of the $L$-matrix. Here we write down three of them that will be used later, \begin{equation} \left\{ B_{u}^{j},C_{v}^{j}\right\} =\frac{c\left(u-v\right)}{b\left(u-v\right)}\left(D_{v}^{j}A_{u}^{j}-D_{u}^{j}A_{v}^{j}\right),\label{BC} \end{equation} \begin{equation} A_{u}^{j}C_{v}^{j}=\frac{1}{b\left(v-u\right)}C_{v}^{j}A_{u}^{j}-\frac{c\left(v-u\right)}{b\left(v-u\right)}C_{u}^{j}A_{v}^{j},\label{eq:AC} \end{equation} \begin{equation} D_{u}^{j}C_{v}^{j}=-\frac{1}{b\left(u-v\right)}C_{v}^{j}D_{u}^{j}+\frac{c\left(u-v\right)}{b\left(u-v\right)}C_{u}^{j}D_{v}^{j}.\label{eq:DC} \end{equation} These relations can also be checked explicitly by direct use of the definition in Eq.\@ (\ref{eq:L-matrix}) and the Fermi commutation relations. The transition matrix $T\left(u\right)$ for a chain with $\mathcal{L}$ sites\textendash the so-called monodromy matrix\textendash can be defined similarly to the classical problem as \begin{equation} T\left(u\right)=\prod_{j=1}^{\mathcal{L}}L_{j}\left(u\right).\label{eq:T-matrix} \end{equation} If all single-site $L$-matrices satisfy Eq.\@ (\ref{eq:YB4L}) then the $T$-matrix also satisfies the same Yang-Baxter equation, e.g. see the proof in Ref.\@ \onlinecite{KorepinBook}. Therefore the matrix elements of $T=A\left(1-c_{\xi}^{\dagger}c_{\xi}\right)+c_{\xi}^{\dagger}C+Bc_{\xi}+Dc_{\xi}^{\dagger}c_{\xi}$ in the $2\times2$ auxiliary space $\xi$ obey the same commutation relations in Eqs. (\ref{BC}-\ref{eq:DC}). The transfer matrix for the whole chain, \begin{equation} \tau=\textrm{str}T=A\left(u\right)-D\left(u\right),\label{eq:transfer_matrix} \end{equation} is the supertrace of the $T$-matrix due to the fermionic definition of the auxiliary space.\cite{Korepin00, Sakai08} The latter gives a family of commuting matrices, $\left[\tau\left(u\right),\tau\left(v\right)\right]=0$, which contain all conserved quantities of the problem including the Hamiltonian. The vacuum state $\left|0\right\rangle$\textendash in the Fock space of the model in Eq.\@ (\ref{eq:H_lattice})\textendash is an eigenstate of the transfer matrix $\tau$.
The corresponding eigenvalue, $\tau\left(u\right)\left|0\right\rangle =\left(a\left(u\right)-d\left(u\right)\right)\left|0\right\rangle $, is the difference of the eigenvalues of the $A$ and $D$ operators, which can be obtained directly by use of the definitions in Eqs.\@ (\ref{eq:L-matrix}, \ref{eq:T-matrix}). Noting that for $\mathcal{L}=2$ Eq.\@ (\ref{eq:T-matrix}) gives $A\left(u\right)\left|0\right\rangle =a_{1}\left(u\right)a_{2}\left(u\right)\left|0\right\rangle $ and $D\left(u\right)\left|0\right\rangle =d_{1}\left(u\right)d_{2}\left(u\right)\left|0\right\rangle $, where $a_{1}\left(u\right)=a_{2}\left(u\right)=\cosh\left(u+\eta\right)/\cosh\left(u-\eta\right)$ and $d_{1}\left(u\right)=d_{2}\left(u\right)=1$, and generalising this observation to arbitrary $\mathcal{L}$ one obtains \begin{equation} a\left(u\right)=\frac{\cosh\left(u+\eta\right)^{\mathcal{L}}}{\cosh\left(u-\eta\right)^{\mathcal{L}}},\quad\textrm{and}\quad d\left(u\right)=1.\label{eq:vacuum_ad} \end{equation} A general state of $N$ particles\textendash a Bethe state\textendash is constructed by applying the operator $C\left(u\right)$ $N$ times with different values of the auxiliary variable $u_{j}$, \begin{equation} \Psi=\prod_{j=1}^{N}C\left(u_{j}\right)\left|0\right\rangle ,\label{eq:psiN_algebraic} \end{equation} where a set of $N$ values $u_{j}$ corresponds to $N$ quasimomenta $k_{j}$ in the coordinate representation of Bethe states in Eq.\@ (\ref{eq:psiN_coordinates}). The state in Eq.\@ (\ref{eq:psiN_algebraic}) with an arbitrary set of $u_{j}$ is not an eigenstate of the transfer matrix $\tau$. This can be seen by commuting the operators $A$ and $D$ from left to right through all the operators $C\left(u_{j}\right)$, which generates many different states according to the commutation relations in Eqs.\@ (\ref{eq:AC},\ref{eq:DC}). However the contribution of all of the states that are non-degenerate with $\Psi$ can be made zero by choosing particular sets of $u_{j}$ that satisfy the following set of equations \begin{equation} \frac{a\left(u_{j}\right)}{d\left(u_{j}\right)}=\left(-1\right)^{N-1}\prod_{l=1\neq j}^{N}\frac{b\left(u_{l}-u_{j}\right)}{b\left(u_{j}-u_{l}\right)}\label{eq:BAequation_ABA} \end{equation} (see Appendix A for details). Under the substitution of the vacuum eigenvalues $a\left(u_{j}\right)$ and $d\left(u_{j}\right)$ of the $A$ and $D$ operators from Eq.\@ (\ref{eq:vacuum_ad}) and of $b\left(u_{l}-u_{j}\right)$ \textendash{} which defines the commutation relations in Eqs. (\ref{eq:AC},\ref{eq:DC}) \textendash{} from Eq. (\ref{eq:bc_def}), this so-called eigenvalue equation becomes \begin{equation} \frac{\cosh\left(u_{j}-\eta\right)^{\mathcal{L}}}{\cosh\left(u_{j}+\eta\right)^{\mathcal{L}}}=\left(-1\right)^{N-1}\prod_{l=1\neq j}^{N}\frac{\sinh\left(u_{j}-u_{l}-2\eta\right)}{\sinh\left(u_{j}-u_{l}+2\eta\right)}.\label{eq:eigenvalue_ABA} \end{equation} Thus all the sets of $u_{j}$ that satisfy the above equation give eigenstates of the transfer matrix in the representation of Eq.\@ (\ref{eq:psiN_algebraic}) with the corresponding eigenvalues $\tau\left(u\right)\Psi=\mathcal{T}\left(u\right)\Psi$ where \begin{equation} \mathcal{T}\left(u\right)=a\left(u\right)\prod_{j=1}^{N}\frac{1}{b\left(u_{j}-u\right)}-\left(-1\right)^{N}d\left(u\right)\prod_{j=1}^{N}\frac{1}{b\left(u-u_{j}\right)}.\label{eq:tau_eigenvalue} \end{equation} This eigenvalue equation in the algebraic framework is the direct analog of the Bethe ansatz equation (\ref{eq:BA}) in the coordinate representation.
A direct mapping between the two is obtained by the substitution of \begin{equation} e^{ik_{j}}=\frac{\cosh\left(u_{j}-\eta\right)}{\cosh\left(u_{j}+\eta\right)},\quad mU=-\cosh2\eta,\label{eq:ABA_to_CBA_mapping} \end{equation} in Eq.\@ (\ref{eq:BA}) and by taking its exponential. The original lattice Hamiltonian can be obtained from the transfer matrix $\tau(u)$, which contains all of the conserved quantities of the problem. Logarithmic derivatives of $\tau(u)$ give the global conservation laws by means of the so-called trace identities, see Ref.\@ \onlinecite{KorepinBook}. The linear coefficient in the Taylor series around the point $u=\frac{i\pi}{2}-\eta$ is proportional to the Hamiltonian itself. After restoring the correct prefactor the expression reads \begin{equation} H=-\frac{\sinh\eta}{2m}\partial_{u}\left.\ln\tau\left(u\right)\right|_{u=\frac{i\pi}{2}-\eta}. \end{equation} Substitution of the interaction parameter $\eta$ from Eq.\@ (\ref{eq:ABA_to_CBA_mapping}), in terms of the particle-particle interaction constant $U$, into the right-hand side of the above relation recovers the lattice model in Eq.\@ (\ref{eq:H_lattice}). \subsection{Scalar product} The basic quantity, on which the calculations of expectation values will be based, is the scalar product of two wave functions. A general way of evaluating it is via the commutation relations in Eqs. (\ref{BC}-\ref{eq:DC}) and the vacuum expectation values of the $A$ and $D$ operators. The result of such a calculation simplifies greatly if one of the Bethe states is an eigenstate of the transfer matrix $\tau(u)$, as was first shown by Slavnov.\cite{Slavnov89} The same result was later rederived in Refs.\@ \onlinecite{Kitaine99, Kitaine00} using the so-called factorising $F$-matrix,\cite{MailletSanchez00} which is a representation of a Drinfeld twist.\cite{Drinfeld86} The latter will not be used in this Subsection but it will be needed later in calculations of the matrix elements of the local operators. Let $\left|\mathbf{u}\right\rangle =\prod_{j=1}^{N}C\left(u_{j}\right)\left|0\right\rangle $ be an eigenstate of the transfer matrix, so that the $N$ parameters $u_{j}$ satisfy the Bethe equation in Eq.\@ (\ref{eq:eigenvalue_ABA}), and let $\left\langle \mathbf{v}\right|=\left\langle 0\right|\prod_{j=1}^{N}B\left(v_{j}\right)$ be another Bethe state, parametrised by a set of $N$ arbitrary values $v_{j}$. The scalar product of these two states $\langle\mathbf{v}|\mathbf{u}\rangle$ can be evaluated by commuting each operator $B\left(v_{j}\right)$ through the product of $C\left(u_{j}\right)$ operators using the commutation relation in Eq.\@ (\ref{BC}), which generates the $A$ and $D$ operators with all possible values of $u_j$ and $v_j$. They, in turn, also have to be commuted to the right through the remaining products of the $C\left(u_{j}\right)$ operators. Finally, products of the $A$ and $D$ operators, which act upon the vacuum state, just give products of their vacuum eigenvalues $a(u_j), d(u_j)$ and $a(v_j), d(v_j)$ according to Eq.\@ (\ref{eq:vacuum_ad}).
The resulting sums of products can be written, using the relation between the $u_j$ in Eq.\@ (\ref{eq:eigenvalue_ABA}), in a compact form as a determinant of an $N\times N$ matrix\cite{Slavnov89} \begin{equation} \left\langle \mathbf{v}|\mathbf{u}\right\rangle =\frac{\prod_{i,j=1}^{N}\sinh\left(v_{j}-u_{i}\right)}{\prod_{j<i}\sinh\left(v_{j}-v_{i}\right)\prod_{j<i}\sinh\left(u_{j}-u_{i}\right)}\det\hat{S}\label{eq:scalar_product} \end{equation} where the matrix elements are $S_{ab}=\partial_{u_{a}}\mathcal{T}\left(v_{b}\right)$. Under substitution of the eigenvalues of the transfer matrix from Eq.\@ (\ref{eq:tau_eigenvalue}) these matrix elements read in explicit form as \begin{widetext} \begin{equation} S_{ab}=-\frac{\cosh^{\mathcal{L}}\left(v_{b}+\eta\right)}{\cosh^{\mathcal{L}}\left(v_{b}-\eta\right)}\frac{\sinh\left(2\eta\right)}{\sinh^{2}\left(u_{a}-v_{b}\right)}\prod_{j=1\neq a}^{N}\frac{\sinh\left(u_{j}-v_{b}+2\eta\right)}{\sinh\left(u_{j}-v_{b}\right)}-\left(-1\right)^{N}\frac{\sinh\left(2\eta\right)}{\sinh^{2}\left(v_{b}-u_{a}\right)}\prod_{j=1\neq a}^{N}\frac{\sinh\left(v_{b}-u_{j}+2\eta\right)}{\sinh\left(v_{b}-u_{j}\right)}.\label{eq:scalar_product_matrix_elements} \end{equation} \end{widetext} For $N=1$ the result in Eqs. (\ref{eq:scalar_product}, \ref{eq:scalar_product_matrix_elements}) follows directly from Eq.\@ (\ref{BC}). For arbitrary $N$ the proof is more complicated: it employs the residue formula\cite{Slavnov89} (the function $\left\langle \mathbf{v}|\mathbf{u}\right\rangle$ has first-order poles when $v_{i}\rightarrow u_{j}$) and the recursion relation for the scalar product of $N+1$ particles in terms of the scalar product of $N$ particles, see also the details in Ref.\@ \onlinecite{KorepinBook}. The normalisation factor of Bethe states can be obtained from Eq. (\ref{eq:scalar_product}) by taking the limit $\mathbf{v}\rightarrow\mathbf{u}$. The first-order singularities, $\left(v_{b}-u_{b}\right)^{-1}$, in the off-diagonal matrix elements of Eq.\@ (\ref{eq:scalar_product_matrix_elements}) are cancelled by zeros in the numerator in Eq.\@ (\ref{eq:scalar_product}). The diagonal $a=b$ matrix elements contain second-order singularities $\left(v_{b}-u_{b}\right)^{-2}$ for $\mathbf{v}\rightarrow\mathbf{u}$. However, the numerator also becomes zero when $\mathbf{v}\rightarrow\mathbf{u}$ in the leading order. Its expansion up to the first subleading order cancels the second-order singularity of the denominator, giving a finite expression for the matrix elements in the limit. The normalisation factor is found to be \begin{equation} \left\langle \mathbf{u}|\mathbf{u}\right\rangle =\sinh^{N}\left(2\eta\right)\prod_{i\neq j=1}^{N}\frac{\sinh\left(u_{j}-u_{i}+2\eta\right)}{\sinh\left(u_{j}-u_{i}\right)}\det\hat{Q} , \label{eq:norm} \end{equation} where the matrix elements are\begin{widetext} \begin{equation} Q_{ab}=\begin{cases} -\mathcal{L}\frac{\sinh2\eta}{\cosh\left(u_{a}+\eta\right)\cosh\left(u_{a}-\eta\right)}-\sum_{j\neq a}\frac{\sinh4\eta}{\sinh\left(u_{a}-u_{j}-2\eta\right)\sinh\left(u_{a}-u_{j}+2\eta\right)} & ,\; a=b,\\ \frac{\sinh4\eta}{\sinh\left(u_{b}-u_{a}+2\eta\right)\sinh\left(u_{b}-u_{a}-2\eta\right)} & ,\; a\neq b.
\end{cases}\label{eq:Qab} \end{equation} \end{widetext} The last formula was originally derived by Gaudin using quantum-mechanical identities in the coordinate representation of the Bethe ansatz.\cite{Gaudin1981} Mapping of the resulting expression in Ref.\@ \onlinecite{Gaudin1981} to the algebraic representation by means of Eq.\@ (\ref{eq:ABA_to_CBA_mapping}) gives directly the result in Eqs. (\ref{eq:scalar_product}, \ref{eq:scalar_product_matrix_elements}), with different prefactors due to the different normalisation factors in the definitions of the states in Eq.\@ (\ref{eq:psiN_coordinates}) and in Eq.\@ (\ref{eq:psiN_algebraic}). We will use the algebraic form in Eq.\@ (\ref{eq:psiN_algebraic}) for the calculation of the local matrix elements below. \subsection{Expectation values of local operators} Operators of the algebraic Bethe ansatz in Eqs. (\ref{eq:L-matrix}, \ref{eq:T-matrix}) are non-local in the basis of the original fermionic operators of the lattice model in Eq.\@ (\ref{eq:H_lattice}). Thus, the first non-trivial problem in calculating the matrix elements of the local operators $\psi_{j}^{\dagger}$ and $\rho_{j}$ in the algebraic representation of Bethe states in Eq.\@ (\ref{eq:psiN_algebraic}) is expressing the operators of interest in terms of the non-local $A,B,C$ and $D$ operators from Eqs. (\ref{eq:L-matrix}, \ref{eq:T-matrix}). Alternatively these Bethe operators can be expressed in terms of the local operators of the lattice model. The latter approach is much more complicated since the product of matrices in Eq.\@ (\ref{eq:T-matrix}) is a large sum (exponential in the number of sites in the chain), severely restricting the ability to perform explicit calculations in the fermionic representation in practice. An alternative way was found by constructing the $F$-matrix representation of a Drinfeld twist.\cite{MailletSanchez00} In the $F$-basis the monodromy matrix in Eq.\@ (\ref{eq:T-matrix}) becomes quasi-local, i.e. its diagonal elements $A$ and $D$ become direct products of diagonal matrices on each site over all sites of the chain and the off-diagonal $B$ and $C$ are single sums over such direct products. Direct calculations become much easier in this basis. Specifically, analysis of the $A,B,C,D$ operators leads to a simple result for representing the $\psi_{j}^{\dagger}$ operator in terms of algebraic Bethe ansatz operators, which is then shown to be basis-independent,\cite{Kitaine99, Kitaine00} \begin{equation} \psi_{j}^{\dagger}=\tau^{j-1}\left(\frac{i\pi}{2}-\eta\right)C\left(\frac{i\pi}{2}-\eta\right)\mathcal{\tau}^{\mathcal{L}-j}\left(\frac{i\pi}{2}-\eta\right).\label{eq:psij_aba} \end{equation} Here $\tau\left(u\right)=A\left(u\right)-D\left(u\right)$ is the supertrace of the monodromy matrix and $C\left(u\right)$ is its matrix element. The transfer matrices on the right-hand side of the above equation give only a phase prefactor in the expectation values with respect to the Bethe states in Eq.\@ (\ref{eq:psiN_algebraic}). Let $\left|\mathbf{u}\right\rangle $ be an eigenstate of the transfer matrix with $N$ particles, let $\left|\mathbf{v}\right\rangle $ be an eigenstate with $N+1$ particles, and let us consider the case of $j=1$. Acting with the $\tau^{\mathcal{L}-1}\left(i\pi/2-\eta\right)$ operator on the eigenstate $\left|\mathbf{u}\right\rangle $ gives the eigenvalue $\prod_{j=1}^{N}\cosh^{\mathcal{L}-1}\left(u_{j}-\eta\right)/\cosh^{\mathcal{L}-1}\left(u_{j}+\eta\right)$ according to Eq.\@ (\ref{eq:tau_eigenvalue}).
Then, using the mapping to the coordinate representation in Eq.\@ (\ref{eq:ABA_to_CBA_mapping}) and the Bethe equation in the form of Eq.\@ (\ref{eq:BA}), this eigenvalue can be expressed as $\exp\left[i P_u \left(\mathcal{L}-1\right)\right]$ where $P_u$ is the total momentum of the state $u_j$, a quantum number. Similar phase factors for $j\neq 1$ are evaluated in an analogous way, and each of them cancels out under the modulus squared in the form factor in Eq.\@ (\ref{eq:A_continuum}), making the local form factors independent of $j$, in full accord with the translational invariance of the system and of the observable in Eq.\@ (\ref{eq:A_continuum}). Thus we will only calculate the value of $\left\langle \mathbf{v}|\psi_{1}^{\dagger}|\mathbf{u}\right\rangle $. Since $C\left(\frac{i\pi}{2}-\eta\right)\prod_{j=1}^{N}C\left(u_{j}\right)\left|0\right\rangle $ is also a Bethe state $\left|\frac{i\pi}{2}-\eta,u_{j}\right\rangle $, though not an eigenstate, the expectation value can be calculated using the result for the scalar product, $\left\langle \mathbf{v}|\psi_{1}^{\dagger}|\mathbf{u}\right\rangle =\left\langle \mathbf{v}|\frac{i\pi}{2}-\eta,u_{j}\right\rangle $. Substituting $\frac{i\pi}{2}-\eta,u_{j}$ in Eqs. (\ref{eq:scalar_product},\ref{eq:scalar_product_matrix_elements}) explicitly one obtains\begin{widetext} \begin{equation} \left\langle \mathbf{v}|\psi_{1}^{\dagger}|\mathbf{u}\right\rangle =\left(-1\right)^{N+1}i\frac{\prod_{j=1}^{N+1}\cosh\left(v_{j}-\eta\right)}{\prod_{j=1}^{N}\cosh\left(u_{j}+\eta\right)}\frac{\sinh^{N+1}\left(2\eta\right)\det\hat{M}}{\prod_{j<i=2}^{N}\sinh\left(u_{j}-u_{i}\right)\prod_{j<i=2}^{N+1}\sinh\left(v_{j}-v_{i}\right)} \; ,\label{eq:psi_1_ABA} \end{equation} where the matrix elements are \begin{eqnarray} M_{ab} & = & \frac{\left(-1\right)^{N-1}}{\sinh\left(u_{b}-v_{a}\right)}\left(\prod_{j=1\neq b}^{N}\frac{\sinh\left(u_{b}-u_{j}+2\eta\right)}{\sinh\left(u_{b}-u_{j}-2\eta\right)}\prod_{j=1\neq a}^{N+1}\sinh\left(u_{b}-v_{j}-2\eta\right)+\prod_{j=1\neq a}^{N+1}\sinh\left(u_{b}-v_{j}+2\eta\right)\right) \; , \label{eq:Mab} \end{eqnarray} \end{widetext}for $b<N+1$, and \begin{eqnarray} M_{ab} & = & \frac{1}{\cosh\left(v_{a}-\eta\right)\cosh\left(v_{a}+\eta\right)}\; , \label{eq:MaN1} \end{eqnarray} for $b=N+1$. Here the Bethe equation in Eq.\@ (\ref{eq:eigenvalue_ABA}) was used to express $a\left(v_{j}\right)/d\left(v_{j}\right)$ in the matrix elements, and some factors in the matrix elements and the overall prefactor cancel out. The determinant results in Eqs.\@ (\ref{eq:norm}, \ref{eq:scalar_product}, \ref{eq:psi_1_ABA}) can be checked by numerical evaluation of the sums over spatial variables using the coordinate representation in Eq.\@ (\ref{eq:psiN_coordinates}), which we have done for a small number of particles, $N=1,2,3$. However the latter summation over many coordinates has factorial complexity, which already limits numerical calculations to a few particles on chains of a few dozen sites. The results of the algebraic Bethe ansatz calculations have a power-law complexity that allows general studies, at least numerically, of systems with hundreds of particles on arbitrarily long chains without making any approximations, e.g. the studies of correlation functions in one-dimensional systems in Refs. \onlinecite{JS05,JS05_SM, JS07,Pereira06,Pereira09,Links03}.
\subsection{The long wavelength limit} We now turn to the evaluation of the long-wavelength limit of the matrix elements in determinant form, with the aim of calculating the determinants explicitly. The resulting expressions will then be used to study physical observables. Such an analysis is more convenient in the coordinate representation. For small $k_{j}$ the non-linear mapping in Eq.\@ (\ref{eq:ABA_to_CBA_mapping}) becomes linear, similarly to the Bethe equation in Eq.\@ (\ref{eq:BA}) in this limit. Then, a simple inversion of the linear function gives \begin{equation} u_{j}=\frac{i}{2}\sqrt{\frac{mU+1}{mU-1}}k_{j}\quad\textrm{and}\quad\eta=-\frac{1}{2}\textrm{acosh}\left(mU\right).\label{eq:ABA_CBA_lwl} \end{equation} Note that $\left|u_{j}\right|$ and $k_{j}$ are simultaneously much smaller than one, while the interaction strength $U$ can be of an arbitrary magnitude. We start from the expansion of the normalisation factor in Eq.\@ (\ref{eq:norm}) up to the leading non-vanishing order in the quasimomenta. We first substitute Eq.\@ (\ref{eq:ABA_CBA_lwl}) in the matrix elements in Eqs. (\ref{eq:Qab}), then expand them up to the leading non-vanishing order in $k_{j}\ll1$, and obtain the matrix elements as follows, \begin{equation} Q_{aa}=2\mathcal{L}\sqrt{\frac{mU-1}{mU+1}}-\frac{2\left(N-1\right)mU}{\sqrt{m^{2}U^{2}-1}} \; , \end{equation} and \begin{equation} Q_{ab}=\frac{2mU}{\sqrt{m^{2}U^{2}-1}} \; , \end{equation} for $a\neq b$. The off-diagonal matrix elements are small compared to the diagonal entries, $Q_{ab}/Q_{aa}\sim1/\mathcal{L}$, so the leading contribution to the determinant is accumulated on the diagonal. Also expanding the prefactor of Eq.\@ (\ref{eq:norm}) in small $k_{j}$ we obtain the following expression for the normalisation in the long-wavelength limit, \begin{equation} \left\langle \mathbf{k}|\mathbf{k}\right\rangle =\frac{2^{N^{2}}\left(-1\right)^{N}\left(1-mU\right)^{N^{2}}\left(\mathcal{L}-\frac{mUN}{mU+1}\right)^{N}}{i^{N\left(N-1\right)}\prod_{i\neq j}\left(k_{j}-k_{i}\right)}\label{eq:norm_lwl} \end{equation} where $k_{j}$ are the quasimomenta in the coordinate representation of the Bethe ansatz. Our primary interest lies in the spectral function, which contains the local matrix elements of the $\psi^{\dagger}_j$ operator, so here we will focus on the determinant result in Eq.\@ (\ref{eq:psi_1_ABA}). Similarly to the calculation of the normalisation factor we substitute Eq.\@ (\ref{eq:ABA_CBA_lwl}) into Eq.\@ (\ref{eq:Mab}), which however becomes zero in the zeroth order in $k_{j}$. Expanding it up to linear order in $k_{j}$ we obtain \begin{equation} M_{ab}=2mU\left(m^{2}U^{2}-1\right)^{\frac{N-1}{2}}\frac{\sum_{j=1}^{N}k_{j}^{u}-\sum_{j=1\neq a}^{N+1}k_{j}^{v}}{k_{b}^{u}-k_{a}^{v}} \end{equation} for $b<N+1$, where $\Delta P=\sum_{j}k_{j}^{u}-\sum_{j}k_{j}^{v}$ is the difference of two conserved quantities, the momenta of the two states $\mathbf{k}^{u}$ and $\mathbf{k}^{v}$. The matrix elements in Eq. (\ref{eq:MaN1}) are already non-zero in the zeroth order in $k_{j}$, giving \begin{equation} M_{ab}=\frac{2}{mU+1} \;, \end{equation} for $b=N+1$.
Also expanding the prefactor in Eq.\@ (\ref{eq:psi_1_ABA}) and rearranging the expressions by taking a common factor out of the matrix elements, we obtain \begin{multline} \left\langle \mathbf{k}^{v}|\psi^{\dagger}\left(0\right)|\mathbf{k}^{u}\right\rangle =\left(-1\right)^{N+1}i^{N^{2}}2^{N^{2}+N+\frac{1}{2}}\\ \times\frac{\left(mU-1\right)^{N^{2}+\frac{1}{2}}m^{N}U^{N}\mathcal{D}}{\prod_{j<i}^{N}\left(k_{j}^{u}-k_{i}^{u}\right)\prod_{j<i}^{N+1}\left(k_{j}^{v}-k_{i}^{v}\right)}\label{eq:psi_1_lwl} \end{multline} where the entries of the matrix under the determinant, $\mathcal{D}=\det\hat{\mathcal{M}}$, for $b<N+1$ are \begin{equation} \mathcal{M}_{ab}=\frac{\Delta P+k_{a}^{v}}{k_{b}^{u}-k_{a}^{v}}\quad\textrm{and}\quad\mathcal{M}_{a,N+1}=1.\label{eq:cMab} \end{equation} All matrix elements are of the same order, so the determinant in Eq.\@ (\ref{eq:psi_1_lwl}) is a sum of a large number of terms, unlike the normalisation factor in Eq.\@ (\ref{eq:norm_lwl}). Doing the summation we find an explicit expression in the form of a fraction of two polynomials in the quasimomenta of the initial and the final states, \begin{multline} \mathcal{D}=\left(-1\right)^{N+1}\frac{\prod_{j}\left(\Delta P+k_{j}^{u}\right)}{\prod_{i,j}\left(k_{j}^{v}-k_{i}^{u}\right)}\\ \prod_{j<i}^{N}\left(k_{j}^{u}-k_{i}^{u}\right)\prod_{j<i}^{N+1}\left(k_{j}^{v}-k_{i}^{v}\right).\label{eq:cD} \end{multline} For $N=1$ the result above is evaluated straightforwardly as a determinant of a $2 \times 2$ matrix with the matrix elements in Eq.\@ (\ref{eq:cMab}). For arbitrary $N$ we prove it by induction. Using the Laplace expansion along the $\left(N+1\right)$-th column, the determinant for $N+1$ particles can be expressed as a sum of minors given, in turn, by determinants for $N$ particles, $\mathcal{D}_{N+1}=\sum_{a=1}^{N+2}\left(-1\right)^{N+1+a}\mathcal{M}_{a,N+1}\textrm{minor}_{a,N+1}$, which \textendash{} let us assume for purposes of the inductive method \textendash{} are given by Eq.\@ (\ref{eq:cD}) \begin{multline} \textrm{minor}_{a,N+1}=\left(-1\right)^{N+1}\frac{\prod_{j=1}^{N}\left(\Delta P+k_{j}^{u}\right)}{\prod_{i=1,j=1\neq a}^{N,N+2}\left(k_{j}^{v}-k_{i}^{u}\right)}\\ \prod_{j<i}^{N}\left(k_{j}^{u}-k_{i}^{u}\right)\prod_{j<i\neq a}^{N+2}\left(k_{j}^{v}-k_{i}^{v}\right) \end{multline} Here $\mathcal{M}_{a,N+1}$ are given by the matrix elements in Eq. (\ref{eq:cMab}), the $N$ quasimomenta $k_{j}^{u}$ are labelled by $j=1\dots N$, and the $N+1$ quasimomenta $k_{j}^{v}$ are labelled by $j=1\dots N+2$ with the $a$-th element excluded. Taking a common factor out in front of the sum, the determinant for $N+1$ particles becomes \begin{widetext} \begin{multline} \mathcal{D}_{N+1}=\left(-1\right)^{N+2}\frac{\prod_{j=1}^{N+1}\left(\Delta P+k_{j}^{u}\right)\prod_{j<i}^{N+1}\left(k_{j}^{u}-k_{i}^{u}\right)\prod_{j<i}^{N+2}\left(k_{j}^{v}-k_{i}^{v}\right)}{\prod_{i,j}\left(k_{j}^{v}-k_{i}^{u}\right)}\\ \times\frac{1}{\Delta P+k_{N+1}^{u}}\sum_{a=1}^{N+2}\frac{\left(\Delta P+k_{a}^{v}\right)\prod_{j=1\neq a}^{N+2}\left(k_{j}^{v}-k_{N+1}^{u}\right)\prod_{j=1}^{N}\left(k_{j}^{u}-k_{a}^{v}\right)}{\prod_{j=1}^{N}\left(k_{j}^{u}-k_{N+1}^{u}\right)\prod_{j=1\neq a}^{N+2}\left(k_{j}^{v}-k_{a}^{v}\right)}.\label{eq:cDN1} \end{multline} \end{widetext}The sum in the above expression gives, by direct calculation, $\sum_{a=1}^{N+2}\dots=\Delta P+k_{N+1}^{u}$, which makes the whole second line unity. The determinant is equal to the first line of Eq.\@ (\ref{eq:cDN1}), which is also equal to the result in Eq.\@ (\ref{eq:cD}) for $N+1$ particles.
Thus we obtain the same result for $N+1$ particles starting from Eq.\@ (\ref{eq:cD}) for $N$ particles, and the formula is proved by induction. Finally, the form factor in Eq.\@ (\ref{eq:A_continuum}) is the modulus squared of Eq.\@ (\ref{eq:psi_1_lwl}). Normalising the initial and the final state wave functions using Eq.\@ (\ref{eq:norm_lwl}) as $\left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}=\left|\left\langle \mathbf{k}^{f}|\psi^{\dagger}\left(0\right)|\mathbf{k}^{0}\right\rangle \right|^{2}\left\langle \mathbf{k}^{f}|\mathbf{k}^{f}\right\rangle ^{-1}\left\langle \mathbf{k}^{0}|\mathbf{k}^{0}\right\rangle ^{-1}$ we obtain \begin{multline} \left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}=\frac{Z^{2N}}{\mathcal{L}}\frac{\prod_{j}^{N}\left(k_{j}^{0}-P_{f}\right)^{2}}{\prod_{i,j}^{N,N+1}\left(k_{j}^{f}-k_{i}^{0}\right)^{2}}\\ \prod_{i<j}^{N}\left(k_{j}^{0}-k_{i}^{0}\right)^{2}\prod_{i<j}^{N+1}\left(k_{j}^{f}-k_{i}^{f}\right)^{2},\label{eq:FF_N} \end{multline} where $Z=mU/\left(mU+1\right)/\left(\mathcal{L}-NmU/\left(1+mU\right)\right)$, $k_{j}^{f}$ and $k_{j}^{0}$ are the quasimomenta of the eigenstate $\left|f\right\rangle $ and the ground state $\left|0\right\rangle $, and $P_{0}=0$ for the ground state. Calculation of $\left\langle f|\rho\left(0\right)|0\right\rangle $ is done in a similar way, by expressing the local density operator $\rho_{1}$, within the framework of the lattice model, in terms of the algebraic Bethe ansatz operators $A$, $B$, $C$, $D$ and then by using Slavnov's formula. Details are given in Appendix B. In the long-wavelength limit we obtain \begin{multline} \left|\left\langle f|\rho\left(0\right)|0\right\rangle \right|^{2}=\frac{Z^{2N-2}}{\mathcal{L}^{2}}\frac{P_{f}^{2N}}{\prod_{i,j}^{N,N}\left(k_{j}^{f}-k_{i}^{0}\right)^{2}}\\ \prod_{i<j}^{N}\left(k_{j}^{0}-k_{i}^{0}\right)^{2}\prod_{i<j}^{N}\left(k_{j}^{f}-k_{i}^{f}\right)^{2},\label{eq:FF_dsf} \end{multline} where the final states $\left|f\right\rangle $ have the same number of particles $N$ as the ground state $\left|0\right\rangle $, unlike in Eq.\@ (\ref{eq:FF_N}), and $P_{0}=0$ for the ground state as in Eq.\@ (\ref{eq:FF_N}). These form factors in Eqs. (\ref{eq:FF_N}, \ref{eq:FF_dsf}), together with the solution of the Bethe equations in Eq.\@ (\ref{eq:kj}), are the main technical result in the theory part of our work. We will analyse their physical consequences in the next two Subsections. The similarity between these two expressions means that the hierarchy of modes we will identify below is a general feature of one- and two-body operators. \subsection{Hierarchy of modes} The results in Eqs. (\ref{eq:FF_N}, \ref{eq:FF_dsf}) have one or more singularities when one or more quasimomenta of an excited state coincide with a quasimomentum of the ground state, $k_{j}^{f}=k_{j}^{0}$. Both results also contain a factor $Z^{2N}\sim\mathcal{L}^{-2N}$ that becomes vanishingly small in the thermodynamic limit, in which $\mathcal{L}\rightarrow\infty$. Thus the product of these two opposite factors produces an uncertainty in the limiting behaviour (of the $0\times\infty$ type) that has to be resolved. Since we are specifically interested in a transport experiment in this paper, in which the spectral function is measured, we will mainly focus on resolving this uncertainty for the result in Eq.\@ (\ref{eq:FF_N}).
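Before turning to this resolution, we note that the inductive result in Eq.\@ (\ref{eq:cD}) can be verified independently in a few lines. The sketch below (ours) draws random distinct quasimomenta and checks that the determinant of the matrix in Eq.\@ (\ref{eq:cMab}) reproduces the closed form of Eq.\@ (\ref{eq:cD}); the sign convention of the products over $j<i$ is absorbed into the overall prefactor.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
N = 6
ku = rng.uniform(-1.0, 1.0, N)       # N quasimomenta of the initial state
kv = rng.uniform(-1.0, 1.0, N + 1)   # N+1 quasimomenta of the final state
dP = ku.sum() - kv.sum()

M = np.ones((N + 1, N + 1))          # matrix of the text; last column is 1
M[:, :N] = (dP + kv[:, None]) / (ku[None, :] - kv[:, None])

def vandermonde(k):
    return np.prod([k[j] - k[i] for i in range(len(k))
                    for j in range(i + 1, len(k))])

closed = -np.prod(dP + ku) * vandermonde(ku) * vandermonde(kv) \
         / np.prod(kv[:, None] - ku[None, :])
print(np.linalg.det(M), closed)      # the two numbers should coincide
\end{verbatim}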
\begin{figure} {\centering\includegraphics[width=0.95\columnwidth]{states}} \caption{\label{fig:SF_hierarchy_states} Configurations of quasimomenta that solve the Bethe equations in Eqs. (\ref{eq:BA}, \ref{eq:phi_llp}) for the spinless fermion model in Eq.\@ (\ref{eq:H_lattice}): gs) the ground state, a) excitations that form the $a$-level of the hierarchy, and b) excitations that form the $b$-level of the hierarchy.} \end{figure} The maximum number of singularities is $N$ in the extreme case, when the quasimomenta $k_{j}^{f}$ of an excited state coincide with all of the $N$ quasimomenta of the ground state $k_{j}^{0}$ given in Fig.\@ \ref{fig:SF_hierarchy_states}gs. The excited states of this kind are given in Fig.\@ \ref{fig:SF_hierarchy_states}a. The divergences in the denominator of Eq.\@ (\ref{eq:FF_N}) occur only in the leading order---the first term in Eq.\@ (\ref{eq:kj})---but the subleading order---the second term in Eq.\@ (\ref{eq:kj})---already provides a self-consistent cutoff within the theory. The interaction shift of the quasimomenta at subleading order does not cancel for the extra added particle in the excited state, making the factors in the denominator of Eq.\@ (\ref{eq:FF_N}) \begin{equation} k_{j}^{f}-k_{j}^{0}=\frac{mU}{mU+1}\frac{k_{N+1}^{f}-k_{j}^{0}}{\mathcal{L}-\frac{mUN}{mU+1}},\label{eq:kfk0_denom} \end{equation} where in the r.h.s. only the first term from Eq.\@ (\ref{eq:kj}) is relevant for $k^f_{N+1}$ and $k^0_j$. The numerator for the states in Fig.\@ \ref{fig:SF_hierarchy_states}a becomes \begin{equation} k_{j}^{0}-P_{f}=k_{j}^{0}-k_{N+1}^{f}.\label{eq:k0Pf_num} \end{equation} Substitution of Eqs. (\ref{eq:kfk0_denom}, \ref{eq:k0Pf_num}) in Eq.\@ (\ref{eq:FF_N}) for one particle, say for $j=N$, cancels one factor $Z^2\sim\mathcal{L}^{-2}$, and the remaining part of the product, for $i\neq j$, in the denominator of the first line of Eq.\@ (\ref{eq:FF_N}) partially cancels the products in the second line of Eq.\@ (\ref{eq:FF_N}). The expression for the remaining $N-1$ particles is the same as Eq.\@ (\ref{eq:FF_N}) but with the numbers of terms in the products reduced by one, $N\rightarrow N-1$, giving \begin{multline} \left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}=\frac{Z^{2N-2}}{\mathcal{L}}\frac{\prod_{j}^{N-1}\left(k_{j}^{0}-P_{f}\right)^{2}}{\prod_{i,j}^{N-1,N}\left(k_{j}^{f}-k_{i}^{0}\right)^{2}}\\ \prod_{i<j}^{N-1}\left(k_{j}^{0}-k_{i}^{0}\right)^{2}\prod_{i<j}^{N}\left(k_{j}^{f}-k_{i}^{f}\right)^{2}.\label{eq:FF_Nm1} \end{multline} Repeating the procedure $N-1$ times we cancel the remaining $Z^{2N-2}$ factor completely (together with the rest of the terms) and obtain \begin{equation} \mathcal{L}\left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}=1.\label{eq:Aa} \end{equation} Corrections to this result originate from higher subleading orders in the solutions to the Bethe equations in Eq.\@ (\ref{eq:kj}) and are of order $O\left(\mathcal{L}^{-1}\right)$, much smaller than the leading-order value of one in Eq.\@ (\ref{eq:Aa}) in the thermodynamic limit. Substitution of Eq.\@ (\ref{eq:Aa}) in Eq.\@ (\ref{eq:A_continuum}) gives the value of the spectral function $A\left(k,\varepsilon\left(k\right)\right)=1$.
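This resolution of the $0\times\infty$ uncertainty can be illustrated numerically. The sketch below (our illustration) evaluates the closed-form form factor of Eq.\@ (\ref{eq:FF_N}) on quasimomenta generated from Eq.\@ (\ref{eq:kj}), for an $a$-level final state (the extra particle only) and for a $b$-level one (the extra particle plus one particle-hole pair); logarithms of the long products are summed to avoid floating-point underflow. The first weight comes out close to $1/\mathcal{L}$, in accord with Eq.\@ (\ref{eq:Aa}), while the second is suppressed by a further power of $\mathcal{L}^{-2}$.
\begin{verbatim}
import numpy as np

def quasimomenta(I, Lc, N, mU):
    """Quasimomenta to subleading order in 1/L for a set of integers I."""
    Leff = Lc - mU * N / (mU + 1.0)
    k0 = 2.0 * np.pi * np.asarray(I, float) / Leff
    return k0 - mU / (mU + 1.0) * (k0.sum() - k0) / Leff

def form_factor(kf, k0, Lc, mU):
    """|<f|psi^dag(0)|0>|^2 from the polynomial formula, via summed logs."""
    N = len(k0)
    Z = mU / (mU + 1.0) / (Lc - N * mU / (mU + 1.0))
    logw = 2 * N * np.log(Z) - np.log(Lc)
    logw += 2 * np.log(np.abs(k0 - kf.sum())).sum()
    logw -= 2 * np.log(np.abs(kf[None, :] - k0[:, None])).sum()
    for k in (k0, kf):                       # squared Vandermonde factors
        logw += 2 * sum(np.log(abs(k[j] - k[i]))
                        for i in range(len(k)) for j in range(i + 1, len(k)))
    return np.exp(logw)

N, Lc, mU = 21, 2000, 1.0
I0 = np.arange(-(N - 1) // 2, (N - 1) // 2 + 1)   # ground-state integers
k0 = quasimomenta(I0, Lc, N, mU)
Ia = np.append(I0, I0[-1] + 3)                    # a-level: extra particle
Ib = Ia.copy(); Ib[-2] += 1                       # b-level: plus one p-h pair
for I in (Ia, Ib):
    print(form_factor(quasimomenta(I, Lc, N + 1, mU), k0, Lc, mU))
\end{verbatim}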
The energies and the momenta of the excitations in Fig.\@ \ref{fig:SF_hierarchy_states}a form a single line on the spectral plane, like a single particle with dispersion $\varepsilon\left(k\right)=k^{2}/\left(2m^{*}\right)$, where the effective mass is renormalised by the Luttinger parameter $K$, $m^{*}=mK$.\cite{TS13} Note that, since we still resolve individual levels here, the delta functions in the definition of the spectral function in Eq.\@ (\ref{eq:A_continuum}) become discrete Kronecker deltas. Thus, $A\left(k,\varepsilon\right)$ at each discrete point $k,\varepsilon$ describes the probability of adding (removing) a particle, which is non-negative and bounded by one from above, instead of the probability density as in the continuum case. Dimensional analysis makes this distinction immediately clear. The excitations that have one singularity fewer ($N-1$ in total) can be visualised systematically as an extra electron-hole pair created in addition to adding an extra particle, see Fig.\ref{fig:SF_hierarchy_states}b. Starting from Eq.\@ (\ref{eq:FF_N}) and using the same procedure as before Eq.\@ (\ref{eq:FF_Nm1}), but applied $N-1$ instead of $N$ times, we obtain \begin{equation} \left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}=\frac{Z^{2}}{\mathcal{L}}\frac{\left(k_{2}^{f}-k_{1}^{f}\right)^{2}\left(k_{1}^{0}-P_{f}\right)^{2}}{\left(k_{1}^{f}-k_{1}^{0}\right)^{2}\left(k_{2}^{f}-k_{1}^{0}\right)^{2}},\label{eq:FF_2} \end{equation} where $k_{1}^{f},k_{2}^{f}$ and $k_{1}^{0}$ are the positions of the two particles and one hole in Fig.\@ \ref{fig:SF_hierarchy_states}b. Substitution of Eq.\@ (\ref{eq:FF_2}) in Eq.\@ (\ref{eq:A_continuum}) gives values of the spectral function $A\left(k,\varepsilon\right)\sim\mathcal{L}^{-2}$ that are smaller than the values for the excitations in Fig.\@ \ref{fig:SF_hierarchy_states}a (given by Eq.\@ (\ref{eq:Aa})) by a factor of $\mathcal{L}^{-2}$. For two singularities fewer ($N-2$ in total) we find $A\left(k,\varepsilon\right)\sim\mathcal{L}^{-4}$ and so on. This emerging structure separates the plethora of many-body excitations into a hierarchy according to the remaining powers of $\mathcal{L}^{-2}$ in their respective form factors. We label the levels of the hierarchy as $a,\;b,\;c$ reflecting the factors $\mathcal{L}^{-2n}$ with $n=0,\;1,\;2$. While the leading $a$-excitations form a discrete single-particle-like dispersion, see the $h0a$ and $p1a(l)$ lines in Fig.\@ \ref{fig:SF_spectral_function}, the spectral properties of the subleading excitations are described by a more complicated continuum of states on the energy-momentum plane. We will explore the $b$-excitations below. \begin{figure} \begin{centering} \includegraphics[width=0.95\columnwidth]{fig_new} \end{centering} \protect\caption{ \label{fig:SF_spectral_function}The spectral function for interacting spinless fermions in the region $-k_{\rm F}<k<k_{\rm F}$ ($k_{\rm F}<k<3k_{\rm F}$) labelled by $0\left(1\right)$. The grey areas mark non-zero values.
The green and the blue lines are modes of the hierarchy labelled as follows: $p\left(h\right)$ shows the particle(hole) sector, $k_{\rm F}$ is the Fermi momentum, $a,\;b,\;c$ respectively identify the level in the hierarchy in powers $0,\;1,\;2$ of $\mathcal{R}^{2}/L^{2}$, and $\left(r,l\right)$ specifies the origin in the range\textemdash modes on the edge have no such label.} \end{figure} All simple modes, formed by single particle- and hole-like excitations of the type in Fig.\@ \ref{fig:SF_hierarchy_states}b, in the range $-k_F<k<3 k_F$ are presented in Fig.\@ \ref{fig:SF_spectral_function}. We use the following naming scheme: $p\left(h\right)$ indicates the particle (hole) sector, $0\left(1\right)$ encodes the range of momenta $-k_{\rm F}<k<k_{\rm F}$ $\left(k_{\rm F}<k<3k_{\rm F}\right)$, and $a,\;b,\;c$ reflect the terms $\mathcal{L}^{-2n}$ with $n=0,\;1,\;2$. The suffix $\left(r\right)$ or $\left(l\right)$ marks a particle-like mode, e.g. the states in Fig.\@ \ref{fig:SF_hierarchy_states}b with $k_{1}^{f}=-k_{\rm F}-\gamma$, $k_{1}^{0}=k_{\rm F}$, and $k_{\rm F}>P_{f}=-2k_{\rm F}+k_{2}^{f}>-k_{\rm F}$ form the mode $p0b(l)$. Hole-like modes have no suffixes, e.g. the states in Fig.\@ \ref{fig:SF_hierarchy_states}b with $k_{1}^{f}=-k_{\rm F}-\gamma$, $k_{2}^{f}=k_{\rm F}+\gamma$, and $-k_{\rm F}<P_{f}=-k_{1}^{0}<k_{\rm F}$ form the mode $p0b$. Simple modes formed by excitations of lower levels of the hierarchy are obtained by translation of the $b$-modes constructed in this paragraph by integer multiples of $2 k_F$. A couple of simple modes formed by $c$-excitations are presented in Fig.\@ \ref{fig:SF_spectral_function}. They have the same naming scheme as the $b$-modes. Now we evaluate the spectral function along all simple $b$-modes in the range $-k_{\rm F}<k<3k_{\rm F}$. Let us start with the $p0b$ mode, see Fig.\@ \ref{fig:SF_spectral_function}. Along this mode the spectral function is a single-valued function of $k$, $A\left(k,\varepsilon_{p0b}\left(k\right)\right)$ where $\varepsilon_{p0b}\left(k\right)=k_{\rm F}^{2}/\left(mK\right)-k^{2}/\left(2mK\right)$. The states that form it belong to the $b$-excitations in Fig.\@ \ref{fig:SF_hierarchy_states}b with $k_{1}^{f}=-k_{\rm F}-\gamma$, $k_{2}^{f}=k_{\rm F}+\gamma$, and $k=P_{f}=-k_{1}^{0}$. Substituting this parameterisation in Eq.\@ (\ref{eq:FF_2}) we obtain \begin{equation} A\left(k,\varepsilon_{p0b}\left(k\right)\right)=\frac{16Z^{2}k_{\rm F}^{2}k^{2}}{\left(k^{2}-\left(k_{\rm F}+\gamma\right)^{2}\right)^{2}}.\label{eq:Ap0b} \end{equation} The spectral function along all other $b$-modes in Fig.\@ \ref{fig:SF_spectral_function} is calculated in the same way, and the results (together with those for the $a$-modes) are summarised in Table \ref{tab:SF_spectral_function_values}.
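The entries of Table \ref{tab:SF_spectral_function_values} can be cross-checked against the general formula. The sketch below (ours, reusing \texttt{quasimomenta()} and \texttt{form\_factor()} from the previous sketch) evaluates $\mathcal{L}$ times the form factor of Eq.\@ (\ref{eq:FF_N}) on the $p0b$ configurations and compares it with the closed form of Eq.\@ (\ref{eq:Ap0b}); the two should agree up to finite-size corrections in $1/N$ and $1/\mathcal{L}$.
\begin{verbatim}
import numpy as np

N, Lc, mU = 21, 2000, 1.0
gamma = 2.0 * np.pi / Lc
Z = mU / (mU + 1.0) / (Lc - N * mU / (mU + 1.0))
kF = np.pi * N / Lc
I0 = np.arange(-(N - 1) // 2, (N - 1) // 2 + 1)
k0 = quasimomenta(I0, Lc, N, mU)         # from the previous sketch

for n in (2, 5, 8):                      # hole integer: P_f = 2*pi*n/L
    # particles at -kF-gamma and kF+gamma, hole at -2*pi*n/L
    If = np.setdiff1d(np.append(I0, (I0[0] - 1, I0[-1] + 1)), [-n])
    kf = quasimomenta(If, Lc, N + 1, mU)
    k = 2.0 * np.pi * n / Lc
    exact = Lc * form_factor(kf, k0, Lc, mU)
    closed = 16 * Z**2 * kF**2 * k**2 / (k**2 - (kF + gamma) ** 2) ** 2
    print(f"k/kF = {k / kF:.2f}: {exact:.3e} vs {closed:.3e}")
\end{verbatim}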
\begin{table} \begin{ruledtabular} \begin{tabular}{r|c|c} \multicolumn{1}{r|}{$\textrm{ }$ } & $x=0$ & $x=1$\tabularnewline \hline $pxa$ & $-$ & $1$\tabularnewline \hline $hxa$ & $1$ & $-$\tabularnewline \hline $pxb$ & $\frac{16Z^{2}k_{\rm F}^{2}k^{2}}{\left(k^{2}-\left(k_{\rm F}+\gamma\right)^{2}\right)^{2}}$ & $\frac{4Z^{2}\gamma^{2}\left(k-k_{\rm F}+\frac{3}{2}\gamma\right)^{2}}{\left(k-k_{\rm F}+\gamma\right)^{2}\left(k-k_{\rm F}+2\gamma\right)^{2}}$\tabularnewline \hline $pxb\left(l\right)$ & $\frac{4Z^{2}\left(k_{\rm F}+k\right)^{2}}{k_{\rm F}^{2}}$ & $-$\tabularnewline \hline $pxb\left(r\right)$ & $\frac{4Z^{2}\left(k_{\rm F}-k\right)^{2}}{k_{\rm F}^{2}}$ & $-$\tabularnewline \hline $hxb$ & $-$ & $\frac{4Z^{2}\left(3k_{\rm F}-k-\gamma\right)^{2}\left(k_{\rm F}+k\right)^{2}}{k_{\rm F}^{2}\left(k-k_{\rm F}+\gamma\right)^{2}}$\tabularnewline \hline $hxb\left(l\right)$ & $\frac{4Z^{2}\gamma^{2}}{\left(k+k_{\rm F}+2\gamma\right)^{2}}$ & $\frac{Z^{2}k_{\rm F}^{2}k^{2}}{\left(\left(k+\gamma\right)^{2}-k_{\rm F}^{2}\right)^{2}}$\tabularnewline \hline $hxb\left(r\right)$ & $\frac{4Z^{2}\gamma^{2}}{\left(k-k_{\rm F}-2\gamma\right)^{2}}$ & $-$\tabularnewline \end{tabular} \end{ruledtabular} \caption{\label{tab:SF_spectral_function_values}Spectral weights along the $a$- and the $b$-modes for $-k_{\rm F}<k<k_{\rm F}$ ($k_{\rm F}<k<3k_{\rm F}$) labelled by $x=0\left(1\right)$. Terminology is the same as in Fig.\@ \ref{fig:SF_spectral_function}; $\gamma=2\pi/\mathcal{L}$ and $Z=mU/\left(mU+1\right)/\left(\mathcal{L}-NmU/\left(1+mU\right)\right)$.} \end{table} The amplitude of the subleading $b$-excitations does not vanish in the thermodynamic limit, even though it is proportional to $1/\mathcal{L}$. The limit involves both ${\cal L\rightarrow\infty}$ and the particle number $N\rightarrow\infty$ but keeps the density $N/{\cal L}$ finite. The spectral weights of the subleading modes $p0b$, $h1b$, and $h1b\left(r\right)$ from Table \ref{tab:SF_spectral_function_values} are proportional to the density squared for some values of $k$; e.g. the mode $p0b$ at $k=k_{\rm F}$ gives \begin{equation} A\left(k_{\rm F},\varepsilon_{p0b}\left(k_{\rm F}\right)\right)=\left(\frac{mU}{1+mU}\right)^{2}\frac{N^{2}}{\left(\mathcal{L}-\frac{NmU}{1+mU}\right)^{2}}, \end{equation} see Table \ref{tab:SF_spectral_function_values} for the other modes; such weights remain apparent in the infinite system. Assessing further the continuum of $b$-excitations, we consider how the spectral function evolves as one moves slightly away from the strongest $a$-mode. Moving just a single quantum of energy away from the $a$-mode requires the addition of an electron-hole pair on top of the configuration of quasimomenta in Fig.\@ \ref{fig:SF_hierarchy_states}a. This immediately moves such states one step down the hierarchy to $b$-excitations. Let us consider the spectral function as a function of energy only, making a cut along a line of constant $k$. The energies of the electron-hole pairs themselves are regularly spaced around the Fermi energy, with spacing set by the slope $v_{\rm F}$. However, the degeneracy of the many-body excitations due to the spectral linearity makes the level spacings non-equidistant. We smooth this irregularity by averaging the spectral function over energy, \begin{equation} \overline{A}\left(k,\varepsilon\right)=\int_{-\frac{\epsilon_{0}}{2}}^{\frac{\epsilon_{0}}{2}}\frac{d\epsilon}{\epsilon_{0}}A\left(k,\varepsilon+\epsilon\right)\label{eq:average} \end{equation} where $\epsilon_{0}$ is a small energy scale.
Then, using the parametrisation of the $b$-excitations in Fig.\@ \ref{fig:SF_hierarchy_states}b in the vicinity of the principal parabola, we linearise the energies of the extra electron-hole pairs around the Fermi energy and of the particle around its original position. We then substitute the resulting expressions for $k_{1,2}^{f}$ and $k_{1}^{0}$ in terms of the energy $E$ from Eq.\@ (\ref{eq:E_kj}) in Eq.\@ (\ref{eq:FF_2}), similarly to the procedure used to obtain Eq.\@ (\ref{eq:Ap0b}). Finally, we use the averaging rule in Eq.\@ (\ref{eq:average}) and obtain \begin{eqnarray} \overline{A}\left(k,\varepsilon\right) & = & \frac{Z^{2}2k_{\rm F}\left(3k^{2}+k_{\rm F}^{2}\right)\theta\left(\varepsilon_{h0a}\left(k\right)-\varepsilon\right)}{m\gamma K\left(\varepsilon_{h0a}\left(k\right)-\varepsilon\right)}\label{eq:Ashape0}\\ & & \qquad\qquad\qquad\qquad\textrm{for}\;-k_{\rm F}<k<k_{\rm F},\nonumber \\ \overline{A}\left(k,\varepsilon\right) & = & \frac{Z^{2}\left(k+\textrm{sgn}\left(\varepsilon-\varepsilon_{p1a\left(l\right)}\left(k\right)\right)k_{\rm F}\right)^{3}}{m\gamma K\left|\varepsilon-\varepsilon_{p1a\left(l\right)}\left(k\right)\right|}\label{eq:Ashape1}\\ & & \qquad\qquad\qquad\qquad\textrm{for}\;k_{\rm F}<k<3k_{\rm F}\nonumber \end{eqnarray} where $\gamma=2\pi/\mathcal{L}$ and $\varepsilon_{h0a}\left(k\right)=\varepsilon_{p1a\left(l\right)}\left(k\right)=k^{2}/\left(2mK\right)$ is the parabolic dispersion of the $a$-mode. The result in Eqs. (\ref{eq:Ashape0}, \ref{eq:Ashape1}) can be interpreted as the line-shape of the $a$-mode. However, it has an unusual form---namely that of a divergent power law. The divergence at the parabola is cut off by the lattice spacing, recovering $\overline{A}\left(k,\varepsilon_{h0a,p1a}\left(k\right)\right)=1$ from Eq.\@ (\ref{eq:Aa}). In Eq.\@ (\ref{eq:Ashape1}) the line-shape is asymmetric due to the different prefactors $\left(k\pm k_{\rm F}\right)^{3}$ above and below the line. In Eq.\@ (\ref{eq:Ashape0}) the higher-energy part ($\varepsilon>\varepsilon_{h0a}\left(k\right)$) is absent due to the absence of excitations in this region, which is forbidden by the kinematic constraint. Not every simple mode marks a distinct feature. The states on at least one side, above or below the mode in energy, have to belong to a different level of the hierarchy than the mode itself, which results in a divergence or in a jump of the spectral function in the continuum of excitations. Otherwise the spectral function is continuous across all of the modes that belong to the same level of the hierarchy as the excitations around them. The $a$-modes are distinct since the excitations around them belong to the different $b$-level. All modes on the spectral edges, $p0b$, $p1b$, $h1b$, and so on, are distinct since on one side there are no excitations (due to the kinematic constraint) and on the other side there is a finite density of states, resulting in a jump of the spectral function. An example of an observable subleading mode in the continuum is $h0b\left(r\right)$. On the higher-energy side of this mode the excitations are described by the same type of states as in Fig.\@ \ref{fig:SF_hierarchy_states}b, but on the lower-energy side the creation of an additional electron-hole pair in the quasimomenta results in states that have two non-cancelled singularities in Eq.\@ (\ref{eq:FF_N}), which lowers their level of the hierarchy from $b$ to $c$. This, in turn, results in an observable feature in the spectral function at the position of the $h0b\left(r\right)$ mode.
On the other hand, the $p0b\left(r\right)$ and $h1b\left(l\right)$ modes in the continuum are not detectable since the excitations on both sides around them belong to the same $b$-level of the hierarchy. The observability of all other modes can easily be assessed in the same way by considering their corresponding states in the form of Fig.\@ \ref{fig:SF_hierarchy_states}a and Fig.\@ \ref{fig:SF_hierarchy_states}b and the excitations around the modes. The structure of the matrix element in Eq.\@ (\ref{eq:FF_dsf}) is quite similar to the matrix element in Eq.\@ (\ref{eq:FF_N}). Thus the dynamical structure factor exhibits the same hierarchy of excitations (and modes formed by them) as the spectral function analysed in detail in this Subsection. The strongest excitations correspond to only a single electron-hole pair, the first subleading level corresponds to two electron-hole pairs, and so on. Also, a similar hierarchy of excitations was observed in numerical studies of spin chains, \emph{e.g.} Refs. \onlinecite{JS05,JS05_SM,JS07,Biegel02, Biegel03, Takahashi04}. There, it was found that only a small number of electron-hole pairs is sufficient to saturate the sum rules for the dynamic response functions. For example, integration of Eq.\@ (\ref{eq:A_continuum}) over the energy and momentum, \begin{equation} \int d\varepsilon dkA\left(k,\varepsilon\right)=\mathcal{L}-N,\label{eq:A_sumrule} \end{equation} gives the number of empty sites. If a sum over only a small number of electron-hole pairs in the intermediate state $f$ in Eq.\@ (\ref{eq:A_continuum}) is sufficient to fulfil the rule in Eq.\@ (\ref{eq:A_sumrule}) then a few electron-hole pairs already account for the major part of the spectral density and states with more electron-hole pairs have vanishing spectral weights, as in the hierarchy of modes established in this work. Our analytic work demonstrates how this can arise in a Bethe ansatz solution, though the numerical studies of spin chains were done at large fillings ($\mathcal{L}\sim N$), for which our result in Eq.\@ (\ref{eq:FF_N}) is not directly applicable. \subsection{Spectral edge modes} In this Subsection we consider another important role played by the continuum of eigenstates, namely how they form the spectral function close to the spectral edges. These edges separate regions where there are excitations from regions where there are none, see the borders between the white and grey regions in Fig.\@ \ref{fig:SF_spectral_function}. The recently proposed model of a mobile impurity\cite{Khodas06, Khodas072, Imambekov09, Imambekov092} gives a field-theoretical description of the dynamic response functions around the spectral edges, predicting a general (divergent) power-law behaviour $A(k,\varepsilon)\sim\left|\varepsilon-\varepsilon_{\textrm{edge}}\left(k\right)\right|^{-\alpha}$, see Refs. \onlinecite{Pereira06, Khodas06, Pereira072, Imambekov09, Imambekov092, Khodas07, Khodas072, Imambekov08, Kamenev09, Schmidt10, Schmidt102, Essler10}. For spinless fermions the exponent of the spectral function is given by\cite{TS13} \begin{equation} \alpha=1-\frac{K}{2}\left(1-\frac{1}{K^{2}}\right)\label{eq:SF_alpha} \end{equation} for both the particle ($p0b$) and the hole ($h0a$) edges, where Eq. (\ref{eq:SF_Luttinger_params}) gives the Luttinger parameter $K$ in terms of the microscopic parameter of the model in Eq.\@ (\ref{eq:H}). Here we will compare the field-theoretical result in Eq.\@ (\ref{eq:SF_alpha}) with the microscopic calculation in Eqs. (\ref{eq:kj}, \ref{eq:FF_N}).
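For reference, the mobile-impurity exponent of Eq.\@ (\ref{eq:SF_alpha}) is a simple function of the Luttinger parameter and can be tabulated as follows (a minimal sketch; the function name is ours):
\begin{verbatim}
def edge_exponent(K):
    """Eq. (SF_alpha): power-law exponent at the particle (p0b) and
    hole (h0a) spectral edges for spinless fermions; K is the Luttinger
    parameter, with K = 1 (free fermions) giving alpha = 1."""
    return 1 - (K / 2) * (1 - 1 / K**2)
\end{verbatim}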
We find agreement in many cases, but interestingly we also find some cases where the mobile-impurity results are not consistent with the analytic solution, suggesting that this field-theoretical approach is not the complete story. The hole edge is an $a$-mode, $h0a$, whereas the continuum around it is dominated by $b$-excitations. The spectral function formed by these $b$-excitations has already been calculated in Eq.\@ (\ref{eq:Ashape0}), giving the power-law behaviour with the exponent $\alpha=1$. Note that for spinless fermions the Luttinger parameter deviates only slightly from $K=1$ for an arbitrary magnitude of the short-range interactions. To leading order this makes the result in Eq.\@ (\ref{eq:SF_alpha}) $\alpha=1$ for all values of $U$; the small ($U$-dependent) deviations make $\alpha<1$ and require subleading terms in $1/\mathcal{L}$ in the Bethe ansatz calculation in Eqs. (\ref{eq:kj}, \ref{eq:FF_N}). Thus the result of the microscopic calculation coincides with the prediction of the mobile-impurity model in Eq.\@ (\ref{eq:SF_alpha}) for the hole edge. \begin{figure} \centering{\includegraphics[width=0.95\columnwidth]{level_spacings}}\protect\caption{\label{fig:SF_edge_level_spacings}Distributions of level spacings in the vicinity ($\textrm{max}\left(E_{f}-\varepsilon_{p0b}\left(k\right)\right)/\varepsilon_{\rm F}=1/100$) of the particle mode $p0b$ accumulated along the energy axis for the momenta a) $k=0$ and b) $k=0.4355k_{\rm F}$; $N=2\cdot10^{3}$ and $L=2\cdot10^{5}$.} \end{figure} The particle edge is a $b$-mode, $p0b$, and the excitations around it belong to the same $b$-level of the hierarchy as the mode itself. Parameterising the $b$-excitations in this region of the continuum as in Fig.\@ \ref{fig:SF_hierarchy_states}b and using the averaging procedure in Eq.\@ (\ref{eq:average}), repeating the same steps as for Eqs. (\ref{eq:Ashape0}, \ref{eq:Ashape1}), we obtain \begin{equation} \overline{A}\left(\varepsilon\right)\sim\left(\varepsilon-\varepsilon_{p0b}\right)^{3} \end{equation} for $k\approx 0$ to \begin{equation} \overline{A}\left(\varepsilon\right)\sim\textrm{const}-\left(\varepsilon-\varepsilon_{p0b}\right) \end{equation} for $k\approx k_{\rm F}$, where $\varepsilon_{p0b}\left(k\right)=k_{\rm F}^{2}/\left(mK\right)-k^{2}/\left(2mK\right)$. This is a new power-law behaviour, characterised by an exponent $\alpha$ that varies substantially with $k$, from $\alpha=-3$ for $k=0$ to $\alpha=-1$ for $k\approx\pm k_{\rm F}$, and is {\em different} from the predictions of the mobile-impurity model in Eq.\@ (\ref{eq:SF_alpha}). Here we observe that the phenomenological model in Refs.\@ \onlinecite{Khodas06,Khodas072, Imambekov09} is correct only for the $a$-mode spectral edge, but higher-order edges would require a different field-theoretical description. On a more detailed level, the difference between the particle and the hole edges manifests itself in the different statistics of the level spacings around the edges. Evaluation of the density of states, $\nu\left(k,\varepsilon\right)=\sum_{f}\delta\left(\varepsilon-E_{f}\right)\delta\left(k-P_{f}\right)$, is performed using $E_{f}$ from Eq.\@ (\ref{eq:E_kj}) for a fixed momentum $k$. For $b$-excitations in Fig.\@ \ref{fig:SF_hierarchy_states}b we obtain the same result, \begin{equation} \nu\left(k,\varepsilon\right)\sim\left|\varepsilon-\varepsilon_{p0b\left(h0a\right)}\left(k\right)\right|, \end{equation} in the vicinity of both the particle $p0b$ and the hole $h0a$ edges.
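The level-spacing statistics analysed next can be accumulated numerically in an elementary way; the sketch below (an illustration, with the array of eigenenergies $E_{f}$ at fixed $k$ assumed to come from Eq.\@ (\ref{eq:E_kj})) produces distributions of the type shown in Fig.\@ \ref{fig:SF_edge_level_spacings}:
\begin{verbatim}
import numpy as np

def level_spacing_distribution(energies, bins=50):
    """Histogram of nearest-neighbour level spacings at fixed momentum k.
    'energies' is a hypothetical array of eigenenergies E_f near an edge."""
    spacings = np.diff(np.sort(energies))
    hist, edges = np.histogram(spacings, bins=bins, density=True)
    return 0.5 * (edges[1:] + edges[:-1]), hist
\end{verbatim}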
However, the statistics of the level spacings \begin{equation} \mathcal{P}\left(s,k\right)=\sum_{f}\delta\left(s-\left(E_{f+1}-E_{f}\right)\right)\delta\left(k-P_{f}\right), \end{equation} where the $E_{f}$ are assumed sorted by their values, is different in the two regions. For the hole edge the energy levels are spaced regularly and are governed by the slope of the dispersion at the Fermi energy, $\approx v$. This gives a bimodal $\mathcal{P}\left(s,k\right)$ with sharp peaks at $s=0$ (due to the many-body degeneracy of the almost linear spectrum at $E_{\rm F}$) and at $s\approx v\gamma$. For the particle edge the statistics of the level spacings varies from a regular level spacing (for $k$ commensurate with $k_{\rm F}$, Fig.\@ \ref{fig:SF_edge_level_spacings}a) to an irregular distribution (for incommensurate $k$, Fig.\@ \ref{fig:SF_edge_level_spacings}b). The change in the characteristics of the underlying statistics is another microscopic difference between the particle ($b$-mode) and the hole ($a$-mode) edges that signals a difference in the underlying physics of the particle and the hole spectral edges beyond the low-energy region. \section{Local density of states} \begin{figure} \centering{\includegraphics[width=0.95\columnwidth]{fig2}}\protect\caption{\label{fig:SF_LDOS}The local density of states for interacting spinless fermions: the red and the green lines show the contribution of $a$- and $b$-excitations and the blue line indicates the Luttinger-liquid regime. The inset is a log-log plot around the Fermi energy: the points are numerical data for $N=71$, $L=700$, $mV=6$ giving $K=0.843$, and the dashed line is $n\left(\varepsilon\right)=\textrm{const}\left|\varepsilon-\mu\right|^{\left(K+K^{-1}\right)/2-1}$.} \end{figure} Now we turn to another macroscopic observable, the local density of states (LDOS), which describes the probability of tunnelling a particle in or out of the wire at a given position in space and at a given energy. Since the model in Eq.\@ (\ref{eq:H}) is translationally invariant the LDOS depends only on a single variable\textendash energy\textendash making it a more convenient quantity for studying qualitatively how the physical properties change from low to high energies. In this section we will show how the power-law result of the Tomonaga-Luttinger model\cite{GiamarchiBook} at low energy crosses over into the behaviour dominated by the hierarchy of modes at high energy. The probability of local tunnelling at energy $\varepsilon$ and at position $x$ is described by\cite{AGD} $n\left(x,\varepsilon\right)=-\textrm{Im}\big[\int dte^{-i\varepsilon t}G(x,x,t)\big]\textrm{sgn}(\varepsilon-\mu)/\pi$ where $\mu$ is the chemical potential and $G\left(x,x',t\right)=-i\left\langle T\left(e^{-iHt}\psi\left(x\right)e^{iHt}\psi^{\dagger}\left(x'\right)\right)\right\rangle $ is the two-point correlation function at zero temperature. In terms of eigenmodes it reads \begin{multline} n\left(\varepsilon\right)=\mathcal{L}\sum_{f}\Big[\left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}\delta\left(\varepsilon-E_{f}+E_{0}\right)\\ +\left|\left\langle 0|\psi\left(0\right)|f\right\rangle \right|^{2}\delta\left(\varepsilon+E_{f}-E_{0}\right)\Big].\label{eq:LDOS_def} \end{multline} where the coordinate dependence drops out explicitly, the eigenenergies $E_{f}$ have already been calculated in Eq.\@ (\ref{eq:E_kj}), and the matrix elements $\left|\left\langle 0|\psi\left(0\right)|f\right\rangle \right|^{2}$ are given in Eq.\@ (\ref{eq:FF_N}).
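A direct numerical evaluation of Eq.\@ (\ref{eq:LDOS_def}) amounts to binning the form-factor weights in energy; a minimal sketch (with hypothetical input arrays) reads:
\begin{verbatim}
import numpy as np

def local_density_of_states(energies, weights, n_bins=400):
    """Eq. (LDOS_def): n(eps) = sum_f w_f * delta(eps - E_f), with weights
    w_f = L * |<f|psi(0)|0>|^2 and energies E_f measured from E_0.
    Delta functions are smeared over bins of width 'de'."""
    hist, edges = np.histogram(energies, bins=n_bins, weights=weights)
    de = edges[1] - edges[0]
    return 0.5 * (edges[1:] + edges[:-1]), hist / de
\end{verbatim}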
Note that the definition in Eq.\@ (\ref{eq:LDOS_def}) is connected to the definition of the spectral function in Eq.\@ (\ref{eq:A_continuum}) via \begin{equation} n\left(\varepsilon\right)=\int dkA\left(k,\varepsilon\right).\label{eq:rho_int_A} \end{equation} The leading contribution for $\varepsilon>0$ comes from $a$-excitations. Substituting the matrix element for the $a$-excitations from Eq. (\ref{eq:Aa}) we sum over the single-particle-like excitations (with $\varepsilon=k^{2}/\left(2mK\right)$) that form the mode and obtain \begin{equation} n\left(\varepsilon\right)=\sqrt{\frac{2mK}{\varepsilon}}\theta\left(\varepsilon\right).\label{eq:LDOS_a} \end{equation} This result gives the same $1/\sqrt{\varepsilon}$ functional dependence \textendash see the red line in Fig.\@ \ref{fig:SF_LDOS}\textendash and the same $1/\sqrt{\varepsilon}$ van Hove singularity at the bottom of the band $\varepsilon=0$ as the free-particle model. For $\varepsilon<0$ the leading contribution to $n\left(\varepsilon\right)$ comes from $b$-excitations. Instead of performing a summation in Eq.\@ (\ref{eq:LDOS_def}) over every $b$-excitation in this region, we use the intermediate result in Eq.\@ (\ref{eq:Ashape0}), in which the matrix elements of $b$-excitations in Eq.\@ (\ref{eq:FF_2}) are already smoothed over many eigenstates, together with the relation in Eq.\@ (\ref{eq:rho_int_A}). Evaluating the integral over $k$ in Eq.\@ (\ref{eq:rho_int_A}), after the substitution of Eq.\@ (\ref{eq:FF_2}) into it, for $\varepsilon<0$ we obtain \begin{multline} n\left(\varepsilon\right)=\frac{2Z^{2}k_{\rm F}^{2}}{\gamma\mu K}\theta\left(-\varepsilon\right)\\ \times\left[2\left(1-\frac{3\left|\varepsilon\right|}{\mu}\right)\sqrt{\frac{\mu}{\left|\varepsilon\right|}}\cot^{-1}\left(\sqrt{\frac{\left|\varepsilon\right|}{\mu}}\right)+6\right].\label{eq:rho_b} \end{multline} There is a finite probability to find a particle below the bottom of the conduction band\textendash green line in Fig.\@ \ref{fig:SF_LDOS}\textendash which is allowed only due to interactions between many particles. The factor $Z$ is proportional to the interaction strength $V$\textendash see Eq.\@ (\ref{eq:FF_N})\textendash making $n\left(\varepsilon\right)=0$ for $\varepsilon<0$ in the free particle limit of $V=0$. At the bottom of the band, below the $\varepsilon=0$ point in Fig.\@ \ref{fig:SF_LDOS}, the result in Eq.\@ (\ref{eq:rho_b}) contains another van Hove singularity, \begin{equation} n\left(\varepsilon\right)=\frac{2\pi Z^{2}k_{\rm F}^{2}}{\gamma K\sqrt{\mu\left|\varepsilon\right|}}, \end{equation} which also disappears when $V=0$. The appearance of the same exponent $1/\sqrt{\left|\varepsilon\right|}$ as in Eq.\@ (\ref{eq:LDOS_a}) seems to be coincidental. Around the Fermi energy (the point $\varepsilon=\mu$ in Fig.\@ \ref{fig:SF_LDOS}) the Tomonaga-Luttinger model predicts a power-law suppression of the LDOS, \begin{equation} n\left(\varepsilon\right)\sim\left|\varepsilon-\mu\right|^{\left(K+K^{-1}\right)/2-1},\label{eq:LDOS_LL} \end{equation} e.g. see the book in Ref.\@ \onlinecite{GiamarchiBook}. However, the result for the $a$-mode in Eq.\@ (\ref{eq:LDOS_a}) is finite at this point, $n\left(\mu\right)=\sqrt{2mK/\mu}$. In order to resolve the apparent discrepancy we evaluate $n\left(\varepsilon\right)$ numerically around the Fermi energy using the determinant representation of the form factors, Eqs.
(\ref{eq:psi_1_ABA}, \ref{eq:Mab}) instead of Eq.\@ (\ref{eq:FF_N}), which accounts for all orders in $1/\mathcal{L}$, and indeed find a suppression of the LDOS around $\varepsilon=\mu$, see the blue line in the inset in Fig.\@ \ref{fig:SF_LDOS}. This signals that the leading order in $1/\mathcal{L}$, which gives the $\mathcal{L}\left|\left\langle f|\psi^{\dagger}\left(0\right)|0\right\rangle \right|^{2}=1$ result, is insufficient at low energies. Very close to the Fermi point (the linear region of the single-particle dispersion) all $1/\mathcal{L}$ orders of the Bethe ansatz calculation are needed to reproduce the result of the Tomonaga-Luttinger model, see the dashed line in the inset in Fig.\@ \ref{fig:SF_LDOS}. However, away from the linear region the particle-hole symmetry of the Tomonaga-Luttinger model is broken by the finite curvature of the dispersion and only the leading $1/\mathcal{L}$ order in Eq. (\ref{eq:FF_N}) is sufficient to account for the main contribution there. The general picture emerging in Fig.\@ \ref{fig:SF_LDOS} is a power-law crossover between different energy scales. At low energies (blue region in Fig.\@ \ref{fig:SF_LDOS}) Eq.\@ (\ref{eq:FF_N}) breaks down and the collective modes of the Tomonaga-Luttinger model are a better description of the system. At high energies (the red and the green regions in Fig.\@ \ref{fig:SF_LDOS}) the hierarchy of modes, which directly follows from Eqs. (\ref{eq:FF_N}, \ref{eq:kj}), becomes the dominant physical picture. For spinless fermions the extent of the crossover region is large due to the smallness of the deviations from $K=1$ for arbitrary short-range interactions. For very small exponents $\left[\left(K+K^{-1}\right)/2-1\right]\ll1$ the power-law in Eq.\@ (\ref{eq:LDOS_LL}) deviates significantly from $1$ only in an extremely narrow region around $\varepsilon=\mu$, leaving a large window of energies where it overlaps with the $a$-mode result in Eq.\@ (\ref{eq:LDOS_a}). \section{Experiments on spinful fermions} So far in this paper we have established the theoretical framework for expecting a hierarchy of modes in an interacting system at high energy. Now we turn to a measurement of tunnelling of electrons in a one-dimensional (1D) nanostructure, which gives experimental evidence for the existence of the hierarchy. Electrons have spin 1/2, which does not correspond directly to the model of spinless fermions in Eq.\@ (\ref{eq:H}), and there is currently no known method for calculating the necessary form factors for spinful fermions. However, the general picture that emerges from the experiment is qualitatively the same as our result in the theory part of this paper. \begin{figure} \includegraphics[width=0.98\columnwidth]{deviceYJ}\protect\caption{\label{fig:device} Schematic of the device made out of a double-well heterostructure. The dark blue and cream layers are the lower and upper quantum wells, respectively. The lower layer hosts the two-dimensional electron gas (2DEG). The wires are defined in the upper layer by gating. The gold top layer represents metallic gates deposited on the surface of the semiconductor heterostructure. The array of parallel `finger' gates defines the 1D wires in the upper well. The white lines represent air bridges joining the finger gates together. Current is injected from the ohmic contact on the right solely into the upper well through the constriction at top right in the diagram.
The constriction is formed and pinched off by a split pair of gates, and charge is induced again in the upper well in the constriction by a gate in the centre of the channel. The current then flows into the 1D wires via the narrow, nominally 2D, regions shown in light blue. Tunneling to the 2DEG below is possible, and this gives a small `parasitic' current in parallel with the tunnel current from the 1D wires. To tune this tunneling off resonance in the regions of interest by changing the density, a `p' gate is placed above the `p' regions and a voltage $V_{\rm P}$ is applied. Current is prevented from flowing from the upper well into the left-hand ohmic contact by a barrier gate shown on the left, which only depletes the upper well. The red arrow shows the direction of the externally applied magnetic field $B$, which is in the plane of the wells and perpendicular to the wires.} \end{figure} The design of our device\cite{Jompol09} is based on a high-mobility GaAs--AlGaAs double-quantum-well structure (dark blue and cream layers in Fig.\@ \ref{fig:device}), with electron densities around $3$ and $2\times10^{15}$\,m$^{-2}$ in the upper and lower layers, respectively, before application of gate voltages. Electrons in the upper layer are confined to a 1D geometry (`wires') in the $x$-direction by applying a negative voltage to split `finger' gates on the surface (gold layer in Fig.\@ \ref{fig:device}). Electrons underneath the gates are completely depleted, but electrons below the gap between gates are squeezed into a narrow 1D wire. The extremely regular wires are arranged in an array containing $\sim 600$ of them to boost the signal. The small lithographic width of the wires, $\sim 0.18\,\mu$m, provides a large energy spacing between the first and second 1D subbands defined by spatial modes perpendicular to the wires ($\sim 3-5$\,meV, probably somewhat smaller than for overgrown wires\cite{Yacoby02}). This allows a wide energy window for electronic excitations in the single 1D subband that covers a range of a few chemical potentials of the 1D system. The lower 2DEG (dark blue in Fig.\@ \ref{fig:device}) is separated from the wires by a $14$\,nm tunnel barrier. The wafer is doped with Si symmetrically in the AlGaAs barriers above and below the pair of wells. The doping is separated from the wells by spacer layers. The spacing between the centres of the two quantum wells is nominally $d=32$\,nm, but we find that a value of $d=35$\,nm fits the data better; this can be explained by the fact that the centres of the wavefunctions are slightly further apart owing to the opposite electric fields in each well. The 2DEG in the lower (dark blue) layer is used as a controllable injector or collector of electrons for the 1D system.\cite{Kardynal96} The current $I$ tunneling in the $z$-direction between the layers is proportional to the convolution of the 1D and 2D spectral functions (a pair of peaks at $k_x=\pm k_{\rm F}$ broadened in $k_y$ by the 1D confinement, and a circle, respectively). An in-plane magnetic field $B$ applied in the $y$-direction, perpendicular to the wires (shown by the red arrow in Fig.\@ \ref{fig:device}), produces a Lorentz force that changes the longitudinal momentum $k_x$ acquired while tunneling between layers by $\Delta k=eBd/\hbar$, where $e$ is the electronic charge. Thus $B$ shifts the spectral functions in $k_x$ relative to each other, and so probes the momentum.
One spectral function can also be shifted relative to the other in energy by applying a voltage $V$ between the layers, in order to probe the 1D and 2D dispersion relations at different energies. The conductance $G={\rm d}I/{\rm d}V$ has a peak when sharp features in the spectral functions have significant overlap. At $V=0$ this occurs when $\Delta k$ is equal to the sum or difference of the Fermi wavenumbers $k_{\rm F}$ and $k_2$ of the 1D and 2D systems respectively, so there are two peaks for $B>0$, at $B_\pm=\frac{\hbar}{ed}|k_2\pm k_{\rm F}|$. By sweeping $B$ and $V$ one can map out the dispersion relation of states in each layer. The range of magnetic fields that we apply to the system is still within the regime of Pauli paramagnetism for the electron densities in our samples. \section{Low energy} \begin{figure} \includegraphics[width=0.98\columnwidth]{S-C_zoom_in}\protect\caption{\label{fig:sc_separation} Intensity plot of ${\rm d}G/{\rm d}B$ at low energies around the Fermi point $k_{\rm F}$. Spin (S) and charge (C) dispersions are indicated by dashed lines. The dotted lines indicate the parabolae expected in the non-interacting model. The finger-gate voltage $V_{\rm F}=-0.70$\,V and the temperature $T\sim300$\,mK.} \end{figure} \begin{figure*}[t] \includegraphics[width=2\columnwidth]{fig_stack}\protect\caption{\label{fig:experiment_main} Intensity plots of ${\rm d}G/{\rm d}V$ (left column, in $\mu$S/mV) and ${\rm d}G/{\rm d}B$ (right column, in $\mu$S/T) from below $-k_{\rm F}$ to above $3k_{\rm F}$ and from $\sim-2\mu$ to $\sim 2\mu$, for various finger-gate voltages: $-0.60$\,V (a, b), $-0.65$\,V (c, d), $-0.68$\,V (e, f) and $-0.70$\,V (g--j). The solid black lines map out the dispersion of the lower (2D) layer. The green solid line marks $a$-modes, thick and thin dashed green lines, $p1b$ and $h1b$ modes, respectively, and dashed blue, higher-$k$ modes (as in Fig.\@ \ref{fig:SF_spectral_function}). Dot-dashed yellow (blue) and dotted white (cyan) lines show second and third 1D subbands (2D dispersion measured by those subbands), respectively (though the third is empty, electrons can tunnel into it from the 2D layer and hence there are sometimes signs of its effects for $V>0$, especially near $B=0$). Dotted magenta and blue lines are `parasitic' 2D dispersions of the two layers. The voltage on the gate over this region $V_{\rm P}=0$\,V except for e, f ($V_{\rm P}=0.2$\,V) and i, j ($V_{\rm P}=0.3$\,V), which shifts the parabolae to the right without changing the signal from the 1D wires. The lines have all been adjusted to take account of the capacitive coupling between the layers. Spin (S) and charge (C) modes are indicated with black dashed lines. $T\sim300$\,mK. 
See Table \ref{tab:vcvs_ratio} for the densities and the ratio $v_{{\rm c}}/v_{{\rm s}}$ for each gate voltage.} \end{figure*} \begin{table} \begin{ruledtabular} \begin{tabular}{c|c|c|c|c} $V_\textrm{F}$ (V) & $n_{\rm 2D} (10^{15}\,{\rm m}^{-2})$ & $n_{\rm 1D} (10^7\,{\rm m}^{-1})$ & $k_{\rm F} (10^7\,{\rm m}^{-1})$ & $v_c/v_s$\tabularnewline \hline $-0.60$ & 1.67 & 5.68 & 8.9 & $1.5$ \tabularnewline \hline $-0.65$ & 1.65 & 4.99 & 7.8 & $1.6$ \tabularnewline \hline $-0.68$ & 1.52 & 4.79 & 7.5 & $1.5$ \tabularnewline \hline $-0.70$ & 1.48 & 4.60 & 7.2 & $1.8$ \tabularnewline \end{tabular} \end{ruledtabular} \caption{\label{tab:vcvs_ratio} Densities of the 2D layer ($n_{\rm 2D}$) and of the 1D wires ($n_{\rm 1D}$), the 1D Fermi wavevector $k_{\rm F}$ (all to about $\pm 1\%$), and the ratio of the charge and the spin velocities at low energies (to about $\pm 5\%$), extracted from the gradients of the S and C lines in Fig. \ref{fig:experiment_main}, for different finger-gate voltages $V_\textrm{F}$.} \end{table} First, we measure the tunnelling conductance $G={\rm d}I/{\rm d}V$ in a small range of voltages and magnetic fields around $V=0$ and $B=B_+=3.15$\,T, which corresponds to a region of the momentum-energy plane around the Fermi point ($\varepsilon=\mu, k=k_{\rm F}$), see Fig.\@ \ref{fig:sc_separation}. Below the Fermi energy we observe splitting of the single-particle line into two lines with different dispersions---spin (S) and charge (C) separation\cite{Jompol09,Yacoby02}---giving two different slopes $v_{\rm s}$ and $v_{\rm c}$ (black dashed lines in Fig.\@ \ref{fig:sc_separation}). We assume that $v_{\rm s}$ is the same as for non-interacting electrons and so take it to be the gradient of the parabola at $V=0$. We estimate $v_{\rm c}$ from the positions of the steepest gradient and hence obtain $v_{\rm s}\approx1.2\times10^{5}$\,ms$^{-1}$ and $v_{\rm c}\approx2.3\times10^{5}$\,ms$^{-1}$ at the finger-gate voltage $V_{\rm F}=-0.70$\,V. Theoretically, the low-energy physics of the interacting 1D electrons is described well by a spinful generalisation of the Luttinger-liquid model.\cite{GiamarchiBook} Its excitations are collective hydrodynamic-like modes that are split into charge-only and spin-only excitations. For any finite strength of the interactions between fermions the two types of modes have linear dispersions with different slopes $v_{\rm c}$ and $v_{\rm s}$. In the absence of interactions the difference between the two velocities vanishes, in accordance with the free-electron model, in which the spin degree of freedom does not affect the spectrum but results only in the double degeneracy of the fermionic states. Thus the ratio $v_{\rm c}/v_{\rm s}$ serves as a good measure of the interaction strength. Since the Coulomb interaction between electrons is repulsive, the charge branch always has the steeper slope, $v_{\rm c}\geq v_{\rm s}$ (see Ref.\@ \onlinecite{GiamarchiBook}). Thus the ratio varies from $1$ for free particles to $\infty$ for infinitely strongly repelling ones. In our experiment we measure the tunnelling of electrons and observe two peaks that we attribute to the charge and the spin dispersions. The pair of velocities above gives a large $v_{{\rm c}}/v_{{\rm s}}\approx1.8\pm0.1$ (for $V_\textrm{F}=-0.70$\,V), confirming that our system is in the strongly interacting regime.
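As a cross-check of the geometry, the $V=0$ crossing fields $B_{\pm}=\hbar\left|k_{2}\pm k_{\rm F}\right|/(ed)$ can be estimated directly from the measured densities. The sketch below (our own illustration, not part of the data analysis) uses $k_{\rm F}=\pi n_{\rm 1D}/2$ for spinful electrons, $k_{2}=\sqrt{2\pi n_{\rm 2D}}$ and the effective well separation $d=35$\,nm:
\begin{verbatim}
import numpy as np

HBAR = 1.054571817e-34      # J s
E_CHARGE = 1.602176634e-19  # C

def resonance_fields(n_1d, n_2d, d=35e-9):
    """Crossing fields B- and B+ (in T) from the 1D density n_1d (m^-1)
    and the 2D density n_2d (m^-2)."""
    k_f = np.pi * n_1d / 2           # spinful 1D electrons
    k_2 = np.sqrt(2 * np.pi * n_2d)  # 2D Fermi wavevector
    pref = HBAR / (E_CHARGE * d)
    return pref * abs(k_2 - k_f), pref * (k_2 + k_f)

# The V_F = -0.70 V row of the table above (n_1d = 4.60e7, n_2d = 1.48e15)
# gives B+ ~ 3.2 T, consistent with the measured crossing near 3.17 T.
\end{verbatim}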
\section{High energy} Now we extend the ranges of the voltage and magnetic field, measuring the tunneling conductance $G$ across the double quantum well in Fig.\@ \ref{fig:device} and accessing a large portion of the 1D spectral function from below $-k_{\rm F}$ to $3k_{\rm F}$ and from $-2\mu$ to $2\mu$, see Fig.\@ \ref{fig:experiment_main}. There is an unavoidable `parasitic' (`p') tunneling from narrow 2D regions (light blue strips in Fig.\@ \ref{fig:device}) that connect the wires to the injector constriction. This superimposes a set of parabolic 2D-2D dispersions on top of the 1D-2D signal, which are marked by magenta and blue dotted lines in Fig.\@ \ref{fig:experiment_main}. Apart from the parasitic and the 2D dispersion signals, we observe only a single 1D parabola away from $B=0$, marked by the solid green line in Fig.\@ \ref{fig:experiment_main}. It extends from the spin-excitation branch at low energy, and the position of its minimum, multiplied by the electronic charge $e$, gives the 1D chemical potential $\mu\approx 4$\,meV. The $B_-$ and $B_+$ crossings with the line $V=0$, corresponding to momenta $-k_{\rm F}$ and $k_{\rm F}$, give the 1D Fermi momentum $k_{\rm F}\approx8\times10^{7}$\,m$^{-1}$. All other edges of the 1D spectral function are constructed by mirroring and translation of the hole part of the observable 1D dispersion, dashed green and blue lines in Fig.\@ \ref{fig:experiment_main}. \begin{figure}[t] \includegraphics[width=1\columnwidth]{replica_with_linecuts}\protect\caption{\label{fig:p1b} Left column: intensity plots of ${\rm d}G/{\rm d}V$ (in $\mu$S/mV), for various finger-gate voltages and samples: (a, b) $V_{\rm F}=-0.68$\,V, $V_{\rm P}=0.2$\,V, from Fig.\@ \ref{fig:experiment_main}e, sample A, which had $10\,\mu$m-long wires ($T\sim300$\,mK); (c, d) $V_{\rm F}=-0.70$\,V, $V_{\rm P}=0.3$\,V, from Fig.\@ \ref{fig:experiment_main}i, sample A; (e, f) a similar single-subband result from sample B ($18\,\mu$m-long wires, $T<100$\,mK). The replica feature just above $k_{\rm F}$ appears as a pale triangle (slowly varying $G$) between the two green curves, after a red region (sharp rise in $G$). The replica feature for sample B is somewhat weaker than that for sample A, in line with the wire-length dependence predicted in this paper. Right column: $G$ \textit{vs} $V$ at various fields $B$ from 3 to 4.8\,T for the data in the matching plots in the left column; `+' and `$\times$' symbols on each curve indicate, respectively, the voltages corresponding to the dashed and solid ($p1b$ and $p1a(l)$) green lines in the left column (and in Fig.\@ \ref{fig:experiment_main}), showing the enhanced conductance between the two.} \end{figure} For positive voltages in the region just above the higher $V=0$ crossing point ($B_+$, which corresponds to $k_{\rm F}$) we observe a distinctive feature: the 1D peak broadens, instead of just continuing along the non-interacting parabola, with one boundary following the parabola ($p1a(l)$) and the other bending around, analogously to the replica $p1b$. This is visible in the conductance, but is most easily seen in the differentials, particularly ${\rm d}G/{\rm d}V$ (left column of Fig.\@ \ref{fig:experiment_main}).
The broadening is observed at temperatures from 100\,mK up to at least 1.5\,K, and in samples with different wire designs (with or without air bridges) and lengths: in Fig.\@ \ref{fig:p1b}, ${\rm d}G/{\rm d}V$ is shown in detail for the broadened `replica' region for the 10\,$\mu$m wires already presented (a--d), and for another sample with wires 18\,$\mu$m long (e, f). $G$ is plotted in Fig.\@ \ref{fig:p1b}b, d and f on cuts along the $V$ axis of the corresponding plots in the left column at various fields $B$ from $B_+$ to 4.8\,T---between the `+' and `$\times$' symbols on each curve is a region of enhanced conductance characteristic of the replica $p1b$. Filling of the second 1D subband significantly changes the screening radius for the Coulomb interaction potential in the first 1D subband. This is manifested by a change of the ratio $v_{\rm c}/v_{\rm s}$ when the occupation of the second subband is changed by varying the voltage of the finger gates $V_\textrm{F}$ in Figs.\@ \ref{fig:experiment_main}a, c, e and g, see Table \ref{tab:vcvs_ratio}. The ratio $v_{\rm c}/v_{\rm s}$ is a measure of the interaction strength. Thus, the finger gates give a degree of experimental control over the interactions within our design of the 1D system. We use the maximum change of the ratio $v_c/v_s$ for different finger gate voltages to estimate the relative change of the interaction strength as $\left(\textrm{max}(v_c/v_s)-\textrm{min}(v_c/v_s)\right)/\textrm{min}(v_c/v_s)$, obtaining a change of about $20\%$. It also has to be noted that the `replica' is visible even when a second subband is present in the 1D wires, see Fig. \ref{fig:experiment_main}a--f. In a and b it appears to go 25--30\% higher in voltage than expected for a precise copy of the usual 1D parabola (even allowing for capacitive correction) due to a contribution of the second subband, which we do not analyse in detail here. At even higher magnetic fields the $p1b$ line passes a `p' parabola. Figs.\@ \ref{fig:p1b}a and c (and the corresponding cuts b and d) show the replica feature for two different positions of the `p' parabolae, using a gate above most of the `p' region, showing that the replica feature is independent of the `p' tunneling. The amplitude of the feature dies away rapidly, and beyond the `p' parabolae we have measured up to $8$\,T with high sensitivity but find no sign of any feature that can be distinguished from the decaying tails of the other features. In the range of fields where the $p1b$ feature is observed, its strength decreases as the $B$ field increases away from the crossing point, analogously to the power law for spinless fermions in Table\@ \ref{tab:SF_spectral_function_values}. On general grounds it is natural to expect that the divergence of the spectral weight of a $b$-mode toward an $a$-mode is a generic feature, but there is no known method for performing a microscopic calculation in the spinful case. A similar feature should mark the $h0b\left(r\right)$ mode (see Fig.\@ \ref{fig:SF_spectral_function} and Table \ref{tab:SF_spectral_function_values}) for negative voltages and for the magnetic field just below the crossing point $k_{\rm F}$, but it would be very difficult to resolve due to the overlaying spin and charge lines. \begin{figure}\includegraphics[width=1\columnwidth]{fig_10}\protect\caption{\label{fig:replicafitting} (a,c) The conductance for $V_{\rm F}=-0.70$\,V and $-0.68$\,V, respectively, after subtraction of an idealised landscape made up of fits or estimates of the non-interacting 1D-2D and `p' parabolae (see text).
The $p1b$ replica is seen clearly as the red region of enhanced conductance. (b) The conductance along the $p1b$ replica parabola, for the data in (a) (green crosses). The conductance on $p1b$ has a large contribution from the `p' region (the line in (a) marked with blue dots, which is blurred to the left by multiple copies at slightly different positions). In order to correct for this contribution, the conductance along a matching parabola shifted along the dotted `p' line in (a) (shown as a dashed magenta line there) is subtracted from the $p1b$ data. This yields the points marked with blue circles, which appear to be non-zero because of the enhancement at $p1b$. The amplitude decays rapidly. There are many uncertainties in the fitting of the other peaks, but the replica appears clearly and the decay of the conductance is consistent with an inverse-square power law $G\propto (k-k_{\rm F})^{-\alpha}$ (labelled $\alpha=2$), which is the behaviour predicted by the theory for $k>k_{\rm F}+\gamma$ where $\gamma\ll k_{\rm F}$ (see Table \ref{tab:SF_spectral_function_values}). (d) The $p1b$ conductance enhancement as shown with circles in (c). Three different methods of fitting the background and the 1D and 2D peaks are compared for each of two gate voltages as shown. The curves are offset vertically for clarity. The lines marked with values of $\alpha$ are guides to the eye. The data are all consistent with $\alpha=2\pm1$.} \end{figure} Making an analogy with the microscopic theory for spinless fermions in the first part of this paper, we estimate the ratio of the signals around different spectral edges using the 1D Fermi wavelength, $\lambda_{\rm F}\approx130$\,nm for our samples, as the short-range scale $\mathcal{R}$. The signal from the principal parabola, see Fig.\@ \ref{fig:experiment_main}b, gives the amplitude of the $a$-mode as $G_{a}\approx 5\,\mu$S. Then the amplitude of the signal from the second (third)-level excitations is predicted to be smaller by a factor of more than $\lambda_{\rm F}^{2}/L^{2}\sim 2\times10^{-4}$ ($\lambda_{\rm F}^{4}/L^{4}=3\times10^{-8}$), where the length of a wire is $L=10\,\mu$m. These values, $G_{a}\lambda_{\rm F}^{2}/L^{2}\sim 10^{-3}\,\mu$S ($G_{a}\lambda_{\rm F}^{4}/L^{4}\sim 10^{-7}\,\mu$S), are at least an order of magnitude below the background and noise levels of our experiment, $G_{\textrm{noise}}\sim 10^{-2}\,\mu$S, which places an upper limit on the amplitude of any replica away from $k_{\rm F}$. Thus, our observations are consistent with the mode hierarchy picture for fermions. In an effort to quantify the decay of the replica feature we have fitted the gradual background fall in conductance and the non-interacting 1D and 2D peaks (solid green and blue lines in Figs.\@ \ref{fig:sc_separation}--\ref{fig:p1b}) with Gaussian and/or Lorentzian functions of $B$, at each value of $V>0$. The fitting parameters are then fitted to smooth functions in order to represent the general behaviour of the peaks as a function of $V$. This idealised landscape is then subtracted from the data, see Fig.\@ \ref{fig:replicafitting}a, and the `replica' is then fairly easily observed in the remaining conductance. A copy of a nearby region along the `p' curve is then subtracted too, as an approximation to the rather diffuse signal arising from the main `p' peak and smaller versions of it at slightly different densities. This also reduces errors in the peak and background fitting used in (a).
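As an aside, the order-of-magnitude suppression estimated above can be reproduced in a couple of lines (an illustrative sketch using the values quoted in the text):
\begin{verbatim}
def replica_suppression(G_a=5.0, lam_F=0.13, L=10.0):
    """Estimated signal of second- and third-level excitations (in uS)
    relative to the a-mode amplitude G_a, with the 1D Fermi wavelength
    lam_F taken as the short-range scale R (lengths in microns)."""
    return G_a * (lam_F / L) ** 2, G_a * (lam_F / L) ** 4  # ~1e-3, ~1e-7 uS
\end{verbatim}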
We then plot the conductance along the expected parabola (dashed line in Fig.\@ \ref{fig:replicafitting}a) as a function of $(k-k_{\rm F})/k_{\rm F}=(B-B_+)/((B_+-B_-)/2)$. These data are shown as circles in (c), where all the other contributions to the conductance along the same parabola are also shown. Here, $B_+=3.17$\,T and $k_{\rm F}=7.2\times 10^7$\,m$^{-1}$. It is very hard to be sure that this procedure is reliable due to the significant error bars imposed by contributions from the various other peaks, but it is clear that the replica feature dies away rapidly as a function of $k-k_\textrm{F}$, and it is consistent with the $1/(k-k_\textrm{F})^2$ law predicted for $p1b$ in Table \ref{tab:SF_spectral_function_values} for $k-k_{\rm F}\gg \gamma$. Though the overall prefactor is unknown theoretically in the spinful case, this singular power law may overcome the reduction factor $\mathcal{R}^2/L^2$ close to $k_\textrm{F}$. \section{Conclusions} In this work, we have shown that a hierarchy of modes, controlled by the system length, emerges in systems of interacting fermions in one dimension at high energy, in marked contrast to the well-known fermionic quasiparticles of a Fermi liquid and the hydrodynamic modes of a Luttinger liquid at low energy. We have obtained theoretically the dynamic response functions for a model of spinless fermions with short-range interactions using the exact diagonalisation methods of the Bethe ansatz for the spectrum and the form factors of the system. Analysing the spectral function in detail, we have found that the first-level (strongest) mode in long systems has a parabolic dispersion, like that of a renormalised free particle. The second-level excitations produce a singular power-law line shape for the first-level mode and different kinds of power-law behaviour at the spectral edges. Evaluating the form factor necessary for the dynamical structure factor, we have shown that it has the same general form as the form factor of the spectral function, manifesting the same hierarchy of modes. Using the same many-body matrix elements obtained microscopically, we have also calculated the local density of states. It provides a more convenient way to analyse how the hierarchy at high energy changes into the hydrodynamic modes of the Luttinger liquid at low energies. We have shown, via a full Bethe-ansatz calculation, that the LDOS is suppressed at the Fermi energy in a power-law fashion, in full accord with the prediction of the Tomonaga-Luttinger model. Away from the Fermi point, where the Lorentz invariance of the linear dispersion is reduced to Galilean by the parabolicity of the spectrum, the LDOS is dominated by the first (leading) level of the hierarchy. We have demonstrated that the transition from one regime to another is a smooth crossover. We measure the momentum-resolved tunnelling conductance in one-dimensional wires formed in a GaAs/AlGaAs double-well heterostructure by an array of finger gates. In this set-up we probe the spectral function of unpolarised electrons (spinful fermions) and find a pronounced spin-charge separation at low energy, with a ratio of the charge and the spin velocities of up to 1.8, which confirms that our system is in the strongly interacting regime. By varying the gate voltage that controls the width of our 1D wires, we demonstrate a change of the interaction strength of about $20\%$: varying the confining potential changes the population of higher 1D subbands, which in turn screen the Coulomb interaction in the principal 1D subband, reducing the interaction strength.
In $10\,\mu$m-long wires we find a clear feature resembling the second-level excitations, which dies away rapidly at high momentum. A qualitative fit shows that the feature decays in a fashion that is consistent with the power-law prediction in this paper for spinless fermions. Thus we have shown that the hierarchy is apparently a generic phenomenon, at least for one- and two-point correlation functions of fermions without spin and, in a transport experiment, for fermions with spin. \begin{acknowledgments} We acknowledge financial support from the UK EPSRC through grant numbers EP/J01690X/1 and EP/J016888/1 and from the DFG through SFB/TRR 49. This research was supported in part by the National Science Foundation under Grant No. NSF PHY11-25915. \end{acknowledgments}
\section{Introduction} We study the maximal supersymmetric gauge quantum mechanics. This model was first considered in the papers \cite{CH}, \cite{Flume}, \cite{BRR}. Later this Hamiltonian found applications in the physics of supermembranes \cite{deWHN}, \cite{deWLN} and $D$-particles \cite{Witten}. There is a remarkable conjecture, called Matrix theory, that the same Hamiltonian gives a nonperturbative description of M-theory \cite{BFSS}. This conjecture implies the existence of a unique bound state at threshold for each $SU(N)$ gauge group with $N\ge 2$ (though the existence of the state was conjectured earlier). The $SU(2)$ case was recently studied in a number of papers \cite{HS}, \cite{SS}, \cite{Yi}, \cite{PfW}. In \cite{SS} it was proved, using the computation of the Witten index, that at least one ground state exists in the $SU(2)$ model. Some progress has also been achieved in the general $SU(N)$ case \cite{PR}, \cite{MNS}, \cite{Hoppe}. In \cite{MNS} the principal contribution to the Witten index was computed (see also \cite{GG1} and \cite{GG2}). Calculation of the surface contribution to the index was performed in \cite{GG1} under certain assumptions. The present paper justifies these assumptions. In \cite{HS} an asymptotic form of a wave function was proposed as a candidate ground state. The present paper extends the ideas of \cite{HS} and \cite{PfW} to higher $N$'s. We consider the model in the asymptotic region where the coordinates along the flat directions of the potential become large. Our method is the generalized Born-Oppenheimer approximation introduced in \cite{HS}. The authors of \cite{HS} tackle the problem of gauge invariance by working with a complete set of gauge invariant variables. It would be rather hard to implement this idea for higher $N$'s. Instead, we apply the gauge fixing procedure of \cite{deWLN}. The result of our computation is that in the leading order the dynamics of the flat coordinates is described by the free Hamiltonian. A possible potential term vanishes due to cancellations between many different terms. This result is not surprising; it is generally expected in supersymmetric theories. It is in agreement with the $SU(2)$ calculations of \cite{HS}, \cite{PfW}, \cite{SS}. In the last section of the paper we discuss possible applications of the main result. \section{The model and preliminaries} In this section we introduce the model and do some preliminary technical work needed for the Born-Oppenheimer approximation. Namely, we write down the (partially) gauge fixed Hamiltonian and split it into four basic parts: the free Hamiltonian for the slow (Cartan) degrees of freedom, the bosonic and fermionic oscillators, and the interaction part. After that we explain the quantization of the oscillator part. We refer the reader to the seminal paper \cite{deWLN} for all the details of the gauge fixing procedure. The Hamiltonian of Matrix theory is that of the 10D super Yang-Mills theory dimensionally reduced to 0+1 dimensions (in the $A_{0}=0$ gauge). One can write it in the following form \begin{equation} \label{H} H=\frac{1}{2}\pi^{\mu}_{A} \pi^{\mu}_{A} + \frac{1}{4}f_{ABC}\phi^{\mu}_{B}\phi^{\nu}_{C}f_{ADE}\phi^{\mu}_{D}\phi^{\nu}_{E} -\frac{i}{2}f_{ABC}\phi^{\mu}_{A}\Lambda_{B\alpha}\Gamma^{\mu}_{\alpha \beta}\Lambda_{C\beta} \end{equation} where $\phi^{\mu}_{A}$, $\pi^{\mu}_{A}$ are real bosonic variables and $\Lambda_{A \alpha}$ are real fermionic variables.
The lower capital Latin indices are those of the adjoint representation of a real compact Lie algebra $\bf g$ with totally antisymmetric structure constants $f_{ABC}$. Denote by $G$ the Lie group of $\bf g$. The indices $\mu, \nu =1, \dots , 9$, $\alpha=1, \dots , 16$ correspond to the vector and real spinor representations of the group $spin(9)$ respectively. The $SO(9)$ gamma matrices $\Gamma^{\mu}$ are assumed to be real and symmetric. The canonical commutation relations between the variables are $$ [\pi^{\mu}_{A}, \phi^{\nu}_{B}]=-i\delta^{\mu \nu}\delta_{AB} \, , \quad \{\Lambda_{A\alpha}, \Lambda_{B\beta}\} = \delta_{\alpha \beta}\delta_{AB} . $$ From now on we set $G=SU(N)$, which corresponds to the Matrix theory with the ``center of mass'' degrees of freedom excluded. The Lie algebra ${\bf g}=su(N)$ consists of the traceless antihermitean $N\times N$ matrices $\phi$. An invariant, positive definite inner product on $\bf g$ is defined as $(\phi_{1}, \phi_{2})=-2Tr(\phi_{1}\phi_{2})$. The Cartan subalgebra then consists of the diagonal matrices $\phi \in {\bf g}$. Let the matrices $T_{A} , \enspace A=1, \dots , N^{2}-1$ be an orthonormal basis in $su(N)$ with respect to this inner product such that the indices $A=1, \dots , N-1$ correspond to the Cartan subalgebra. Then $f_{ABC}=(T_{A}, [T_{B}, T_{C}])$. Now we will briefly explain the gauge fixing procedure. Given any element $\phi \in {\bf g}$ there exists a unique matrix $D$ such that $\phi=UDU^{-1}$ for some $U\in G$ and $D$ is of the form \begin{equation} \label{D} D=i\left( \begin{array}{cccc} \lambda_{1} & 0 & \ldots & 0 \\ 0&\lambda_{2} & \ldots &0 \\ \vdots & \vdots & \ddots & \vdots \\ 0&0& \ldots &\lambda_{N} \end{array} \right) \end{equation} where the $\lambda_{n}$ are real numbers such that \begin{equation} \sum_{n=1}^{N}\lambda_{n} = 0 \, , \quad \lambda_{1} \ge \lambda_{2} \ge \ldots \ge \lambda_{N} . \end{equation} The transformation $U$ is defined up to multiplication by an arbitrary element of the Cartan subgroup. Thus, it is clear that we can perform a partial gauge fixing by requiring that $\phi^{9}$ lies within the Cartan subalgebra and has the form (\ref{D}) of the matrix $D$ above. This particular form corresponds to a fixed Weyl chamber within the Cartan subalgebra. The residual gauge group is then $U(1)^{N-1}$. This gauge fixing procedure is described in detail in \cite{deWLN}. Following \cite{deWLN} we adopt the convention that the indices $i,j,k, \dots$ run from $1$ to $N-1$ (the indices of the Cartan subalgebra), while capital indices $I,J,K, \dots $ from the middle of the alphabet run from $N$ to $ N^{2}-1$ (the indices of the subspace spanned by the roots). Also we assume that the indices $a,b,c, \dots$ run from $1$ to $8$ and correspond to the first eight coordinates in $R^{9}$. Finally, let us adopt a new notation for the bosonic Cartan variables, $D_{i}^{\mu}\equiv \phi_{i}^{\mu}$ (this will be convenient later when we split the variables into the ``fast'' and ``slow'' ones).
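Numerically, this partial gauge fixing is nothing but an ordered eigendecomposition. A minimal Python sketch (our illustration) bringing $\phi\in su(N)$ to the form (\ref{D}):
\begin{verbatim}
import numpy as np

def gauge_fix(phi):
    """phi: traceless antihermitian N x N matrix.  Returns lam (sorted
    descending, fixing the Weyl chamber) and U with phi = U D U^{-1},
    D = i*diag(lam).  U is fixed only up to the residual U(1)^{N-1}."""
    lam, V = np.linalg.eigh(-1j * phi)   # -i*phi is hermitian
    order = np.argsort(lam)[::-1]
    lam, U = lam[order], V[:, order]
    assert np.allclose(U @ (1j * np.diag(lam)) @ U.conj().T, phi)
    return lam, U
\end{verbatim}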
The gauge fixed Hamiltonian is $H=H_{0} + H_{B} + H_{F} + H_{4}$, where \begin{equation} \label{H0} H_{0}=-\frac{1}{2}\frac{\partial^{2}}{\partial D_{i}^{\mu}\partial D_{i}^{\mu}} \end{equation} \begin{equation} \label{HB} H_{B}=-\frac{1}{2}\frac{\partial^{2}}{\partial \phi_{I}^{a}\partial \phi_{I}^{a}} + \frac{1}{2}\Omega^{2}_{IJ}\phi_{I}^{a}\phi_{J}^{a} \end{equation} \begin{equation} \label{HF} H_{F}=-\frac{i}{2}\Lambda_{I}z^{\mu}_{IJ}\Gamma^{\mu}\Lambda_{J} \end{equation} \begin{eqnarray} \label{H4} \lefteqn{H_{4}=\frac{1}{4}f_{AIJ}f_{AKL}\phi_{I}^{a}\phi_{J}^{b}\phi_{K}^{a}\phi_{L}^{b} + \frac{1}{2}f_{AiJ}f_{AKL}(D^{a}_{i}\phi_{J}^{b}-D_{i}^{b}\phi_{J}^{a}) \phi_{K}^{a}\phi_{L}^{b} - } \nonumber \\ && -\frac{1}{2}f_{AiJ}f_{AkL}D_{i}^{a}D_{k}^{b}\phi_{J}^{b}\phi_{L}^{a} - \frac{i}{2}f_{IAB}\phi_{I}^{a}\Lambda_{A}\Gamma^{a}\Lambda_{B} + \frac{1}{2}(w^{t}w)_{IJ}\hat L_{I}\hat L_{J} \end{eqnarray} The following notations are used above: $$ \Omega^{2}_{IJ} = (z_{\mu}^{t}z_{\mu})_{IJ} \, , \quad z_{IJ}^{\mu}=D_{i}^{\mu}f_{iIJ} \, , \quad w= (z^{9})^{-1} $$ $$ \hat L_{I}=-f_{IBC}\left( i\phi_{B}^{a}\frac{\partial}{\partial \phi_{C}^{a}} + \frac{i}{2}\Lambda_{B\alpha}\Lambda_{C\alpha} \right) . $$ This Hamiltonian is self-adjoint with respect to the measure $\prod_{a,A}d\phi^{a}_{A}\prod_{i}dD_{i}^{9}$ (the Faddeev-Popov determinant was eliminated by redefining the Hilbert space). The terms $H_{B}$ and $H_{F}$ are the Hamiltonians of the bosonic and fermionic harmonic oscillators. To diagonalize these Hamiltonians, note that the eigenvectors of the matrices $z_{IJ}^{\mu}$ are the complex root vectors $E_{mn}^{I}$ ($m,n=1 , \ldots , N \, , m\ne n$). These eigenvectors satisfy \begin{equation} \label{roots1} z_{IJ}^{\mu}E_{mn}^{J}=i(\lambda_{m}^{\mu}-\lambda_{n}^{\mu})E_{mn}^{I} \end{equation} \begin{equation} (E_{mn}^{I})^{*}=E_{nm}^{I} . \end{equation} Here $\lambda_{m}^{\mu}$ are the eigenvalues of the matrices $D^{\mu}=\sum_{i}D_{i}^{\mu}T^{i}$. The vectors $E_{mn}$ define an orthonormal basis of the root subspace: \begin{equation} \sum_{I} (E_{mn}^{I})^{*}E_{pq}^{I}=\delta_{mp}\delta_{nq} \quad (m\ne n , p\ne q ) \end{equation} \begin{equation} \sum_{m\ne n}(E_{mn}^{I})^{*}E_{mn}^{J} = \delta_{IJ} . \end{equation} Note the equality $E_{mn}\equiv E_{mn}^{I}T^{I}=\frac{i}{\sqrt{2}}e_{mn}$, where $e_{mn}$ is the $N\times N$ matrix with $1$ in the $(m,n)$-th place and zeros elsewhere. These matrices satisfy the following commutation relations \begin{equation} \label{roots5} [E_{mn}, E_{pq}] = \frac{i}{\sqrt{2}}(\delta_{np}E_{mq} - \delta_{mq}E_{pn}) . \end{equation} The frequencies of $H_{B}$ and $H_{F}$ (as we will see below) are \begin{equation} r_{mn}=\sqrt{\sum_{\mu=1}^{9}(\lambda_{m}^{\mu}-\lambda_{n}^{\mu})^{2}} . \end{equation} Thus, we can introduce new variables $\phi_{mn}^{a}$, $\Lambda^{mn}_{\alpha}$, $ m,n =1, \dots ,N , \, m\ne n $ so that \begin{equation} \phi_{I}^{a}=\sum_{m\ne n} \phi_{mn}^{a}E_{mn}^{I} \, , \quad \Lambda_{I\alpha}=\sum_{m\ne n} \Lambda^{mn}_{\alpha}E_{mn}^{I} . \end{equation} The commutation relations for the fermions are now $$ \{\Lambda^{mn}_{\alpha}, \Lambda^{pq}_{\beta}\}=\delta_{\alpha\beta}\delta^{np}\delta^{mq} $$ and it is natural to set $(\Lambda^{mn})^{\dagger}=\Lambda^{nm}$, $m<n$. However, to diagonalize $H_{F}$ some more work needs to be done.
Namely, one should use the $spin(9)$-rotated fermions $$ \tilde \Lambda^{mn}_{\alpha} = R^{mn}_{\alpha\beta}\Lambda^{mn}_{\beta} \, , \quad m<n $$ $$ \tilde \Lambda^{mn}_{\alpha} = R^{nm}_{\alpha\beta}\Lambda^{mn}_{\beta} \, , \quad m>n $$ where $$ R^{mn}_{\alpha\beta}= \frac{r_{mn} + (\lambda_{m}^{\mu} - \lambda_{n}^{\mu})\Gamma^{9}\Gamma^{\mu}} { \sqrt{2r_{mn}(r_{mn} + \lambda_{m}-\lambda_{n})}} \, , \enspace m<n $$ is an orthogonal matrix. Here and in what follows $\lambda_{n}$ with a suppressed upper index stands for $\lambda_{n}^{9}$. In terms of the $\tilde \Lambda^{mn}$ variables $H_{F}$ can be written as \begin{equation} \label{HF2} H_{F}=\sum_{m<n}r_{mn}\left( (\tilde \Lambda_{+}^{mn})^{\dagger}\tilde \Lambda_{+}^{mn} + \tilde \Lambda_{-}^{mn}(\tilde \Lambda_{-}^{mn})^{\dagger} - 8 \right) \end{equation} where $\Lambda_{\pm} $ denote the chiral components taken with respect to $\Gamma^{9}$. Using (\ref{roots1}) - (\ref{roots5}) we can rewrite all parts of the Hamiltonian in terms of the variables $D_{i}^{\mu}$, $\Lambda_{i\alpha}$, $\phi^{a}_{mn}$, $\Lambda_{\alpha}^{mn}$. The corresponding expressions for $H_{0}$ and $H_{F}$ are given in (\ref{H0}) and (\ref{HF2}). The Hamiltonian for the bosonic oscillators now reads \begin{equation} \label{HB2} H_{B}=-\sum_{m<n}\frac{\partial^{2}}{\partial\phi_{mn}^{a}\partial (\phi_{mn}^{a})^{*}} + \sum_{m<n}r_{mn}^{2}\phi^{a}_{mn}(\phi_{mn}^{a})^{*} . \end{equation} As one can easily see from (\ref{HF2}) and (\ref{HB2}), the ground state energies of the bosonic and fermionic oscillators precisely cancel each other (we have $8$ bosonic modes for each $r_{mn}$, $m < n$). The expression for $H_{4}$ in terms of the new variables is rather long. We relegate it to Appendix A in order not to complicate the main text. The normalized state vector of the oscillators ground state has the following form \begin{eqnarray} \label{ground} \lefteqn{ |0\rangle =|0_{B}\rangle |0_{F}\rangle } \nonumber \\ |0_{B}\rangle &=& \left( \prod_{m<n}r_{mn}^{4}2^{4}\pi^{-4} \right) exp\left( -\sum_{m<n}r_{mn}\phi_{mn}^{a}\phi_{nm}^{a} \right) \nonumber \\ |0_{F}\rangle &=& \prod_{m<n} \prod_{\alpha=1}^{8}\left( \tilde \Lambda_{-\alpha}^{mn} \right)^{\dagger}|Fock\rangle \end{eqnarray} where $|Fock\rangle$ denotes the Fock vacuum for the fermions $\Lambda^{mn}$. It is not hard to check that (\ref{ground}) is invariant under the residual $U(1)^{N-1}$ gauge transformations. \section{Born-Oppenheimer approximation and perturbation theory} An important feature of the Matrix model that makes the existence of a threshold bound state possible is the flat directions of the potential. Under the gauge-fixing condition that we employed, the coordinates along the flat directions are the Cartan variables $D_{i}^{\mu}$. We group them together with their fermionic counterparts $\Lambda_{i}$. The coordinates in the transverse directions are $\phi^{a}_{mn}$ along with their superpartners $\Lambda^{mn}$. Following the terminology of the Born-Oppenheimer approximation, we call the variables $D_{i}^{\mu}$, $\Lambda_{i}$ ``slow'' and the variables $\phi^{a}_{mn}$, $\Lambda^{mn}$ ``fast''. When the slow variables are considered fixed, the dynamics of the fast ones is governed by the oscillator Hamiltonians $H_{B}+H_{F}$ and by $H_{4}$. In the asymptotic region where the frequencies $r_{mn}$ of the oscillators are large, and assuming that $H_{4}$ can be treated as a perturbation, it is natural to expect that the fast degrees of freedom will remain in the oscillators ground state.
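The frequencies $r_{mn}$ that control this regime are elementary functions of the eigenvalues $\lambda_{n}^{\mu}$; for bookkeeping at higher $N$ they can be tabulated as follows (an illustrative sketch):
\begin{verbatim}
import numpy as np

def oscillator_frequencies(lam):
    """r_mn = sqrt(sum_mu (lam_m^mu - lam_n^mu)^2) from an array
    lam[n, mu] of shape (N, 9); only the m < n entries are independent."""
    diff = lam[:, None, :] - lam[None, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))
\end{verbatim}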
The right asymptotic region for our purposes turns out to be $D_{i}^{9}\to \infty$. This is the same as setting $\lambda_{m}-\lambda_{n} \to \infty$ for all $m<n$. We will assume that $\lambda_{m}- \lambda_{n}$, $m<n$, are all of order $r$, where $r \to \infty$. Taking into account that the variables $\phi_{mn}^{a}$ in the oscillators ground state are of order $1/\sqrt{r_{mn}}$, one can estimate that $H_{4}$ is of order $1/\sqrt{r}$, which indeed allows one to treat it as a perturbation when $r\to \infty$. Thus, we search for an approximate solution to the spectral problem $$ (H-E)|\Psi\rangle=0 $$ in the form $ |\Psi\rangle=| \cdot\rangle|\Psi(D_{i}, \Lambda_{i})\rangle $ where $ |\cdot\rangle$ is the ground state of the superoscillators. The general formalism for this problem was developed in \cite{HS}. We refer the reader to that paper for a detailed explanation of this generalized Born-Oppenheimer approximation. Here we just want to explain the main idea of the formalism and develop a perturbative expansion suitable for the problem at hand. If one introduces a pair of projection operators $P=|\cdot\rangle \langle \cdot|$, $Q=1-P$, the Schrodinger equation breaks into a system of two equations \begin{eqnarray}\label{projeq} P(H-E)P|\Psi\rangle + P(H-E)Q|\Psi\rangle &=& 0 \nonumber \\ Q(H-E)P|\Psi\rangle + Q(H-E)Q|\Psi\rangle &=& 0 . \end{eqnarray} The second equation can be formally solved as \begin{equation} \label{QPsi} Q|\Psi\rangle=-(Q(H-E)Q)^{-1}Q(H-E)P|\Psi \rangle . \end{equation} Substituting this solution into the first equation, we get \begin{equation} \label{redS} [P(H-E)P - P(H-E)Q(Q(H-E)Q)^{-1}Q(H-E)]P|\Psi \rangle = 0 \, , \end{equation} i.e. the ``reduced'' Schrodinger equation for $P|\Psi\rangle=|\cdot\rangle|\Psi(D_{i}, \Lambda_{i})\rangle$. The first term in (\ref{redS}) is the conventional effective Hamiltonian of the Born-Oppenheimer approximation. The second one constitutes the correction term. Now note that in the problem at hand $|\cdot\rangle$ is the ground state of the superoscillators, which has zero energy. If we split the total Hamiltonian as $H=H_{osc} + H'$, where $H_{osc}$ is the superoscillator Hamiltonian and $H'$ is all the rest, then $H_{osc}|\cdot\rangle=0$. A direct analysis of formulae (\ref{H0})-(\ref{H4}) shows that $QH_{osc}Q=Q(H_{B}+H_{F})Q$ scales like $r$ and $QH'Q$ scales like $O(1)$ (because of the part of $H_{0}$ that depends on the $D^{a}_{i}$ variables). This is the reason to treat $QH'Q$ as a perturbation. Now we can apply perturbation theory in $QH'Q$ to the term $(Q(H-E)Q)^{-1}$ in (\ref{redS}): \begin{eqnarray} \label{pert1} \lefteqn{(Q(H-E)Q)^{-1} = (QH_{osc}Q + QH'Q - E)^{-1} = } \nonumber \\ &=& \frac{1}{QH_{osc}Q-E} - \frac{1}{QH_{osc}Q-E}QH'Q\frac{1}{QH_{osc}Q-E} + \ldots \end{eqnarray} Here $Q(H-E)Q$ is understood as an operator on the $Q$-projected subspace of the whole Hilbert space and thus the operator inverse makes sense (for the same reason we can write $E$ instead of $EQ$ in this equation).
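The reduced equation (\ref{redS}) can be checked on a finite-dimensional toy model: for any hermitian $H$ and projector $P$, an exact eigenvalue $E$ with $P|\Psi\rangle\neq0$ reappears as an eigenvalue of the energy-dependent reduced operator. A small numerical sketch (our illustration):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n, p = 8, 3                                # toy Hilbert space, dim(P) = p
H = rng.standard_normal((n, n)); H = (H + H.T) / 2
E = np.linalg.eigvalsh(H)[0]               # an exact eigenvalue of H

Hpp, Hpq = H[:p, :p], H[:p, p:]            # P projects on the first p states
Hqp, Hqq = H[p:, :p], H[p:, p:]
Heff = Hpp - Hpq @ np.linalg.inv(Hqq - E * np.eye(n - p)) @ Hqp
assert np.min(np.abs(np.linalg.eigvalsh(Heff) - E)) < 1e-9
\end{verbatim}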
Substituting this perturbation expansion into the reduced Schr\"odinger equation (\ref{redS}), we get the following expression for the effective Hamiltonian \begin{eqnarray} \label{perturb} &&\langle\cdot|H'|\cdot\rangle - \langle\cdot|H'Q\frac{1}{QH_{osc}Q-E}QH'|\cdot\rangle + \nonumber \\ &&+ \langle\cdot|H'Q\frac{1}{QH_{osc}Q-E}QH'Q\frac{1}{QH_{osc}Q-E}QH'|\cdot\rangle + \ldots \end{eqnarray} The ``propagator'' $\frac{1}{QH_{osc}Q-E}$ scales like $1/r$, and the above expansion can be used to compute the effective Hamiltonian to any desired order in $1/r$. Although formally $H'$ scales like $O(1)$ because of the $H_{0}$ term, the contribution of order $1/r^{2}$ comes solely from the first two terms in (\ref{perturb}). This happens due to the fact that $H_{0}|\cdot\rangle$ scales like $1/r$ (see appendix B). Hence, the correction term needed to obtain the effective Hamiltonian up to order $1/r^{2}$ is \begin{equation} \label{corr} - \langle\cdot|H_{4}Q\frac{1}{QH_{osc}Q-E}QH_{4}|\cdot\rangle . \end{equation} Another way to get the correction term is to use an ansatz method similar to the one developed in \cite{HS}. Both (\ref{corr}) and the ansatz method give the same result. \section{Calculation} First we need to compute the main contribution $\langle\cdot|H|\cdot\rangle = \langle\cdot|H_{0}|\cdot\rangle + \langle\cdot|H_{4}|\cdot\rangle$ up to second order in $1/r$. For the calculations it is convenient to express $H_{0}$ in terms of the $\lambda_{n}^{\mu}$ variables. We have the linear correspondence $D^{\mu}_{i}=A_{i}^{n}\lambda_{n}^{\mu}$ between the two sets of variables. Since by definition $\sum_{i}D_{i}^{\mu}T^{i} = i\sum_{n} \lambda_{n}^{\mu}e_{nn}$, the condition $(D^{\mu}, D^{\nu}) =-2tr(D^{\mu}D^{\nu})=D^{\mu}_{i}D^{\nu}_{i}$ gives $\lambda_{n}^{\mu}=\frac{1}{2}A^{i}_{n}D_{i}^{\mu}$. Therefore $$ H_{0} = -\frac{1}{4}\frac{\partial^{2}}{\partial \lambda_{n}^{\mu}\partial \lambda_{n}^{\mu}} $$ where the last operator is considered as a restriction to the subspace of functions of $\lambda_{1}^{\mu} , \ldots , \lambda^{\mu}_{N}$ which are annihilated by $\sum_{n=1}^{N} \frac{\partial}{\partial \lambda_{n}^{\mu}} $ (which means that they depend only on the differences $\lambda_{m}^{\mu}-\lambda_{n}^{\mu}$). Both the bosonic and fermionic oscillator ground-state vectors depend on the $\lambda_{n}^{\mu}$ variables. This should be taken into account when calculating averages of differential operators such as $H_{0}$. Some useful formulae for computing such averages are given in appendix B. After this preliminary work the direct computation yields \begin{equation} \label{H0c} \langle\cdot|H_{0}|\cdot\rangle = -\frac{1}{2}\frac{\partial^{2}}{\partial D_{i}^{\mu}\partial D_{i}^{\mu} } + 9 \sum_{m<n} \frac{1}{r_{mn}^{2}} . \end{equation} In $H_{4}$ we have terms of order $1/r$ and $1/r^{3/2}$, but it turns out that they contribute only at orders higher than $1/r^{2}$. The remaining terms give the answer \begin{eqnarray} \label{H4c} \lefteqn {\langle\cdot|H_{4}|\cdot\rangle \simeq \frac{7}{2}\sum_{p\ne m} \sum_{m \ne n} \frac{1}{r_{pm}r_{mn}} +} \nonumber \\ &&+ \frac{1}{2} \sum_{p\ne m \ne n \ne p} \left[ \frac{2(\lambda_{m}-\lambda_{n})(\lambda_{p}-\lambda_{m})}{r_{mn}r_{pm}(\lambda_{p}-\lambda_{n})^{2}} + \frac{r_{pm}^{2} + r_{mn}^{2}}{r_{mn}r_{pm}(\lambda_{p}-\lambda_{n})^{2}} \right] .
\end{eqnarray} The correction term that gives a contribution of order $1/r^{2}$ is \begin{eqnarray} \label{corc} \lefteqn{-\langle\cdot |\left( -\frac{i}{2}f_{IAB}\phi_{I}^{a}\Lambda_{A}\Gamma^{a}\Lambda_{B} \right) \frac{1}{H_{osc}}\left( -\frac{i}{2}f_{JCD}\phi_{J}^{b}\Lambda_{C}\Gamma^{b}\Lambda_{D} \right) |\cdot\rangle = } \nonumber \\ &=& 8\sum_{p\ne m \ne n \ne p} \left[ - \frac{1}{r_{mp}(r_{pm}+r_{mn}+r_{np})} + \frac{(\lambda_{n}-\lambda_{m})(\lambda_{p}-\lambda_{n})}{r_{mp}r_{pn}r_{nm}(r_{mp}+r_{pn} + r_{nm})} \right] - \nonumber \\ &-& 16\sum_{m<n} \frac{1}{r^{2}_{mn}} . \end{eqnarray} Evidently the last term in (\ref{corc}) cancels against the analogous terms from (\ref{H0c}) and (\ref{H4c}). Those terms are similar to the ones arising in the $SU(2)$ computation (see \cite{PfW}). The remaining terms look, at first glance, as if they can hardly cancel each other. To see that this indeed happens we rewrite them in terms of the $\lambda_{n}$ variables only, using the fact that $$r_{mn} = \lambda_{m} - \lambda_{n} + {\cal O}\left(\frac{1}{r}\right) \, , \quad m<n . $$ Then (\ref{H4c}) contributes $$ \sum_{m<p<n} \left( \frac{7}{(\lambda_{m}-\lambda_{p})(\lambda_{m}-\lambda_{n})} + \frac{9}{(\lambda_{m}-\lambda_{p})(\lambda_{p}-\lambda_{n})} + \frac{7}{(\lambda_{p}-\lambda_{n})(\lambda_{m}-\lambda_{n})} \right) . $$ The same procedure carried out on the first two terms in (\ref{corc}) yields $$ -16\sum_{m<p<n} \left( \frac{1}{(\lambda_{m}-\lambda_{p})(\lambda_{m}-\lambda_{n})} + \frac{1}{(\lambda_{p}-\lambda_{n})(\lambda_{m}-\lambda_{n})} \right) . $$ One readily checks that the sum of these two contributions vanishes: writing $x=\lambda_{m}-\lambda_{p}$ and $y=\lambda_{p}-\lambda_{n}$, the identity $\frac{1}{x(x+y)}+\frac{1}{y(x+y)}=\frac{1}{xy}$ reduces the first contribution to $\sum_{m<p<n}16/(xy)$ and the second to $-\sum_{m<p<n}16/(xy)$. Hence, the outcome of our computation is given by the formula $$ H_{eff} = -\frac{1}{2}\frac{\partial^{2}}{\partial D_{i}^{\mu}\partial D_{i}^{\mu} } \, . $$ In appendix B we have collected some formulae which we found useful for the computation described above. \section{Discussion} First we would like to explain how the present results can be compared with those obtained in \cite{HS} for the $SU(2)$ gauge group. The authors of \cite{HS} use a gauge invariant variable $R$ to specify the asymptotic region. Namely, $R$ is one of the eigenvalues of the matrix $$ \Phi_{ab} = \phi^{\mu}_{a}\phi^{\mu}_{b} . $$ Here the lower indices $a$ and $b$ are $SU(2)$ indices that run from $1$ to $3$. Without loss of generality we can assume that the index $3$ corresponds to the Cartan subalgebra and that the basis $\phi_{a}^{\mu}$ is such that the matrix $\Phi$ is diagonal (any element of $SU(2)$ can be taken as the Cartan generator). Then, clearly, \begin{displaymath} \Phi= \left( \begin{array}{ccc} *&0&0 \\ 0&*&0 \\ 0&0& r^{2} \end{array} \right) \end{displaymath} where $r^{2} = \phi_{3}^{\mu}\phi_{3}^{\mu} $ coincides with $r_{12}^{2}$ in our earlier notation. Hence, in this basis $R$ coincides with $r$. The result of \cite{HS} for the effective Hamiltonian (of the radial degrees of freedom) is \begin{equation} \label{hHS} H_{eff} = -\frac{1}{2}\frac{d^{2}}{dR^{2}} - \frac{5}{R}\frac{d}{dR} - \frac{4}{R^{2}} \end{equation} while the measure on the Hilbert space is $R^{10}dR$. Composing this operator with $R$ from the left and with $R^{-1}$ from the right, we get the radial part of the Laplacian in 9 dimensions with the radial measure $R^{8}dR$. Therefore, the radial parts of our effective Hamiltonian and of the one found by Halpern and Schwartz are the same. It is not hard to check that the angular dependence is the same as well.
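The radial identification above can be verified directly: for a function $\psi(R)$, $$ R\left(-\frac{1}{2}\frac{d^{2}}{dR^{2}}-\frac{5}{R}\frac{d}{dR}-\frac{4}{R^{2}}\right)R^{-1}\psi = -\frac{1}{2}\psi'' - \frac{4}{R}\psi' = -\frac{1}{2}\left(\frac{d^{2}}{dR^{2}}+\frac{8}{R}\frac{d}{dR}\right)\psi \, , $$ which is precisely the radial part of the Laplacian in 9 dimensions, while the measure transforms as $|R^{-1}\psi|^{2}R^{10}dR=|\psi|^{2}R^{8}dR$.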
Once we have the effective Hamiltonian, we can find the asymptotic expression for $P|\Psi \rangle = |\cdot \rangle |\Psi(D_{i} , \Lambda_{i})\rangle $ and then, using (\ref{QPsi}) and (\ref{pert1}), one can get the asymptotic form of the whole state vector $|\Psi\rangle $. Asymptotic solutions to $H_{eff} |\Psi(D_{i} , \Lambda_{i})\rangle = 0$ have a basis of the form $$ D_{1}^{-7-l_{1}}Y_{l_{1}}(D_{1}^{\mu}) \cdot \ldots \cdot D_{N-1}^{-7-l_{N-1}}Y_{l_{N-1}}(D_{N-1}^{\mu}) |\Lambda_{1} , \ldots , \Lambda_{N-1} \rangle $$ where the $Y_{l_{i}}$ are SO(9) spherical harmonics. Further constraints of supersymmetry, SO(9) invariance, and invariance with respect to the Weyl group of $SU(N)$ (i.e. the permutation group $S_{N}$) should be imposed on $|\Psi(D_{i} , \Lambda_{i})\rangle$ to single out the candidates for the ground state. Another comment we would like to make here concerns the computation of the Witten index. In the paper \cite{SS} it was shown that the Witten index for the model at hand has two contributions, neither of which can be made to vanish by the choice of the limiting procedure. The following expression for the correction (or surface) term was found: $$ {\cal I}_{surf} = -\frac{1}{2} \int_{N_{F}(R)} tr e_{n} (-1)^{F} QW' . $$ The main ingredient in this formula is $W'$, the approximate Green's function in the limit of large separation. The authors of \cite{SS} showed that one can take a free propagator for $W'$ in the $SU(2)$ theory and gave the complete calculation of the index in this case. In \cite{GG1} the surface term was calculated under a similar assumption for $W'$ in the $SU(N)$ theory. The present paper justifies this assumption and therefore completes the proof that the full Witten index for the $SU(N)$ case is equal to 1. \begin{center} {\bf Acknowledgements} \end{center} I would like to thank M.~Halpern and C.~Schwartz for helpful and stimulating discussions. I also want to express my gratitude to A.~Schwarz for his constant support and interest in my work.
\section{introduction} Finite many-body systems (e.g., nuclei, small metallic grains, metallic clusters) robustly maintain similar regularities, despite their different binding interactions. For example, they all present odd-even staggering in their binding energies, which is, however, attributed to various mechanisms \cite{oes-1,oes-2,oes-3,oes-4,oes-5}. Particularly in nuclear systems, the nucleon-nucleon interactions numerically exhibit a ``random'' pattern with no trace of symmetry groups, whereas nuclear spectra follow some robust dynamical features: the nuclear spectral fluctuation is universally observed \cite{bohigas,haq,shriner}; low-lying spectra of even-even nuclei are orderly and systematically characterized by seniority, vibrational and rotational structures \cite{casten-1,casten-2}, on top of $I^{\pi}=0^+$ ground states, which occur without exception. To demonstrate the insensitivity of these robust regularities to the interaction details, and to reveal their underlying origin, random interactions are employed to simulate (or even introduce) variety and chaos in a finite many-body system. Thus, the predominant behaviors in a random-interaction ensemble correspond to dynamical features in a realistic system. Many efforts have been devoted along this direction \cite{rand-rev-1,rand-rev-2,rand-rev-3,rand-rev-4,rand-book}. For instance, similarly to realistic even-even nuclei, the predominance of $I=0$ ground states \cite{johnson-prl,johnson-prc} and collective band structures \cite{bijker-prl,bijker-prc} have been observed in random-interaction ensembles. However, there are only a few attempts to study the robustness of nuclear quadrupole collectivity against the random interaction. This is partly because a random-interaction ensemble potentially gives weaker E2 transitions than a shell-model calculation with ``realistic'' interactions \cite{horoi-be2}. Even so, some robust correlations involving the E2 collectivity can be expected. For example, the Alaga ratio between the quadrupole moment ($Q$) of the $2^+_1$ state and B(E2, $2^+_1\rightarrow 0^+_1$) highlights both the near-spherical shape and the well-deformed rotor in random-interaction ensembles \cite{rand-rev-2,horoi-a}; ratios of E2 transition rates between the yrast $0^+_1$, $2^+_1$ and $4^+_1$ states are also correlated with the ratio of $2^+_1$ and $4^+_1$ excitation energies \cite{bijker-prl,zhao-be2}. \begin{figure} \includegraphics[angle=0,width=0.45\textwidth]{q_exp.eps} \caption{(Color online) $\langle 2^+_1||E2||2^+_1\rangle$ and $\langle 2^+_2||E2||2^+_2\rangle$ matrix elements from Table I of Ref. \cite{allmond} (i.e., [$Q(2^+_1)$, $Q(2^+_2)$] plots scaled by $\sqrt{16\pi/5}\langle 2220|22\rangle$). The $\theta$ parameterization defined in Eq. (\ref{theta}) is illustrated in the red sector. The $Q(2^+_2)= -Q(2^+_1)$ correlation is obvious along the $\theta= -45^{\circ}$ direction (the black dashed diagonal line).}\label{q_exp} \end{figure} This work further studies the robust correlation between the $Q$ values of the first two $2^+$ states, inspired by a recent experimental survey \cite{allmond}. As shown in Fig. \ref{q_exp}, this survey demonstrated a global $Q(2^+_2)= -Q(2^+_1)$ correlation across a wide range of masses, deformations, and $2^+_1$ energies. We will make use of random-interaction ensembles to provide an interacting-particle perspective on this correlation, and search for other underlying $Q$ correlations.
Our statistical analysis is based on the Elliott SU(3) model \cite{elliott-su3} and the mean-field Hartree-Bose theory \cite{ibm}. \section{calculation framework}\label{cal} In our random-interaction calculations, the single-particle-energy degree of freedom is switched off to avoid interference from shell-structure details. The two-body interaction matrix element, on the other hand, is denoted by $V_{j_1j_2j_3j_4}^J$ as usual, where $j_1$, $j_2$, $j_3$ and $j_4$ represent the angular momenta of single-particle orbits (half integer for fermions and integer for bosons), and the superscript $J$ labels the total angular momentum of the two-body configurations involved in the interaction element. In our calculations, $V_{j_1j_2j_3j_4}^J$ is randomized independently and Gaussianly with $(\mu=0,~\sigma^2=1+\delta_{j_1j_2,j_3j_4})$, which ensures the invariance of our random two-body interactions under arbitrary orthogonal transformations \cite{wigner}. All realizations of the random interactions, together with their outputs from microscopic calculations, constitute the two-body random ensemble (TBRE) \cite{tbre-1,tbre-2,tbre-3}. Obviously, in the TBRE, diagonal interaction elements potentially have larger magnitudes. For the shell-model TBRE in this work, four model spaces with either four or six valence protons in either the $sd$ or $pf$ shell are considered, corresponding to four nuclei: $^{24}$Si, $^{26}$S, $^{44}$Cr and $^{46}$Fe. For the IBM1 TBRE, $sd$-boson spaces are constructed for nuclei with valence boson numbers $N_b=$12, 13, 14 and 15, where $s$ and $d$ represent $I=0\hbar$ and $I=2\hbar$ bosons, respectively. It is noteworthy that a single calculation with random interactions does not match, and is not intended to match, a realistic nucleus. It only presents a pseudo nucleus in the computational laboratory. Thus, in this article, the model spaces described above are named after the corresponding pseudo nuclei for convenience. For example, the model space with four protons in the $sd$ shell corresponds to pseudo $^{24}$Si. Statistical properties of many random-interaction calculations for pseudo nuclei can then be related to the robustness of dynamical features in realistic nuclear systems. To ensure the statistical validity of our conclusions, 1 000 000 sets of random interactions are generated for each pseudo nucleus and fed into the shell-model or IBM1 calculations. If a calculation produces an $I=0$ ground state, the $Q$ matrix elements of the $2^+_1$ and $2^+_2$ states are calculated and recorded for the subsequent statistical analysis. \section{$Q$ correlations in Shell Model}\label{sm} In the Shell Model, the $Q$ matrix element of a $2^+$ state, $|2^+\rangle$, is defined conventionally as \begin{equation} \begin{aligned} &Q(2^+)=\langle 2^+||\hat{Q}||2^+ \rangle,\\ &\hat{Q}=\langle j||r^2Y^2||j^{\prime}\rangle(a^{\dagger}_j\times \tilde{a}_{j^{\prime}})^{(2)}, \end{aligned} \end{equation} where $a^{\dagger}_j$ and $\tilde{a}_{j^{\prime}}$ are the single-particle creation and time-reversal operators at orbits $j$ and $j^{\prime}$, respectively. A proportional $Q$ correlation between the first two $2^+$ states is normally characterized by the ratio $Q(2^+_2)/Q(2^+_1)$. Geometrically, such a correlation also corresponds to a straight line with the polar angle, \begin{equation}\label{theta} \theta= \arctan\left\{\frac{Q(2^+_2)}{Q(2^+_1)}\right\}, \end{equation} across the origin in the [$Q(2^+_1)$, $Q(2^+_2)$] plane. For example, the experimental $Q(2^+_1)=-Q(2^+_2)$ correlation suggested by Ref.
\cite{allmond} can be illustrated by a diagonal $\theta=\arctan(-1)=-45^{\circ}$ line, as expected in Fig. \ref{q_exp}. We also visualize the polar-angle scheme of the [$Q(2^+_1)$, $Q(2^+_2)$] plane in Fig. \ref{q_exp}. In this work, we prefer the statistical analysis based on the polar angle $\theta$ over that based on the $Q(2^+_2)/Q(2^+_1)$ ratio for two reasons. Firstly, the distribution of the $Q(2^+_2)/Q(2^+_1)$ ratio spreads widely, so that the statistical detail of the $Q(2^+_2)=-Q(2^+_1)$ correlation may be concealed. In particular, there robustly exists an 8\% probability of $|Q(2^+_2)/Q(2^+_1)|>10$ due to the predominance of weak quadrupole collectivity, i.e. small $|Q(2^+_1)|$, in the shell-model TBRE \cite{horoi-be2}. However, we intend to present the statistical detail of the experimental $Q(2^+_2)/Q(2^+_1)=-1$ correlation as comprehensively as possible. The wide statistical range of the $Q(2^+_2)/Q(2^+_1)$ ratio may defeat this intention. By converting the $Q(2^+_2)/Q(2^+_1)$ ratio to the $\theta$ value, the statistical range is limited to $(-90^{\circ},90^{\circ})$, and a clearer view of the $Q(2^+_2)=-Q(2^+_1)$ correlation can be obtained around $\theta=-45^{\circ}$. Secondly, the $\theta$ parameterization intuitively provides a reasonable geometric standard of symmetric sampling. Taking the $Q(2^+_2)=-Q(2^+_1)$ correlation as an example, there is actually no (pseudo) nucleus following the exact $Q(2^+_2)=-Q(2^+_1)$ relation in experiments or in our TBRE, and yet we can take $\theta\in(-50^{\circ},-40^{\circ})$ as the sampling range to represent this correlation. One sees that this sampling range indeed covers a symmetric area related to the $Q(2^+_2)=-Q(2^+_1)$ correlation in the [$Q(2^+_1)$, $Q(2^+_2)$] plane. With the $Q(2^+_2)/Q(2^+_1)$ statistic, the determination of a symmetric sampling range for one specific $Q$ correlation can be controversial, or simply another representation of the $\theta$ parameterization. Therefore, all the statistics, analyses, and discussions in this work are based on the $\theta$ value. \begin{figure} \includegraphics[angle=0,width=0.45\textwidth]{q_sm.eps} \caption{(Color online) $\theta$ distributions from the experimental survey (Exp) \cite{allmond} and the shell-model TBRE. The $\theta= \pm 45^{\circ}$ peaks are highlighted, corresponding to the $Q(2^+_2)= \pm Q(2^+_1)$ correlations, respectively. Error bars correspond to statistical errors.}\label{q_sm} \end{figure} In Fig. \ref{q_sm}, we present the $\theta$ distributions of the four pseudo nuclei in the shell-model TBRE, compared with the experimental distribution from Ref. \cite{allmond}. The experimental $Q(2^+_1)=-Q(2^+_2)$ correlation is represented by the main peak around $\theta=-45^{\circ}$, which is also reproduced by the TBRE. Furthermore, several weak peaks around $\theta= 45^{\circ}$ are also observed in both the experimental data and the random-interaction systems, corresponding to the $Q(2^+_2)= Q(2^+_1)$ correlation. As proposed by Ref. \cite{allmond}, nuclear rotor models can give the $\theta=-45^{\circ}$ correlation, even though such a correlation experimentally occurs in both rotational and non-rotational nuclei. Therefore, we further examine whether the $\theta=\pm45^{\circ}$ correlations are a signature of underlying rotational collectivity in the TBRE. Firstly, we verify whether the $\theta=\pm 45^{\circ}$ correlations accompany rotational spectra in the TBRE.
Secondly, we search for the statistical signature of the random-interaction elements that provide the $\theta=\pm 45^{\circ}$ correlations, and trace this signature back to the microscopic Hamiltonian of the nuclear rotor model, namely the Elliott SU(3) Hamiltonian. \begin{figure} \includegraphics[angle=0,width=0.45\textwidth]{r_sm.eps} \caption{(Color online) $R_{42}$ distributions around the $\theta=\pm 45^{\circ}$ correlations (red circles and blue triangles, respectively) compared with those in the whole shell-model TBRE (black squares). Error bars correspond to statistical errors.}\label{r_sm} \end{figure} Following previous random-interaction studies \cite{bijker-prl,bijker-prc,zhao-be2,bijker-mf}, potential rotational spectra with $\theta=\pm 45^{\circ}$ correlations can be characterized by the energy ratio $R_{42}=E_{4^+_1}/E_{2^+_1}\simeq10/3$, where $E_{2^+_1}$ and $E_{4^+_1}$ correspond to the excitation energies of the yrast $2^+$ and $4^+$ states, respectively. Thus, we plot the $R_{42}$ distributions with $\theta\in (-50^{\circ},-40^{\circ})$ and $\theta\in (40^{\circ},50^{\circ})$, respectively, in Fig. \ref{r_sm}, and compare them with that in the whole TBRE. Except for $^{26}$S, the $R_{42}$ distributions in both the $\theta\in (-50^{\circ},-40^{\circ})$ and $\theta\in (40^{\circ},50^{\circ})$ regions are identical to those in the whole TBRE within statistical error. For $^{26}$S, the $R_{42}$ distribution has an observable enhancement at $R_{42}=1$ with $\theta\in (40^{\circ},50^{\circ})$. Namely, the $\theta=45^{\circ}$ correlation seems to partially originate from the seniority-like level scheme in the $^{26}$S space. This observation explains why the $\theta=45^{\circ}$ peak for $^{26}$S is stronger, as shown in Fig. \ref{q_sm}, given the dominance of pairing-like behaviors in the TBRE \cite{johnson-prl,johnson-prc,rand-sen}. Nevertheless, the $\theta=\pm 45^{\circ}$ correlations show no special preference for rotational spectra in the shell-model TBRE, consistent with the survey of the realistic nuclear system \cite{allmond}. \begin{table} \caption{$|\overline{V^J_{j_1j_2j_3j_4}}|$ values around the $\theta=\pm 45^{\circ}$ correlations and $|\langle j_1j_2|\hat{C}_{\rm SU(3)}|j_3j_4\rangle^J|$ elements [``SU(3)'' column] in the $sd$ shell. The ``Index'' column presents the integer, $2j_1\times 10000+2j_2\times 1000+2j_3\times 100+2j_4\times 10+J$, to identify two-body interaction elements.
All the data is organized in an increasing order of the index column.}\label{int-sd} \begin{tabular}{cccccccccccccccccccccc} \hline\hline \multirow{2}{*}{Order} & $~~~$ & \multirow{2}{*}{Index} & $~$ & \multicolumn{2}{c}{$|\overline{V^J_{j_1j_2j_3j_4}}|$} & $~$ & \multirow{2}{*}{SU(3)} \\ \cline{5-6} & & & & $\theta\in(-50^{\circ},-40^{\circ})$ & $\theta\in(40^{\circ},50^{\circ})$ & & \\ \hline 1 & & 11110 & & 0.031 & 0.028 & & 20.0 \\ 2 & & 11330 & & 0.009 & 0.006 & & 5.7 \\ 3 & & 11550 & & 0.017 & 0.006 & & 6.9 \\ 4 & & 13131 & & 0.047 & 0.007 & & 7.0 \\ 5 & & 13132 & & 0.022 & 0.053 & & 10.2 \\ 6 & & 13152 & & 0.007 & 0.028 & & 3.9 \\ 7 & & 13332 & & 0.035 & 0.025 & & 2.5 \\ 8 & & 13351 & & 0.006 & 0.007 & & 0.0 \\ 9 & & 13352 & & 0.002 & 0.043 & & 2.3 \\ 10 & & 13552 & & 0.006 & 0.024 & & 3.3 \\ 11 & & 15152 & & 0.045 & 0.087 & & 11.8 \\ 12 & & 15153 & & 0.095 & 0.172 & & 7.0 \\ 13 & & 15332 & & 0.024 & 0.005 & & 3.1 \\ 14 & & 15352 & & 0.016 & 0.014 & & 2.8 \\ 15 & & 15353 & & 0.004 & 0.060 & & 0.0 \\ 16 & & 15552 & & 0.064 & 0.002 & & 4.0 \\ 17 & & 33330 & & 0.118 & 0.039 & & 15.0 \\ 18 & & 33332 & & 0.136 & 0.107 & & 0.7 \\ 19 & & 33352 & & 0.025 & 0.021 & & 4.1 \\ 20 & & 33550 & & 0.014 & 0.014 & & 2.4 \\ 21 & & 33552 & & 0.016 & 0.018 & & 0.4 \\ 22 & & 35351 & & 0.018 & 0.076 & & 13.0 \\ 23 & & 35352 & & 0.015 & 0.104 & & 7.2 \\ 24 & & 35353 & & 0.180 & 0.009 & & 2.0 \\ 25 & & 35354 & & 0.028 & 0.167 & & 2.0 \\ 26 & & 35552 & & 0.015 & 0.028 & & 4.8 \\ 27 & & 35554 & & 0.046 & 0.021 & & 0.0 \\ 28 & & 55550 & & 0.151 & 0.086 & & 16.0 \\ 29 & & 55552 & & 0.046 & 0.069 & & 8.1 \\ 30 & & 55554 & & 0.160 & 0.037 & & 2.0 \\ \hline\hline \end{tabular} \end{table} \begin{table*} \caption{The same as Table \ref{int-sd} except for the $pf$ shell.}\label{int-pf} \begin{tabular}{cccccccccccccccccccccc} \hline\hline \multirow{2}{*}{Order} & $~$ & \multirow{2}{*}{Index} & $~$ & \multicolumn{2}{c}{$|\overline{V^J_{j_1j_2j_3j_4}}|$} & $~$ & \multirow{2}{*}{SU(3)} & $~~~~~~~~~$ & \multirow{2}{*}{Order} & $~$ & \multirow{2}{*}{Index} & $~$ & \multicolumn{2}{c}{$|\overline{V^J_{j_1j_2j_3j_4}}|$} & $~$ & \multirow{2}{*}{SU(3)} \\ \cline{5-6}\cline{14-15} & & & & $\theta\in(-50^{\circ},-40^{\circ})$ & $\theta\in(40^{\circ},50^{\circ})$ & & & & & & & & $\theta\in(-50^{\circ},-40^{\circ})$ & $\theta\in(40^{\circ},50^{\circ})$ & & \\ \hline 1 & & 11110 & & 0.030 & 0.011 & & 32.0 & & 48 & & 35352 & & 0.059 & 0.110 & & 18.9 \\ 2 & & 11330 & & 0.011 & 0.008 & & 12.2 & & 49 & & 35353 & & 0.044 & 0.030 & & 9.1 \\ 3 & & 11550 & & 0.010 & 0.006 & & 9.7 & & 50 & & 35354 & & 0.044 & 0.011 & & 12.3 \\ 4 & & 11770 & & 0.006 & 0.007 & & 0.0 & & 51 & & 35372 & & 0.002 & 0.001 & & 2.3 \\ 5 & & 13131 & & 0.018 & 0.033 & & 23.4 & & 52 & & 35373 & & 0.004 & 0.005 & & 3.8 \\ 6 & & 13132 & & 0.039 & 0.004 & & 27.9 & & 53 & & 35374 & & 0.006 & 0.007 & & 3.8 \\ 7 & & 13152 & & 0.000 & 0.000 & & 6.6 & & 54 & & 35552 & & 0.003 & 0.011 & & 3.7 \\ 8 & & 13332 & & 0.009 & 0.007 & & 3.2 & & 55 & & 35554 & & 0.010 & 0.000 & & 3.3 \\ 9 & & 13351 & & 0.000 & 0.000 & & 0.0 & & 56 & & 35571 & & 0.006 & 0.000 & & 0.0 \\ 10 & & 13352 & & 0.002 & 0.000 & & 3.5 & & 57 & & 35572 & & 0.002 & 0.002 & & 4.1 \\ 11 & & 13372 & & 0.007 & 0.004 & & 8.6 & & 58 & & 35573 & & 0.010 & 0.004 & & 6.2 \\ 12 & & 13552 & & 0.003 & 0.005 & & 4.1 & & 59 & & 35574 & & 0.005 & 0.002 & & 5.1 \\ 13 & & 13571 & & 0.008 & 0.000 & & 9.0 & & 60 & & 35772 & & 0.004 & 0.001 & & 3.3 \\ 14 & & 13572 & & 0.007 & 0.005 & & 5.4 & & 61 & & 35774 & & 0.004 & 0.005 & & 2.6 \\ 
15 & & 13772 & & 0.004 & 0.002 & & 0.0 & & 62 & & 37372 & & 0.035 & 0.032 & & 26.8 \\ 16 & & 15152 & & 0.006 & 0.006 & & 20.1 & & 63 & & 37373 & & 0.033 & 0.041 & & 15.3 \\ 17 & & 15153 & & 0.077 & 0.079 & & 6.5 & & 64 & & 37374 & & 0.067 & 0.058 & & 12.9 \\ 18 & & 15173 & & 0.002 & 0.003 & & 1.0 & & 65 & & 37375 & & 0.060 & 0.042 & & 6.0 \\ 19 & & 15332 & & 0.001 & 0.000 & & 4.7 & & 66 & & 37552 & & 0.001 & 0.004 & & 0.9 \\ 20 & & 15352 & & 0.005 & 0.005 & & 7.9 & & 67 & & 37554 & & 0.000 & 0.008 & & 1.0 \\ 21 & & 15353 & & 0.001 & 0.009 & & 1.3 & & 68 & & 37572 & & 0.004 & 0.002 & & 2.4 \\ 22 & & 15372 & & 0.000 & 0.001 & & 2.8 & & 69 & & 37573 & & 0.001 & 0.002 & & 3.3 \\ 23 & & 15373 & & 0.000 & 0.009 & & 1.9 & & 70 & & 37574 & & 0.007 & 0.000 & & 1.5 \\ 24 & & 15552 & & 0.025 & 0.000 & & 6.1 & & 71 & & 37575 & & 0.005 & 0.004 & & 0.0 \\ 25 & & 15572 & & 0.003 & 0.000 & & 2.4 & & 72 & & 37772 & & 0.010 & 0.003 & & 3.4 \\ 26 & & 15573 & & 0.008 & 0.000 & & 2.2 & & 73 & & 37774 & & 0.026 & 0.000 & & 8.6 \\ 27 & & 15772 & & 0.001 & 0.000 & & 0.0 & & 74 & & 55550 & & 0.071 & 0.073 & & 26.4 \\ 28 & & 17173 & & 0.069 & 0.097 & & 18.9 & & 75 & & 55552 & & 0.041 & 0.040 & & 12.2 \\ 29 & & 17174 & & 0.045 & 0.074 & & 12.0 & & 76 & & 55554 & & 0.042 & 0.019 & & 7.1 \\ 30 & & 17353 & & 0.000 & 0.002 & & 1.1 & & 77 & & 55572 & & 0.007 & 0.004 & & 5.8 \\ 31 & & 17354 & & 0.011 & 0.005 & & 5.9 & & 78 & & 55574 & & 0.003 & 0.000 & & 3.6 \\ 32 & & 17373 & & 0.014 & 0.003 & & 8.9 & & 79 & & 55770 & & 0.016 & 0.013 & & 2.1 \\ 33 & & 17374 & & 0.015 & 0.002 & & 2.0 & & 80 & & 55772 & & 0.009 & 0.005 & & 1.2 \\ 34 & & 17554 & & 0.002 & 0.002 & & 3.4 & & 81 & & 55774 & & 0.002 & 0.010 & & 0.2 \\ 35 & & 17573 & & 0.010 & 0.002 & & 4.8 & & 82 & & 57571 & & 0.006 & 0.026 & & 24.6 \\ 36 & & 17574 & & 0.013 & 0.000 & & 6.3 & & 83 & & 57572 & & 0.067 & 0.080 & & 18.0 \\ 37 & & 17774 & & 0.000 & 0.002 & & 0.0 & & 84 & & 57573 & & 0.053 & 0.015 & & 7.2 \\ 38 & & 33330 & & 0.070 & 0.045 & & 40.6 & & 85 & & 57574 & & 0.028 & 0.004 & & 0.2 \\ 39 & & 33332 & & 0.101 & 0.126 & & 25.6 & & 86 & & 57575 & & 0.095 & 0.045 & & 9.0 \\ 40 & & 33352 & & 0.005 & 0.005 & & 2.5 & & 87 & & 57576 & & 0.020 & 0.036 & & 9.0 \\ 41 & & 33372 & & 0.007 & 0.001 & & 6.1 & & 88 & & 57772 & & 0.010 & 0.006 & & 5.4 \\ 42 & & 33550 & & 0.011 & 0.021 & & 2.0 & & 89 & & 57774 & & 0.010 & 0.000 & & 5.1 \\ 43 & & 33552 & & 0.010 & 0.010 & & 0.2 & & 90 & & 57776 & & 0.013 & 0.002 & & 0.0 \\ 44 & & 33572 & & 0.004 & 0.001 & & 4.4 & & 91 & & 77770 & & 0.082 & 0.071 & & 27.0 \\ 45 & & 33770 & & 0.009 & 0.019 & & 10.2 & & 92 & & 77772 & & 0.025 & 0.011 & & 18.6 \\ 46 & & 33772 & & 0.005 & 0.004 & & 6.7 & & 93 & & 77774 & & 0.092 & 0.080 & & 3.1 \\ 47 & & 35351 & & 0.004 & 0.027 & & 27.0 & & 94 & & 77776 & & 0.123 & 0.118 & & 9.0 \\ \hline\hline \end{tabular} \end{table*} In Ref. \cite{horoi-a}, the interaction signature of prolate and oblate shapes is represented by the average values of the interaction elements (denoted by $\overline{V^J_{j_1j_2j_3j_4}}$). In this work, we also adopt $\overline{V^J_{j_1j_2j_3j_4}}$ to probe the interaction signature of the $\theta=\pm 45^{\circ}$ correlations.
In detail, we collect all the interaction elements within $\theta\in(-50^{\circ},-40^{\circ})$ and $\theta\in(40^{\circ},50^{\circ})$, normalize them by the factor $\sum\limits_{Jj_1j_2j_3j_4} V^J_{j_1j_2j_3j_4}$, and then calculate all the $\overline{V^J_{j_1j_2j_3j_4}}$ values for the $\theta\in(-50^{\circ},-40^{\circ})$ and $\theta\in(40^{\circ},50^{\circ})$ regions, respectively. Because the signs of interaction elements can be changed by different phase conventions, we only discuss the magnitudes of $\overline{V^J_{j_1j_2j_3j_4}}$ (denoted by $|\overline{V^J_{j_1j_2j_3j_4}}|$) to avoid this potential ambiguity. To simplify the following discussion, each $|\overline{V^J_{j_1j_2j_3j_4}}|$ is labeled by the index $2j_1\times 10000+2j_2\times 1000+2j_3\times 100+2j_4\times 10+J$. For example, the pairing force between $s_{1/2}$ or $p_{1/2}$ nucleons, $V^0_{\frac{1}{2}\frac{1}{2}\frac{1}{2}\frac{1}{2}}$, corresponds to the index ``11110''. We list the $|\overline{V^J_{j_1j_2j_3j_4}}|$ values of both the $\theta\in(-50^{\circ},-40^{\circ})$ and $\theta\in(40^{\circ},50^{\circ})$ regions in increasing order of their indices in Tables \ref{int-sd} and \ref{int-pf}. \begin{figure*} \includegraphics[angle=0,width=0.8\textwidth]{int_mean.eps} \caption{(Color online) $|\overline{V^J_{j_1j_2j_3j_4}}|$ and $|\langle (j_1j_2)^J|\hat{C}_{\rm SU(3)}|(j_3j_4)^J\rangle|$ values (see text for definitions) against order numbers from Tables \ref{int-sd} and \ref{int-pf}. Indices are highlighted for the obvious peaks in the $|\langle (j_1j_2)^J|\hat{C}_{\rm SU(3)}|(j_3j_4)^J\rangle|$ values.}\label{int_mean} \end{figure*} To comprehensively compare the $|\overline{V^J_{j_1j_2j_3j_4}}|$ values between the $\theta\in(-50^{\circ},-40^{\circ})$ and $\theta\in(40^{\circ},50^{\circ})$ regions, we plot them against their order numbers (see Tables \ref{int-sd} and \ref{int-pf}) in Fig. \ref{int_mean}. Most of the $\overline{V^J_{j_1j_2j_3j_4}}$ values are close to zero, following the ensemble distribution. However, there are several relatively large $|\overline{V^J_{j_1j_2j_3j_4}}|$ values, which present obvious peaks in Fig. \ref{int_mean}. The peak positions for $\theta\in(-50^{\circ},-40^{\circ})$ are roughly consistent with those for $\theta\in(40^{\circ},50^{\circ})$, which hints that the $\theta=\pm 45^{\circ}$ correlations may share the same interaction signature. The interaction signature of the $\theta=\pm 45^{\circ}$ correlations can be related to the Elliott Hamiltonian. This Hamiltonian is dominated by the SU(3) Casimir operator, defined by \begin{equation}\label{h-su3} \hat{C}_{\rm SU(3)}=\frac{1}{4}\hat{Q}\cdot \hat{Q}+\frac{3}{4}\hat{L}\cdot \hat{L}, \end{equation} where $\hat{Q}$ and $\hat{L}$ are the quadrupole-moment and orbital-angular-momentum operators. We calculate the matrix elements $\langle (j_1j_2)^J|\hat{C}_{\rm SU(3)}|(j_3j_4)^J\rangle$, and still focus on their magnitudes (denoted by $|\langle j_1j_2|\hat{C}_{\rm SU(3)}|j_3j_4\rangle^J|$), similarly to the treatment for $\overline{V^J_{j_1j_2j_3j_4}}$. $|\langle j_1j_2|\hat{C}_{\rm SU(3)}|j_3j_4\rangle^J|$ is also labeled by the index $2j_1\times 10000+2j_2\times 1000+2j_3\times 100+2j_4\times 10+J$, and is thus comparable with $|\overline{V^J_{j_1j_2j_3j_4}}|$ as shown in Tables \ref{int-sd} and \ref{int-pf} and Fig. \ref{int_mean}. In Fig.
\ref{int_mean}, relatively large $|\langle j_1j_2|\hat{C}_{\rm SU(3)}|j_3j_4\rangle^J|$ values also present several obvious peaks, which have a similar pattern to the $|\overline{V^J_{j_1j_2j_3j_4}}|$ peaks for both the $\theta\in(-50^{\circ},-40^{\circ})$ and $\theta\in(40^{\circ},50^{\circ})$ regions. This observation implies a relation between the SU(3) symmetry and the $\theta=\pm 45^{\circ}$ correlations. We also highlight the indices of the $|\langle j_1j_2|\hat{C}_{\rm SU(3)}|j_3j_4\rangle^J|$ peaks in Fig. \ref{int_mean}, according to which the SU(3) Casimir operator always has large magnitudes for diagonal matrix elements with $j_1j_2=j_3j_4$. On the other hand, large $|\overline{V^J_{j_1j_2j_3j_4}}|$ values for the $\theta=\pm 45^{\circ}$ correlations also occur for diagonal $j_1j_2=j_3j_4$ in Tables \ref{int-sd} and \ref{int-pf}. As described in Sec. \ref{cal}, larger magnitudes of diagonal elements are required by the invariance of the TBRE under orthogonal transformations of the two-body configurations. Therefore, the shell-model TBRE intrinsically maintains part of the SU(3) properties to restore the $\theta=\pm 45^{\circ}$ correlations, even though it spectrally presents no trace of the SU(3) symmetry, as illustrated in Fig. \ref{r_sm}. After clarifying the relation between the $\theta=\pm 45^{\circ}$ correlations and the SU(3) symmetry, we microscopically describe how these two $Q$ correlations emerge in a major shell, i.e. the $sd$ or $pf$ shell here. In the Elliott model, any $2^+$ state within a major shell is labeled by the SU(3) representation $(\lambda,~\mu)$, the quantum number of the intrinsic state ($K$), and the orbital angular momentum $L=2$ \cite{elliott-su3}. The $2^+$ state is normally near the bottom of a $K$ band, and thus its $Q$ value can be approximately given by \cite{elliott-q} \begin{equation} Q(2^+)=\frac{2\lambda}{7}(K^2-2). \end{equation} The $K$ number is limited to 0, 1 and 2. Thus, the $Q(2^+_1)=-Q(2^+_2)$, i.e. $\theta=-45^{\circ}$, correlation is produced by two $2^+$ states with the same $\lambda$ number and $K=0,~2$, respectively (explicitly, $Q=-4\lambda/7$ for $K=0$ and $Q=+4\lambda/7$ for $K=2$), which agrees with the rotor-model conjecture \cite{allmond}. On the other hand, the $Q(2^+_1)=Q(2^+_2)$, i.e. $\theta=45^{\circ}$, correlation comes from two $2^+$ states with the same $\lambda$ and $K$ values. According to the above SU(3) description, one can expect two $2^+$ states with the $\theta=-45^{\circ}$ correlation from the same $(\lambda,\mu)$ representation. On the contrary, a single $(\lambda,\mu)$ representation cannot produce two $2^+$ states with the same $K$ number, so the $\theta=45^{\circ}$ correlation always requires the cooperation of two different $(\lambda,\mu)$ representations. Empirically, the former case has a relatively larger probability to emerge in the low-lying region, which explains why the $\theta=-45^{\circ}$ peak intensity is always larger than the $\theta=45^{\circ}$ one in Fig. \ref{q_sm}. Independently of the rotor interpretation, the anharmonic vibration (AHV) with quadrupole degrees of freedom \cite{ahv} can also provide the $\theta=-45^{\circ}$ correlation. In the AHV interpretation, the first two $2^+$ states are constructed with a significant mixing of one- and two-phonon configurations as \begin{equation}\label{2+} \begin{aligned} |2^+_1\rangle&=a_1 |b^{\dagger}\rangle+a_2| (b^{\dagger})^2 \rangle,\\ |2^+_2\rangle&=-a_2 |b^{\dagger}\rangle+a_1 |(b^{\dagger})^2 \rangle, \end{aligned} \end{equation} where $b^{\dagger}$ is the creation operator of a phonon; $a_1$ and $a_2$ are the amplitudes of the phonon configurations.
In this phonon space, the quadrupole operator $\hat{Q}$ is a polynomial in the operator $b^{\dagger}+\tilde{b}$ \cite{ahv-q}, where $\tilde{b}$ is the phonon time-reversal operator. The first-order term of this polynomial dominates the $Q$ matrix element. However, it vanishes between configurations with definite phonon numbers. In particular, \begin{equation} \begin{aligned} &\langle \tilde{b}||\hat{Q}||b^{\dagger}\rangle \propto \langle \tilde{b}||b^{\dagger}+\tilde{b}||b^{\dagger} \rangle=0,\\ &\langle (\tilde{b})^2||\hat{Q}||(b^{\dagger})^2\rangle\propto \langle (\tilde{b})^2||b^{\dagger}+\tilde{b}||(b^{\dagger})^2 \rangle=0. \end{aligned} \end{equation} Thus, \begin{equation} \begin{aligned} &\langle2^+_1||\hat{Q}||2^+_1\rangle=2a_1a_2\langle \tilde{b}||\hat{Q}||(b^{\dagger})^2\rangle,\\ &\langle2^+_2||\hat{Q}||2^+_2\rangle=-2a_1a_2\langle \tilde{b}||\hat{Q}||(b^{\dagger})^2\rangle, \end{aligned} \end{equation} and the $\theta=-45^{\circ}$ relation is obtained. \begin{figure} \includegraphics[angle=0,width=0.48\textwidth]{r22_sm.eps} \caption{(Color online) $R_{22}$ distributions around $\theta=- 45^{\circ}$ (red circles) compared with those in the whole shell-model TBRE (black squares). Error bars correspond to statistical errors.}\label{r22_sm} \end{figure} We can spectrally examine this AHV interpretation of the $\theta=-45^{\circ}$ correlation in the shell-model TBRE. Because the AHV $2^+$ states correspond to the mixing of one- and two-phonon configurations as defined in Eq. (\ref{2+}), the excitation energy of the first $2^+$ state, $E(2^+_1)$, is smaller than the one-phonon excitation energy, $\hbar\omega$, while $E(2^+_2)$ is larger than $2\hbar\omega$, according to perturbation theory. Thus, the energy ratio $R_{22}=E(2^+_2)/E(2^+_1)$ of the AHV is always larger than 2. In other words, if the AHV contributes to the $\theta=-45^{\circ}$ correlation in the TBRE, the distribution of $R_{22}$ with $\theta\in(-50^{\circ},-40^{\circ})$ should have an obvious enhancement for $R_{22}>2$. In Fig. \ref{r22_sm}, we compare the $R_{22}$ distributions in the $\theta\in(-50^{\circ},-40^{\circ})$ range with those in the whole shell-model TBRE. There is no obvious difference between these $R_{22}$ distributions. Thus, we do not see a spectral sign of the AHV contribution to the $\theta=-45^{\circ}$ correlation. \section{$Q$ correlations in IBM1} In IBM1, the $Q$ operator is a linear combination of two independent rank-two operators: \begin{equation}\label{q1q2_ibm} Q=Q^1+\chi Q^2, \end{equation} where $Q^1=d^{\dagger}s+s^{\dagger}\tilde{d}$, $Q^2=[d^{\dagger}\tilde{d}]^2$, and $\chi$ is a free parameter. Correspondingly, we need to define two independent $\theta$ coordinates as \begin{equation}\label{theta_q1q2} \begin{aligned} \theta^1&= \arctan\left\{\frac{\langle 2^+_2||Q^1||2^+_2\rangle}{\langle 2^+_1||Q^1||2^+_1\rangle}\right\},\\ \theta^2&= \arctan\left\{\frac{\langle 2^+_2||Q^2||2^+_2\rangle}{\langle 2^+_1||Q^2||2^+_1\rangle}\right\}. \end{aligned} \end{equation} A robust correlation with the polar angle $\theta$ should be insensitive to the $\chi$ value, which requires $\theta^1=\theta^2=\theta$. Obviously, such a correlation corresponds to a peak at the ($\theta$, $\theta$) point in the two-dimensional ($\theta^1$, $\theta^2$) distribution of the IBM1 TBRE. \begin{figure*} \includegraphics[angle=0,width=0.8\textwidth]{theta_ibm.eps} \caption{(Color online) Two-dimensional ($\theta^1$, $\theta^2$) distributions of the IBM1 TBRE.
Three sharp peaks are characterized as ``$Q(2^+_2)=\pm Q(2^+_1)$'' and ``U(5)'' correlations.}\label{theta_ibm} \end{figure*} Fig. \ref{theta_ibm} presents the ($\theta^1$, $\theta^2$) distributions of the IBM1 TBRE with $N_b=12$, 13, 14 and 15. These distributions follow a similar pattern, with three sharp peaks along the $\theta^1=\theta^2$ diagonal line, corresponding to three proportional $Q$ correlations. We fit the ($\theta^1$, $\theta^2$) distributions to a two-dimensional function, $f(\theta^1,\theta^2)$, with three Gaussian peaks, \begin{widetext} \begin{equation}\label{gau} f(\theta^1,\theta^2)=f_0+\sum\limits_{i=1}^3A_i \exp\left\{{-\frac{[(\theta^1-\theta^1_{c,i})\cos\omega_i+(\theta^2-\theta^2_{c,i})\sin\omega_i]^2}{2w^2_{\parallel , i}}} {-\frac{[-(\theta^1-\theta^1_{c,i})\sin\omega_i+(\theta^2-\theta^2_{c,i})\cos\omega_i]^2}{2w^2_{\perp , i}}}\right\}, \end{equation} \end{widetext} where $f_0$ is the background; all the other fitting variables are parameters of the Gaussian peaks. These three Gaussian peaks are labeled by the indices $i=1$, 2 and 3. For the $i$th peak, $\omega_i$ defines its orientation in the $(\theta^1,\theta^2)$ plane, $(\theta^1_{c,i},\theta^2_{c,i})$ is the peak position, $A_i$ is the amplitude, and $(w_{\parallel , i}, w_{\perp , i})$ are the widths along and perpendicular to the $\omega_i$ direction. Thus, the best-fit intensity of the $i$th peak can be calculated as $2\pi A_i w_{\parallel , i}w_{\perp , i}$. \begin{table*} \caption{Best-fit peak positions $(\theta^1_{c,i},\theta^2_{c,i})$ and intensities of the three sharp peaks in Fig. \ref{theta_ibm}, obtained with the two-dimensional three-peak Gaussian function defined in Eq. (\ref{gau}).}\label{peak-fit} \begin{tabular}{cccccccccccccccccccccc} \hline\hline \multirow{3}{*}{$N_b$} & $~~~$ & \multicolumn{3}{c}{$Q(2^+_2)=-Q(2^+_1)$} & $~~~$ & \multicolumn{3}{c}{$Q(2^+_2)=Q(2^+_1)$} & $~~~$ & \multicolumn{3}{c}{U(5)} \\ \cline{3-5}\cline{7-9}\cline{11-13} & & $\theta^1_{c,1}$ & $\theta^2_{c,1}$ & Intensity & & $\theta^1_{c,2}$ & $\theta^2_{c,2}$ & Intensity & & $\theta^1_{c,3}$ & $\theta^2_{c,3}$ & Intensity \\ & & (deg) & (deg) & ($\times 10^2$ counts) & & (deg) & (deg) & ($\times 10^2$ counts) & & (deg) & (deg) & ($\times 10^2$ counts) \\ \hline 12 & & -42.05(1) & -36.27(2) & 361(6) & & 40.19(2) & 43.44(1) & 272(5) & & -21.27(4) & -22.17(1) & 565(9) \\ 13 & & -42.29(1) & -36.92(2) & 476(6) & & 40.60(1) & 43.62(1) & 347(5) & & -21.12(3) & -22.21(1) & 742(9) \\ 14 & & -42.52(1) & -37.45(1) & 468(6) & & 40.91(1) & 43.73(1) & 347(5) & & -21.20(2) & -22.23(1) & 733(8) \\ 15 & & -42.66(1) & -37.94(1) & 393(7) & & 41.20(1) & 43.83(1) & 334(5) & & -21.32(2) & -22.26(1) & 683(8) \\ \hline\hline \end{tabular} \end{table*} In Table \ref{peak-fit}, we list the best-fit peak positions and intensities of the three sharp peaks in Fig. \ref{theta_ibm}. The $i=1$ and $i=2$ peaks are very close to $(\pm 45^{\circ},\pm 45^{\circ})$, i.e. the ``$Q(2^+_2)= \pm Q(2^+_1)$'' correlations, as labeled in Fig. \ref{theta_ibm} and Table \ref{peak-fit}. The $i=3$ peak is located around $(-21^{\circ},-22^{\circ})$, and thus gives $Q(2^+_2)/Q(2^+_1)\simeq -3/7$, the typical IBM1 $Q$ ratio at the U(5) limit regardless of the boson number. Therefore, we believe the $i=3$ peak may correspond to the vibrational U(5) collectivity, and denote it as ``U(5)'' in the following analysis. To identify or confirm the collective patterns corresponding to the three sharp peaks in Fig. \ref{theta_ibm}, we firstly investigate their $R_{42}$ distributions, i.e.
the predominance of low-lying collective excitations, similarly to our $R_{42}$ analysis for the shell-model TBRE in Fig. \ref{r_sm}; secondly, we adopt the $sd$-boson mean-field theory to observe the dominant nuclear shapes of these peaks. \begin{figure} \includegraphics[angle=0,width=0.48\textwidth]{r_ibm.eps} \caption{(Color online) $R_{42}$ distributions around the three peaks in Fig. \ref{theta_ibm}.}\label{r_ibm} \end{figure} For the analysis of the $R_{42}$ distributions, we firstly collect all the random interactions that produce ($\theta^1$, $\theta^2$) points within $3^{\circ}$ of the peaks in Fig. \ref{theta_ibm}. Secondly, all the $R_{42}$ values from these interactions are calculated. Thirdly, the $R_{42}$ distributions for these peaks are constructed and presented in Fig. \ref{r_ibm}. The $Q(2^+_2)= \pm Q(2^+_1)$ peaks always have large probabilities at the rotational limit $R_{42}=3.3$, which agrees with the rotor-model description. On the other hand, the $R_{42}$ distribution of the U(5) peak is dominated by $R_{42}=2$, corresponding to a typical U(5) vibrational spectrum, which supports our U(5) assignment for this peak. Our analysis with the $sd$-boson mean-field theory starts with the $sd$-boson coherent state for the ground band, \begin{equation}\label{coh} |g\rangle=(s^{\dagger}+\tan \alpha_0 d^{\dagger}_0)^{N_b}|0\rangle. \end{equation} Similarly to Ref. \cite{bijker-mf}, the nuclear shape, i.e. the optimized $\alpha_0$ value, is determined by minimizing the Hamiltonian expectation value in this coherent state, \begin{equation}\label{eg} \begin{aligned} E_g(\alpha)=&a_1\sin^4\alpha+a_2\sin^3\alpha\cos\alpha\\ &+a_3\sin^2\alpha\cos^2\alpha+a_0\cos^4\alpha,\\ \end{aligned} \end{equation} where $E_g(\alpha_0)$ is the minimum of this expression; $a_0$, $a_1$, $a_2$ and $a_3$ are linear combinations of the $sd$-boson two-body interaction matrix elements, as formulated in Ref. \cite{chen-mf}. We calculate the $\alpha_0$ values for all the interactions with spin-$0\hbar$ ground states in the TBRE, and perform frequency counting on the calculated $\alpha_0$ values. Thus, the ensemble-normalized $\alpha$ distribution for the $i$th peak is given by \begin{equation}\label{palpha} P(\alpha)=N(\alpha,\theta^1_{c,i},\theta^2_{c,i})/\mathcal{N}(\alpha), \end{equation} where $N(\alpha,\theta^1_{c,i},\theta^2_{c,i})$ is the counting number with $\alpha_0\in (\alpha-2.5^{\circ},\alpha+2.5^{\circ})$ and $\sqrt{(\theta^1-\theta^1_{c,i})^2+(\theta^2-\theta^2_{c,i})^2}<3^{\circ}$, and $\mathcal{N}(\alpha)$ is that with $\alpha_0\in (\alpha-2.5^{\circ},\alpha+2.5^{\circ})$ in the whole IBM1 TBRE. \begin{figure} \includegraphics[angle=0,width=0.48\textwidth]{alpha.eps} \caption{(Color online) $P(\alpha)$, as defined in Eq. (\ref{palpha}), around the three peaks in Fig. \ref{theta_ibm}.}\label{alpha} \end{figure} Fig. \ref{alpha} presents the calculated $P(\alpha)$ distributions. The U(5) peak only has a significant probability at $\alpha=0$, corresponding to the $s$-boson condensate. Thus, the $2^+$ states for the U(5) peak are constructed by replacing $s$ bosons with $d$ bosons in the $s$-boson condensate, which agrees with the quadrupole vibration described by the U(5) limit. This further confirms our U(5) characterization of this peak. On the other hand, the $Q(2^+_2)= \pm Q(2^+_1)$ peaks both have large probabilities for $0<|\alpha|<90^{\circ}$, corresponding to the axially symmetric rotor at the SU(3) limit. Considering that the $Q(2^+_2)= \pm Q(2^+_1)$ peaks also favor SU(3) rotational spectra with $R_{42}=3.3$ in Fig.
\ref{r_ibm}, we conclude that both $Q(2^+_2)= \pm Q(2^+_1)$ correlations in IBM1 are strongly related to the SU(3) limit. Conversely, we can also derive the $Q(2^+_2)= \pm Q(2^+_1)$ correlations from the SU(3) limit of the IBM1. At the SU(3) limit, the $2^+_1$ state is from the ground band with ($\lambda=2N_b$, $\mu=0$) and $K=0$; yet, the $2^+_2$ state belongs to the ($\lambda=2N_b-4$, $\mu=2$) representation, which generates $\beta$ and $\gamma$ bands with $K=0$ and 2, respectively \cite{ibm}. Thus, the $2^+_2$ state is from either the $\beta$ or the $\gamma$ band, which leads to two $Q$ correlations with opposite phases \begin{equation}\label{q_nb} \frac{Q^{\beta}(2^+)}{Q^{g}(2^+)}= \frac{4N_b-3}{4N_b+3};~\frac{Q^{\gamma}(2^+)}{Q^{g}(2^+)}= -\frac{4N_b-3}{4N_b+3}. \end{equation} For $N_b\rightarrow \infty$, $Q^{\beta}(2^+)= Q^{\rm g}(2^+)$ and $Q^{\gamma}(2^+)= -Q^{\rm g}(2^+)$ are achieved, corresponding to the $Q(2^+_2)= Q(2^+_1)$ and $Q(2^+_2)= -Q(2^+_1)$ correlations, respectively. In the shell-model TBRE, the $Q(2^+_2)= - Q(2^+_1)$ correlation has a larger probability than the $Q(2^+_2)= Q(2^+_1)$ one (see Fig. \ref{q_sm}). Yet, in the IBM1 TBRE, these two correlations have roughly equal peak intensities, i.e. probabilities, as shown in Fig. \ref{theta_ibm} and Table \ref{peak-fit}. This is a major difference between the behaviors of the $Q$ correlations in the shell-model and IBM1 TBREs. This difference can be explained according to the $(\lambda, \mu)$ assignment of the SU(3) scheme. In the Shell Model, i.e. the Elliott model, the $Q(2^+_2)= - Q(2^+_1)$ correlation normally emerges with two $2^+$ states from a single $(\lambda, \mu)$ representation, which empirically provides a larger probability. However, in the $sd$-boson space, both $Q(2^+_2)= \pm Q(2^+_1)$ correlations require $2^+_1$ and $2^+_2$ states from two different $(\lambda, \mu)$ representations, and thus they have similar probabilities. As shown in Table \ref{peak-fit}, the $|\theta^1_{c,i}|$ and $|\theta^2_{c,i}|$ of the $Q(2^+_2)= \pm Q(2^+_1)$ correlations are systematically smaller than $45^{\circ}$. This observation can be explained with Eq. (\ref{q_nb}). For large but finite $N_b$, the magnitude of $Q(2^+_2)$ is always smaller than that of $Q(2^+_1)$, which drives the $|\theta^1_{c,i}|$ and $|\theta^2_{c,i}|$ values below $45^{\circ}$. Therefore, we attribute the systematic deviation of the $Q(2^+_2)\simeq \pm Q(2^+_1)$ peak positions from the exact SU(3) prediction to the finite-boson-number effect, as proposed by Ref. \cite{allmond} with consistent-$Q$ calculations. \section{summary} To summarize, we observe three proportional correlations between the $Q$ values of the first two $I^{\pi}=2^+$ states in the TBRE. The $Q(2^+_1)=\pm Q(2^+_2)$ correlations robustly and universally exist in both the shell-model and $sd$-boson spaces, consistent with experiments. In the IBM1 TBRE, the $Q(2^+_2)=-\frac{3}{7}Q(2^+_1)$ correlation is also reported. By using the Elliott model and the $sd$-boson mean-field theory, we can microscopically assign the $Q(2^+_1)=\pm Q(2^+_2)$ correlations to the rotational SU(3) symmetry, and the $Q(2^+_2)=-\frac{3}{7}Q(2^+_1)$ correlation to the quadrupole vibrational U(5) limit. Phenomenologically, the anharmonic vibration may also provide the $Q(2^+_1)=- Q(2^+_2)$ correlation, although its spectral behavior is not observed in the shell-model TBRE. In particular, the invariance of the TBRE under orthogonal transformations intrinsically provides the shell-model TBRE with more opportunity to restore part of the SU(3) properties, i.e.
the $Q(2^+_1)=\pm Q(2^+_2)$ correlations, even though these $Q$ correlations are insensitive to the SU(3) rotational spectrum, as expected based on the experimental survey \cite{allmond}. On the other hand, the IBM1 $Q(2^+_1)=\pm Q(2^+_2)$ correlations always favor low-lying rotational spectra, which indicates that the IBM is more strongly governed by the dynamical symmetry. The SU(3) group reduction rule also qualitatively explains why the Shell Model favors the $Q(2^+_1)=- Q(2^+_2)$ correlation more obviously than the IBM1 does. Low-lying $Q$ correlations represent intrinsic nuclear collectivity, and thus are more sensitive to the wave-function details than the spectrum is. Therefore, nuclear quadrupole collectivity may persist at a far deeper level than the common realization based on orderly spectral patterns suggests. \acknowledgements Discussions with Prof. Y. M. Zhao and Prof. N. Yoshida are greatly appreciated. We also thank Dr. Z. Y. Xu for his careful proofreading. This work was supported by the National Natural Science Foundation of China under Grant No. 11305151.
\section{Introduction} \subsection*{Background and aim} In the representation theory of loop groups, one often encounters situations in which $\mathbb{G}_m$-central extensions of a loop group are concerned. There is a canonical one among these, the \textit{Tate central extension}, which appears as a pullback of a more general construction in the infinite-dimensional linear algebra of Tate vector spaces. A topological vector space over a discrete field $k$ is called a \textit{Tate vector space} if it is isomorphic to the direct sum of a discrete space and the dual of a discrete space. A typical example of a Tate vector space is the space $k((t))$ of formal Laurent series with the $t$-adic topology. If $G$ is a reductive algebraic group and $V$ a finite-dimensional representation, then there is an induced natural representation of the corresponding loop group $G((t))$ on the Tate vector space $V((t))$. The group of automorphisms of a Tate vector space is known to have a canonical $\mathbb{G}_m$-central extension, called the \textit{Tate central extension}, for whose construction we refer the reader to, for example, \cite{kapranov}. The Tate central extension is classified by a $\mathbb{G}_m$-gerbe equipped with an action by the automorphism group, but the assignment of this gerbe to each Tate vector space is not canonically compatible with direct sums. This led Beilinson et al. \cite{bbe} and Drinfeld \cite{drinfeld} to introduce the notion of a \textit{torsor over a sheaf of Picard groupoids}, enriching the $\mathbb{G}_m$-gerbe classifying the Tate central extension to a $\operatorname{Pic}^{\mathbb{Z}}$-torsor classifying an object that should be called the \textit{categorical Tate central extension} of the automorphism group of a Tate vector bundle by the stack $\operatorname{Pic}^{\mathbb{Z}}$ of $\mathbb{Z}$-graded line bundles. See \cite{bbe}, section 2, and \cite{drinfeld}, section 5, for details. They gave the construction of the $\operatorname{Pic}^{\mathbb{Z}}$-torsor by a direct analogy with the classical construction of the plain $\mathbb{G}_m$-gerbe as in \cite{kapranov}, but Drinfeld proposes in section 5.5 of \cite{drinfeld} an interesting idea, which he attributes to Beilinson. Their idea, posed as a ``somewhat vague picture,'' roughly says that there should be a more homotopical interpretation of the $\operatorname{Pic}^{\mathbb{Z}}$-torsor classifying the categorical Tate central extension in terms of algebraic $K$-theory. Drinfeld's description of their idea remains in a sketchy state (which is why it is called a ``vague picture''), and he leaves it as a problem to make it precise. The aim of this article is to propose and prove a more precisely and more comprehensively formulated version of Beilinson-Drinfeld's picture, presenting a very natural and simple approach to the Tate central extension via a classification theorem for objects called \textit{torsors over the sheaf of $K$-theory spaces}. The theory of $\infty$-topoi, recently developed by Lurie \cite{htt} et al., makes it possible to regard the whole sheaf of $K$-theory spaces (note that the stack of graded line bundles $\operatorname{Pic}^{\mathbb{Z}}$ can be interpreted as a truncation of the $K$-theory sheaf) as a group object, allowing us to meaningfully speak of torsors over it.
We show that the corresponding classifying space is equivalent to the $K$-theory sheaf of Tate vector bundles, as a geometric consequence of a delooping theorem obtained by the author in \cite{saito} and Drinfeld's theorem that the first negative $K$-group vanishes Nisnevich locally (\cite{drinfeld}, Theorem 3.4). This directly leads to a canonical construction assigning to each Tate vector bundle a torsor over the sheaf of $K$-theory spaces. The torsor thus obtained admits a canonical action by the sheaf of automorphisms of the Tate vector bundle, thereby yielding an object that should be called the \textit{$\infty$-categorical Tate central extension} of the automorphism group of the Tate vector bundle by the sheaf of $K$-theory spaces. We believe that our approach via a delooping theorem of $K$-theory, or its geometric consequence in an $\infty$-topos where the $K$-theory satisfies descent and the delooped $K$-theory satisfies local connectedness, is the most comprehensive and conceptually appropriate way of treating the Tate central extension. We will discuss a possible generalization of the results presented here to higher-dimensional contexts in future work. \subsection*{Summary of the results} Let us give here a more detailed and precise summary of our results. Write $\Pi$ for the filtered category of pairs $(i,j)$ of integers with $i\leq j$, where there is a unique morphism $(i,j)\to(i^{\prime},j^{\prime})$ if $i\leq i^{\prime}$ and $j\leq j^{\prime}$. For an exact category $\mathcal{A}$, let $\displaystyle\lim_{\longleftrightarrow}\mathcal{A}$ be the full subcategory of $\operatorname{Ind}\operatorname{Pro}\mathcal{A}$ consisting of ind-pro-objects $X=(X_{i,j})_{(i,j)\in\Pi}$, indexed by $\Pi$, satisfying that for every $i\leq j\leq k$ the sequence $$0\to X_{i,j}\to X_{i,k}\to X_{j,k}\to0$$ is a short exact sequence in $\mathcal{A}$. If the exact category $\mathcal{A}$ is an extension-closed, full additive subcategory of an abelian category $\mathcal{F}$, then $\displaystyle\lim_{\longleftrightarrow}\mathcal{A}$ is an extension-closed, full additive subcategory of the abelian category $\operatorname{Ind}\operatorname{Pro}\mathcal{F}$, so that $\displaystyle\lim_{\longleftrightarrow}\mathcal{A}$ is endowed with the structure of an exact category. See \cite{beilinson}, A.3, and \cite{lcoec}, for details on the exact category $\displaystyle\lim_{\longleftrightarrow}\mathcal{A}$. We write $\mathbb{K}$ for Schlichting's non-connective $K$-theory spectrum of an exact category, introduced in \cite{schlichting2}, whose positive homotopy groups are the positive $K$-groups of the exact category, whose $0$-th homotopy group is the $0$-th $K$-group of the idempotent completion of the exact category, and whose negative homotopy groups recover the classical negative $K$-groups when the exact category is the category of finitely generated projective modules over a ring or the category of vector bundles on a quasi-compact, quasi-separated scheme with an ample family of line bundles. See \cite{schlichting2} for details. In a recent paper \cite{saito} the author proved the following theorem. (Note that idempotent completion causes no change in non-connective $K$-theory.) \begin{thm}[\cite{saito}, 1.2] \label{abstractdelooping} There is a natural equivalence of spectra between $\mathbb{K}(\mathcal{A})$ and $\Omega\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{A})^{\natural})$, where $(-)^{\natural}$ denotes the idempotent completion.
\end{thm} \begin{remark} \begin{enumerate} \item This was the concluding conjecture of L. Previdi's thesis (\cite{previdi}, Conjecture 5.1.7). \item In the case where $\mathcal{A}$ is the category of finitely generated projective $R$-modules, Drinfeld \cite{drinfeld} observes a fact which is essentially the $\pi_{-1}$-part of this equivalence: namely, an isomorphism between the first negative $K$-group of $R$ and the $0$-th $K$-group of his category of Tate $R$-modules (\cite{drinfeld}, Theorem 3.6-(iii)). \item Recent work of Br\"{a}unling, Grochenig and Wolfson \cite{bgw2} provides an interpretation of this theorem as an algebraic analogue of the Atiyah-Janich theorem in topological $K$-theory. \end{enumerate} \end{remark} Let $R$ be a commutative ring, which we assume in the sequel to be noetherian and of finite Krull dimension, and denote by $\mathcal{P}(R)$ the exact category of finitely generated projective $R$-modules. Then the idempotent-completed exact category $(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural}$ is very close to Drinfeld's category $\operatorname{Tate}_R^{\operatorname{Dr}}$ of Tate $R$-modules (which is denoted by $\mathcal{T}_R$ in \cite{drinfeld}, 3.3.2). Indeed, if $(M_{i,j})_{i\leq j}$ is an object of $\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R)$, the $R$-module $\varinjlim_j\varprojlim_iM_{i,j}$ endowed with the topology induced from the discrete ones on $M_{i,j}$ is an elementary Tate $R$-module in Drinfeld's sense (\cite{drinfeld}, 3.2.1). Recent work by Br\"{a}unling, Grochenig, and Wolfson \cite{bgw} shows that this induces a fully faithful functor $(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural}\hookrightarrow\operatorname{Tate}_R^{\operatorname{Dr}}$, which is an equivalence onto the full subcategory of Tate $R$-modules of countable type (that is, direct summands of elementary Tate $R$-modules $P\oplus Q^{\ast}$ where $P$ and $Q$ are countably generated discrete, projective $R$-modules). See \cite{bgw}, Theorem 5.22. \begin{df} We call $(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural}$ the category of {\rm Tate vector bundles} over the affine scheme $\operatorname{Spec}R$. \end{df} We write $\operatorname{Spec}R_{\operatorname{Nis}}$ for the site whose underlying category is the opposite category of \'{e}tale $R$-algebras and $R$-homomorphisms, and whose notion of a covering is given as follows. A collection of \'{e}tale morphisms $\{\operatorname{Spec}R^{\prime}_{\alpha}\to\operatorname{Spec}R^{\prime}\}_{\alpha\in A}$ over $\operatorname{Spec}R$ is a covering in $\operatorname{Spec}R_{\operatorname{Nis}}$ if it is the opposite of a family of \'{e}tale $R$-homomorphisms $\{\phi_{\alpha}:R^{\prime}\to R_{\alpha}^{\prime}\}_{\alpha\in A}$ for which there exists a finite sequence of elements $a_1,\ldots,a_n\in R^{\prime}$ such that $(a_1,\ldots,a_n)=R^{\prime}$ and for every $1\leq i\leq n$ there exists an $\alpha\in A$ and an $R$-homomorphism $\psi:R^{\prime}_{\alpha}\to R^{\prime}[\frac{1}{a_i}]/(a_1,\ldots,a_{i-1})$ whose composition with $\phi_{\alpha}:R^{\prime}\to R^{\prime}_{\alpha}$ equals the map $R^{\prime}\to R^{\prime}[\frac{1}{a_i}]/(a_1,\ldots,a_{i-1})$. (See \cite{dagxi}, section 1, for details.) We refer to $\operatorname{Spec}R_{\operatorname{Nis}}$ as the \textit{small Nisnevich site} of the affine scheme $\operatorname{Spec}R$.
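Before turning to the sheaf-theoretic setup, let us record how the basic example $R((t))$ fits into the formalism of $\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R)$; this is a worked instance included only as an illustration, and the system $(X_{i,j})$ below is our illustrative choice of notation, not taken from the references. For $(i,j)\in\Pi$ put $$X_{i,j}:=t^{-j}R[[t]]/t^{-i}R[[t]],$$ a free $R$-module of rank $j-i$. For every $i\leq j\leq k$ the sequence $0\to X_{i,j}\to X_{i,k}\to X_{j,k}\to0$ is short exact, so $(X_{i,j})_{(i,j)\in\Pi}$, with the evident inclusions in the index $j$ and projections in the index $i$, defines an object of $\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R)$, and $$\varinjlim_j\varprojlim_iX_{i,j}\;\cong\;\varinjlim_jt^{-j}R[[t]]\;\cong\;R((t)),$$ recovering the elementary Tate $R$-module $R((t))$ with its usual topology, in accordance with the comparison results of \cite{bgw} cited above.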
Denote by $\operatorname{Set}_{\Delta}$ the category of simplicial sets, which is a combinatorial, simplicial model category with the Kan model structure. We write $\operatorname{Set}_{\Delta}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}}$ for the combinatorial, simplicial model category of simplicial presheaves on the underlying category of $\operatorname{Spec}R_{\operatorname{Nis}}$ with the injective model structure, and $(\operatorname{Set}_{\Delta}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}})^{\circ}$ for its fibrant-cofibrant objects. By Proposition 4.2.4.4 of \cite{htt}, there is an equivalence of $\infty$-categories $$\theta:N(\operatorname{Set}_{\Delta}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}})^{\circ}\stackrel{\sim}{\to}\operatorname{Fun}(N\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}},(\operatorname{Spaces}))=\operatorname{Preshv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}}),$$ where $N$ denotes the simplicial nerve and $(\operatorname{Spaces})$ is the $\infty$-category of spaces, which is by definition the simplicial nerve of the simplicial category of Kan complexes. (We write $\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ for the $\infty$-category of presheaves of spaces on an $\infty$-category $\mathcal{C}$.) Let $\operatorname{Set}_{\Delta,\operatorname{loc}}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}}$ denote the combinatorial, simplicial model category of simplicial presheaves on the site $\operatorname{Spec}R_{\operatorname{Nis}}$ with respect to Jardine's local model structure \cite{jardine}, and $(\operatorname{Set}_{\Delta,\operatorname{loc}}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}})^{\circ}$ its fibrant-cofibrant objects. Then Proposition 6.5.2.14 of \cite{htt} shows that the above equivalence $\theta$ restricts to the equivalence $$\theta:N(\operatorname{Set}_{\Delta,\operatorname{loc}}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}})^{\circ}\stackrel{\sim}{\to}\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})^{\wedge}\subset\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}}),$$ where $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})\subset\operatorname{Preshv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$ is the $\infty$-topos of sheaves of spaces on $N\operatorname{Spec}R_{\operatorname{Nis}}$ (see Definition \ref{inftytopos} and Example \ref{sheavesonsites} below), and $(-)^{\wedge}$ denotes its hypercompletion (\cite{htt}, 6.5.2). Recall that $R$ is assumed to be noetherian and of finite Krull dimension. Hence, by Thomason's Nisnevich descent theorem for non-connective $K$-theory (\cite{tt}, 10.8), the simplicial presheaf on $\operatorname{Spec}R_{\operatorname{Nis}}$ given by $K$-theory spaces $$R^{\prime}\mapsto\Omega^{\infty}\mathbb{K}(R^{\prime})$$ is a fibrant object of $\operatorname{Set}_{\Delta,\operatorname{loc}}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}}$, so that by the above equivalence $\theta$ it defines an object of the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$.
\begin{df} \label{k} We denote this object by $$\mathcal{K}=\theta(\Omega^{\infty}\mathbb{K}(-))\in\operatorname{ob}\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}}).$$ \end{df} Note that a presheaf of spectra satisfies Nisnevich descent if and only if it sends elementary Nisnevich squares to pullback-pushout squares. Since the suspension functor $\Sigma$ preserves pullback-pushout squares of spectra, we see that the Nisnevich descent of the non-connective $K$-theory $\mathbb{K}(-)$ implies the Nisnevich descent of $\Sigma\mathbb{K}(-)$, which is equivalent by Theorem \ref{abstractdelooping} to the presheaf $\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural})$. Hence the simplicial presheaf on $\operatorname{Spec}R_{\operatorname{Nis}}$ given by $$R^{\prime}\mapsto\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural})$$ is also fibrant in $\operatorname{Set}_{\Delta,\operatorname{loc}}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}}$, and thus defines, via the equivalence $\theta$, an object of the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$. \begin{df} \label{ktate} We denote this object by $$\mathcal{K}_{\operatorname{Tate}}=\theta(\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural}))\in\operatorname{ob}\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}}).$$ \end{df} We refer the reader to section 2 for a short exposition of the material from $\infty$-topos theory employed in this article, which is collected from \cite{htt} and \cite{nss}. In particular, we make essential use of the notions of group objects, their actions, and torsors in an $\infty$-topos. We recall these notions in section 2 (Definitions \ref{group}, \ref{action}, and \ref{torsor}, respectively), following \cite{htt} and \cite{nss}. \begin{prop} \label{Kasagroup} The object $\mathcal{K}$ is a group object in the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$. \end{prop} \begin{df} By a {\rm torsor over the sheaf of $K$-theory spaces} we mean a $\mathcal{K}$-torsor over the final object $\operatorname{Spec}R$, where $\mathcal{K}$ is regarded by Proposition \ref{Kasagroup} as a group object in the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$. \end{df} In general, for every group object $G$ of an $\infty$-topos $\mathfrak{X}$ there is an object $BG$ that classifies $G$-torsors, in the sense that for each object $X$ of $\mathfrak{X}$ there is an equivalence between the $\infty$-groupoid of $G$-torsors over $X$ and the mapping space from $X$ to $BG$; the object $BG$ is just given by the connected delooping of the group object $G$. (Theorem 3.19 of \cite{nss}, recalled in section 2 below as Theorem \ref{3.19}.) We call the object $BG$ the \textit{classifying space object} of the group object $G$. The following is the geometric incarnation of Theorem \ref{abstractdelooping}, which serves as a classification theorem of torsors over the sheaf of $K$-theory spaces. We remark that Drinfeld's theorem on the Nisnevich local vanishing of the first negative $K$-group (\cite{drinfeld}, Theorem 3.4) also plays a crucial role in its proof.
\begin{thm} \label{geometricdelooping} The classifying space object of the group object $\mathcal{K}$ in the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$ is given by the object $\mathcal{K}_{\operatorname{Tate}}$. That is, there is an equivalence between $B\mathcal{K}$ and $\mathcal{K}_{\operatorname{Tate}}$. \end{thm} \begin{cor} \label{dm} Torsors over the sheaf of $K$-theory spaces are classified by points of the space $\mathcal{K}_{\operatorname{Tate}}(\operatorname{Spec}R)=\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural})$. In particular, a Tate vector bundle $M\in\operatorname{ob}(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural}$ defines a torsor $\mathfrak{D}_M$ over the sheaf of $K$-theory spaces. \end{cor} Let $\operatorname{Aut}M$ denote the sheaf of groups on $\operatorname{Spec}R_{\operatorname{Nis}}$ given by $$R^{\prime}\mapsto\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}}M\otimes_RR^{\prime}.$$ This is a group object of the ordinary topos $\operatorname{Shv}_{(\operatorname{Sets})}(\operatorname{Spec}R_{\operatorname{Nis}})$, which is regarded as the full subcategory of discrete objects of the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$. \begin{thm} \label{autmaction} There is a canonical action of the group object $\operatorname{Aut}M$ on the $\mathcal{K}$-torsor $\mathfrak{D}_M$. \end{thm} \subsection*{Organization and conventions} Section 2 provides a brief review of the necessary material from $\infty$-topos theory, the main references being \cite{htt} and \cite{nss}. In section 3 we prove our results. We work in an $\infty$-categorical setting and refer the reader to \cite{htt} for basic terminology. In this article the category of simplicial sets is denoted by $\operatorname{Set}_{\Delta}$. We write $(\operatorname{Spaces})$ for the $\infty$-category of spaces (the simplicial nerve of the simplicial category of Kan complexes), and denote by $\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ and $\operatorname{Shv}_{(\operatorname{Spaces})}(\mathcal{C})$ the $\infty$-categories of presheaves and sheaves of spaces on $\mathcal{C}$, respectively. \subsection*{Acknowledgement} I thank Luigi Previdi for answering my questions on his conjecture. In fact, the work presented here grew out of an endeavour to understand the conceptual meaning of Theorem \ref{abstractdelooping}, and his suggestion that the main application of it should be to higher Tate central extensions was truly helpful. \section{Recollection on the theory of $\infty$-topoi} In this section we give a review of the necessary material of $\infty$-topos theory, collected from \cite{htt} and \cite{nss}. Since the exposition given here is somewhat terse, we refer the reader to the references \cite{htt} and \cite{nss} for full details. Let us begin by recalling the definition of an $\infty$-topos.
\begin{df}[$\infty$-topos; \cite{htt}, 6.1.0.4] \label{inftytopos} An {\rm $\infty$-topos} $\mathfrak{X}$ is a full, accessible (see \cite{htt}, 5.4.2.1, for the definition) subcategory of the $\infty$-category $\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})=\operatorname{Fun}(\mathcal{C}^{\operatorname{op}},(\operatorname{Spaces}))$ of presheaves of spaces on some $\infty$-category $\mathcal{C}$, such that the inclusion $\mathfrak{X}\hookrightarrow\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ has a left adjoint which preserves finite limits. The left adjoint is called the {\rm sheafification functor}. \end{df} A typical example of an $\infty$-topos is the $\infty$-category $\operatorname{Shv}_{(\operatorname{Spaces})}(\mathcal{C})$ of sheaves of spaces on an $\infty$-category $\mathcal{C}$ equipped with a Grothendieck topology. \begin{ex}[$\infty$-topos of sheaves of spaces; \cite{htt}, 6.2.2] \label{sheavesonsites} Let $\mathcal{C}$ be an $\infty$-category. A {\rm sieve} on an object $C$ of $\mathcal{C}$ is a full subcategory $\mathcal{C}^{(0)}_{/C}$ of the overcategory $\mathcal{C}_{/C}$ such that if a morphism in $\mathcal{C}_{/C}$ has its target in $\mathcal{C}^{(0)}_{/C}$ then it also has its source in $\mathcal{C}^{(0)}_{/C}$. It is Proposition 6.2.2.5 of \cite{htt} that there is a canonical bijection between sieves on the object $C$ and monomorphisms in the $\infty$-category $\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ whose target is $j(C)$, where $j:\mathcal{C}\hookrightarrow\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ denotes the Yoneda embedding. A {\rm Grothendieck topology} on $\mathcal{C}$ is an assignment of a collection of sieves on $C$ to each object $C$ of $\mathcal{C}$, subject to the conditions of \cite{htt}, 6.2.2.1. A sieve on $C$ belonging to that assigned collection is called a {\rm covering sieve} on $C$. A presheaf $F\in\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ on $\mathcal{C}$ is called a {\rm sheaf of spaces} on $\mathcal{C}$ if for every object $C$ of $\mathcal{C}$ and for every monomorphism $U\hookrightarrow j(C)$ corresponding to a covering sieve on $C$ the induced map $\operatorname{Map}_{\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})}(j(C),F)\to\operatorname{Map}_{\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})}(U,F)$ is a weak equivalence. The full subcategory $\operatorname{Shv}_{(\operatorname{Spaces})}(\mathcal{C})\subset\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ of sheaves of spaces on $\mathcal{C}$ is an $\infty$-topos (\cite{htt}, 6.2.2.7). \end{ex} An $\infty$-category equipped with a Grothendieck topology is called an \textit{$\infty$-site}. An ordinary site can be seen as an $\infty$-site by taking the nerve. \begin{df}[Homotopy sheaves; \cite{htt}, 6.5.1] \label{homotopysheaves} Let $\mathfrak{X}\subset\operatorname{Preshv}_{(\operatorname{Spaces})}(\mathcal{C})$ be an $\infty$-topos and $X$ a pointed object. For each integer $n\geq0$, the {\rm $n$-th homotopy sheaf} of $X$ is the sheaf of sets on $\mathcal{C}$ given by sheafifying the presheaf of sets on $\mathcal{C}$ that assigns to each object $C$ of $\mathcal{C}$ the $n$-th homotopy set $\pi_n(X(C))$ of the pointed space $X(C)$. \end{df} We say that a pointed object is \textit{connected} if its $0$-th homotopy sheaf is trivial. Write $\Delta_{\operatorname{big}}$ for the category of non-empty finite linearly ordered sets.
A \textit{simplicial object} in an $\infty$-topos $\mathfrak{X}$ is a functor $N(\Delta_{\operatorname{big}}^{\operatorname{op}})\to\mathfrak{X}$. The notions of group objects and their actions in an $\infty$-topos are formulated in terms of simplicial objects, as follows. \begin{df}[group object; \cite{htt}, 6.1.2.7, 7.2.2.1] \label{group} A {\rm group object} of an $\infty$-topos $\mathfrak{X}$ is a simplicial object $G:N(\Delta_{\operatorname{big}}^{\operatorname{op}})\to\mathfrak{X}$ in $\mathfrak{X}$ such that $G([0])$ is a terminal object of $\mathfrak{X}$ and for every $n\geq0$ and for every partition $[n]=S\cup S^{\prime}$ with $S\cap S^{\prime}=\{s\}$, the maps $G([n])\to G(S)$ and $G([n])\to G(S^{\prime})$ exhibit $G([n])$ as a product of $G(S)$ and $G(S^{\prime})$. \end{df} By a slight abuse of language we usually refer to the object $G([1])\in\operatorname{ob}\mathfrak{X}$ as a group object and call the simplicial object $G$ the \textit{group structure} on $G([1])$. \begin{thm}[\cite{htt}, 7.2.2.11-(1)] \label{classifyingspace} If $X$ is a connected pointed object of an $\infty$-topos $\mathfrak{X}$ then its loop space $\Omega X=\varprojlim(\ast\rightarrow X\leftarrow\ast)$ has a natural structure of a group object. This assignment $X\mapsto \Omega X$ assembles into an equivalence $$\Omega:\mathfrak{X}_{\ast,\operatorname{conn}}\leftrightarrows\operatorname{Grp}(\mathfrak{X}):B$$ between the $\infty$-categories $\mathfrak{X}_{\ast,\operatorname{conn}}$ of connected pointed objects of $\mathfrak{X}$ and $\operatorname{Grp}(\mathfrak{X})$ of group objects of $\mathfrak{X}$. The inverse functor $B$ takes a group object $G$ to the colimit $BG=\varinjlim G$ with the pointing given by $\ast=G([0])\to\varinjlim G$, where $G$ is seen as a diagram in $\mathfrak{X}$ indexed by $N(\Delta_{\operatorname{big}}^{\operatorname{op}})$. \end{thm} \begin{df}[Action of a group object; \cite{nss}, Definition 3.1] \label{action} Let $G$ be a group object of an $\infty$-topos $\mathfrak{X}$. An {\rm action} of $G$ on an object $P\in\operatorname{ob}\mathfrak{X}$ is a map of simplicial objects $\rho\to G$ in $\mathfrak{X}$ such that $\rho([0])=P$ and for every $n\geq0$ and for every partition $[n]=S\cup S^{\prime}$ with $S\cap S^{\prime}=\{s\}$, the maps $\rho([n])\to\rho(S)$ and $\rho([n])\to G(S^{\prime})$ exhibit $\rho([n])$ as a product of $\rho(S)$ and $G(S^{\prime})$. \end{df} Given an action $\rho\to G$ of $G$ on $P$, we get a square \begin{displaymath} \begin{CD} P@>>>\ast\\ @VVV@VVV\\ \varinjlim\rho@>>>BG \end{CD} \end{displaymath} by taking the colimits of the simplicial objects $\rho$ and $G$ seen as diagrams in $\mathfrak{X}$ indexed by $N(\Delta_{\operatorname{big}}^{\operatorname{op}})$. It can be shown that this square is a pullback square (\cite{nss}, Proposition 3.15). Conversely, given a pullback square \begin{displaymath} \begin{CD} P@>>>\ast\\ @VVV@VVV\\ X@>>>BG \end{CD} \end{displaymath} we can form a map of simplicial objects $\check{C}(P\to X)\to\check{C}(\ast\to BG)=G$ by taking the \v{C}ech nerves $\check{C}$ (see \cite{htt}, 6.1.2) of $P\to X$ and $\ast\to BG$. The constructions given above are mutually inverse to each other, due to the Giraud axiom saying that in an $\infty$-topos every groupoid object is effective; see \cite{nss}, section 3, for the details. Therefore, in an $\infty$-topos, giving an action of a group object $G$ on an object $P$ is equivalent to giving a fiber sequence $P\to X\to BG$, i.e.
to describing $P$ as a pullback $P=\varprojlim(X\rightarrow BG\leftarrow\ast)$. \begin{df}[Torsor; \cite{nss}, Definition 3.4] \label{torsor} Let $G$ be a group object in an $\infty$-topos $\mathfrak{X}$ and $X$ an object. A {\rm $G$-torsor over $X$} is a $G$-action $\rho\to G$ together with a map $\rho([0])\to X$ such that the induced map to $X$ from the colimit $\varinjlim\rho$, taken over the simplicial index category $N(\Delta^{\operatorname{op}}_{\operatorname{big}})$, is an equivalence. \end{df} It is notable that this simple definition automatically implies, in the setting of an $\infty$-topos, the usual conditions for torsors, such as the principality condition and local triviality. See \cite{nss}, Propositions 3.7 and 3.13. Given a $G$-torsor $\rho\to G$ over $X$, we get, by taking the colimits, a pullback square \begin{displaymath} \begin{CD} P@>>>\ast\\ @VVV@VVV\\ X@>>>BG, \end{CD} \end{displaymath} where $P=\rho([0])$, and in particular a map $X\to BG$. The above discussion on group actions shows that one can conversely construct a $G$-torsor $\check{C}(X\times_{BG}\ast\to X)\to G$ out of a given map $X\to BG$, and these constructions are mutually inverse. Hence, \begin{thm}[\cite{nss}, Theorem 3.19] \label{3.19} Let $\mathfrak{X}$ be an $\infty$-topos and $G$ a group object. The $\infty$-category (which can be shown to be an $\infty$-groupoid; \cite{nss}, Proposition 3.18) of $G$-torsors over a fixed object $X$ is equivalent to the $\infty$-groupoid $\operatorname{Map}_{\mathfrak{X}}(X,BG)$ of maps from $X$ to $BG$. \end{thm} In this sense we call $BG$ the \textit{classifying space object} of the group object $G$, and say that a map $X\to BG$ \textit{classifies} the $G$-torsor $\check{C}(X\times_{BG}\ast\to X)\to G$ over $X$. This and Theorem \ref{classifyingspace} exhibit a feature of $\infty$-topos theory that is particularly convenient for our purposes: in an $\infty$-topos, the classifying space for torsors is just given by the connected delooping of the group. \section{Proofs} Let $R$ be a commutative noetherian ring of finite Krull dimension, and consider the objects $\mathcal{K}$ and $\mathcal{K}_{\operatorname{Tate}}$ (Definitions \ref{k} and \ref{ktate}) of the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$ of sheaves of spaces on the small Nisnevich site of $\operatorname{Spec}R$. \begin{lemma} \label{pointedconnected} The object $\mathcal{K}_{\operatorname{Tate}}$ is a connected pointed object of the $\infty$-topos $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$. \end{lemma} \begin{proof} The pointedness is trivial, with the pointing $\operatorname{Spec}R\to\mathcal{K}_{\operatorname{Tate}}$ classified by the point $[0]\in\mathcal{K}_{\operatorname{Tate}}(\operatorname{Spec}R)=\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural})$ given by the chosen $0$-object of the exact category $(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural}$. The nontrivial part is the connectedness, which amounts to showing that the $0$-th homotopy sheaf $\pi_0\mathcal{K}_{\operatorname{Tate}}$ is a terminal object.
The sheaf of sets $\pi_0\mathcal{K}_{\operatorname{Tate}}$ is by Definition \ref{homotopysheaves} the sheafification of the presheaf $R^{\prime}\mapsto\pi_0(\mathcal{K}_{\operatorname{Tate}}(R^{\prime}))=\pi_0(\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}))$, which is naturally isomorphic to the presheaf $R^{\prime}\mapsto\pi_0(\Omega^{\infty}\Sigma\mathbb{K}(\mathcal{P}(R^{\prime})))=K_{-1}(R^{\prime})$ by Theorem \ref{abstractdelooping}. Now, it is a theorem of Drinfeld (\cite{drinfeld}, Theorem 3.4) that the presheaf $K_{-1}$ vanishes Nisnevich locally. Therefore its sheafification vanishes and we get the desired triviality of the $0$-th homotopy sheaf $\pi_0\mathcal{K}_{\operatorname{Tate}}$. \end{proof} \begin{lemma} \label{loopspace} The loop space $\Omega\mathcal{K}_{\operatorname{Tate}}$ of the pointed object $\mathcal{K}_{\operatorname{Tate}}$ is equivalent to $\mathcal{K}$. \end{lemma} \begin{proof} Recall that the objects $\mathcal{K}$ and $\mathcal{K}_{\operatorname{Tate}}$ of $\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$ are the images under $\theta$ of the simplicial presheaves $\Omega^{\infty}\mathbb{K}(-)$ and $\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural})$ (Definitions \ref{k} and \ref{ktate}). In the simplicial category $(\operatorname{Set}_{\Delta}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}})^{\circ}$ we have that the object $\Omega^{\infty}\mathbb{K}(-)$ is equivalent to the homotopy limit $\displaystyle{\rm \mathop{holim}_{\longleftarrow}}(\ast\rightarrow\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural})\leftarrow\ast)$ by Theorem \ref{abstractdelooping}. By Theorem 4.2.4.1 of \cite{htt} this translates into an equivalence in $N(\operatorname{Set}_{\Delta}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}})^{\circ}\simeq\operatorname{Preshv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$ between the object $\mathcal{K}=\theta(\Omega^{\infty}\mathbb{K}(-))$ and the limit $\varprojlim(\ast\rightarrow\theta(\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural}))\leftarrow\ast)$, which is by definition the loop space of the pointed object $\theta(\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural}))=\mathcal{K}_{\operatorname{Tate}}$. \end{proof} {\bf Proof of Proposition \ref{Kasagroup}, Theorem \ref{geometricdelooping}, and Corollary \ref{dm}. } Recall Theorem \ref{classifyingspace}, which says that for an $\infty$-topos $\mathfrak{X}$ there is an equivalence $$\Omega:\mathfrak{X}_{\ast,\operatorname{conn}}\leftrightarrows\operatorname{Grp}(\mathfrak{X}):B$$ between the $\infty$-categories $\mathfrak{X}_{\ast,\operatorname{conn}}$ of connected pointed objects of $\mathfrak{X}$ and $\operatorname{Grp}(\mathfrak{X})$ of group objects of $\mathfrak{X}$. For $\mathfrak{X}=\operatorname{Shv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$ we have by Lemma \ref{pointedconnected} that $\mathcal{K}_{\operatorname{Tate}}$ is in $\mathfrak{X}_{\ast,\operatorname{conn}}$, and by Lemma \ref{loopspace} that its loop space $\Omega\mathcal{K}_{\operatorname{Tate}}$ is equivalent to $\mathcal{K}$. This provides $\mathcal{K}$ with the desired group structure, and the proof of Proposition \ref{Kasagroup} is complete.
Applying the inverse functor $B$ to the equivalence between $\Omega\mathcal{K}_{\operatorname{Tate}}$ and $\mathcal{K}$ we obtain the desired equivalence between $\mathcal{K}_{\operatorname{Tate}}\cong B\Omega\mathcal{K}_{\operatorname{Tate}}$ and $B\mathcal{K}$, where $B\mathcal{K}$ serves as the classifying space object for $\mathcal{K}$-torsors in view of Theorem \ref{3.19}, and the proof of Theorem \ref{geometricdelooping} is complete. Thus we see that $\mathcal{K}$-torsors over $\operatorname{Spec}R$ are classified by maps from $\operatorname{Spec}R$ to $B\mathcal{K}\cong\mathcal{K}_{\operatorname{Tate}}$, which correspond by Yoneda's lemma to points of $\mathcal{K}_{\operatorname{Tate}}(\operatorname{Spec}R)=\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural})$, thereby proving the first assertion of Corollary \ref{dm}. To prove the second assertion, let $M$ be a Tate vector bundle over $\operatorname{Spec}R$. Then as an object of the exact category $(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural}$ it defines a point $[M]$ of the $K$-theory space $\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural})=\mathcal{K}_{\operatorname{Tate}}(\operatorname{Spec}R)$, whose corresponding map $\operatorname{Spec}R\to\mathcal{K}_{\operatorname{Tate}}\cong B\mathcal{K}$ we also denote by $[M]$ by a slight abuse of notation. The desired torsor $\mathfrak{D}_M$ is the $\mathcal{K}$-torsor classified by this map $[M]$. Namely, it is the pullback $\mathfrak{D}_M=\varprojlim(\operatorname{Spec}R\stackrel{[M]}{\rightarrow}B\mathcal{K}\leftarrow\ast)$: \begin{displaymath} \begin{CD} \mathfrak{D}_M@>>>\operatorname{Spec}R\\ @VVV@VV{\text{base-point}}V\\ \operatorname{Spec}R@>{[M]}>>B\mathcal{K}. \end{CD} \end{displaymath} {\bf Proof of Theorem \ref{autmaction}. } Let $M\in\operatorname{ob}(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural}$ be a Tate vector bundle over $\operatorname{Spec}R$. We consider the simplicial presheaf on $\operatorname{Spec}R_{\operatorname{Nis}}$ that assigns to $R^{\prime}$ the simplicial set $N\overline{\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}}M\otimes_RR^{\prime}}$, where $\overline{\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}}M\otimes_RR^{\prime}}$ is the groupoid with a single object whose morphisms are given by the elements of the group $\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}}M\otimes_RR^{\prime}$ and whose composition is defined by the multiplication of the group. By applying $\theta$ to a fibrant replacement of it we get a presheaf of spaces on $N\operatorname{Spec}R_{\operatorname{Nis}}$, which is denoted by $N\overline{\operatorname{Aut}M}$, and whose sheafification is denoted by $a(N\overline{\operatorname{Aut}M})$. We use the following lemma. \begin{lemma} The classifying space object for the group object $\operatorname{Aut}M$ is given by $a(N\overline{\operatorname{Aut}M})$. \end{lemma} \begin{proof} The proof goes similarly to the proof of Theorem \ref{geometricdelooping}, once we notice that $a(N\overline{\operatorname{Aut}M})$ is a connected pointed object with its loop space object equivalent to $\operatorname{Aut}M$.
With the obvious pointing $\ast\to N\overline{\operatorname{Aut}M}$ we have that $N\overline{\operatorname{Aut}M}$ is a pointed object, and so is its sheafification $a(N\overline{\operatorname{Aut}M})$. Recall the general fact that for every ordinary group $G$, the Kan complex $N\overline{G}$ is the Eilenberg-MacLane space $K(G,1)$, where $\overline{G}$ denotes the groupoid with a single object and morphisms given by elements of $G$. The $0$-th homotopy sheaf $\pi_0a(N\overline{\operatorname{Aut}M})$ is given by sheafifying the presheaf $R^{\prime}\mapsto\pi_0(N\overline{\operatorname{Aut}M}(R^{\prime}))$, and this vanishes since the Eilenberg-MacLane space $N\overline{G}=K(G,1)$ is always connected. Since the sheafification functor commutes with finite limits, the loop space $\Omega(a(N\overline{\operatorname{Aut}M}))$ is the sheafification of the loop space $\Omega(N\overline{\operatorname{Aut}M})$, which can be computed as the homotopy limit in the simplicial category $(\operatorname{Set}_{\Delta}^{\operatorname{Spec}R_{\operatorname{Nis}}^{\operatorname{op}}})^{\circ}$ by Theorem 4.2.4.1 of \cite{htt}. This in turn can be computed object-wise on the simplicial presheaf $R^{\prime}\mapsto N\overline{\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}}M\otimes_RR^{\prime}}=K(\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}}M\otimes_RR^{\prime},1)$, and using the general fact that $\Omega K(G,1)=G$ we get the desired conclusion. \end{proof} Now, recall that for a group object $G$ of an $\infty$-topos $\mathfrak{X}$, giving a $G$-action on an object $P\in\operatorname{ob}\mathfrak{X}$ is equivalent to giving a fiber sequence $P\to X\to BG$, i.e. to describing $P$ as a pullback $P=\varprojlim(X\rightarrow BG\leftarrow\ast)$. (See Definition \ref{action} and the discussion following it in section 2.) Hence, constructing the desired $\operatorname{Aut}M$-action on the $\mathcal{K}$-torsor $\mathfrak{D}_M$ amounts to describing $\mathfrak{D}_M$ as a pullback $\mathfrak{D}_M=\varprojlim(X\rightarrow B\operatorname{Aut}M\leftarrow\operatorname{Spec}R)$ for some $X$ and some map $X\to B\operatorname{Aut}M$. It turns out that it suffices to have a map $[[M]]:B\operatorname{Aut}M\to B\mathcal{K}$ whose precomposition with the base-point map $\operatorname{Spec}R\to B\operatorname{Aut}M$ is equivalent to the map $[M]:\operatorname{Spec}R\to B\mathcal{K}$ classifying the $\mathcal{K}$-torsor $\mathfrak{D}_M$. Indeed, the successive pullback $\varprojlim(X\rightarrow B\operatorname{Aut}M\stackrel{(\text{base-point})}{\leftarrow}\operatorname{Spec}R)$, where $X=\varprojlim(B\operatorname{Aut}M\stackrel{[[M]]}{\rightarrow}B\mathcal{K}\stackrel{(\text{base-point})}{\leftarrow}\operatorname{Spec}R)$, is given by $\mathfrak{D}_M$ if $[[M]]\circ(\text{base-point})=[M]$: \begin{displaymath} \begin{CD} \mathfrak{D}_M@>>> X@>>>\operatorname{Spec}R\\ @VVV@VVV@VV{\text{base-point}}V\\ \operatorname{Spec}R@>{\text{base-point}}>>B\operatorname{Aut}M@>{[[M]]}>>B\mathcal{K}.\\ \end{CD} \end{displaymath} We thus get a fiber sequence $\mathfrak{D}_M\to X\to B\operatorname{Aut}M$, i.e. the description $\mathfrak{D}_M=\varprojlim(X\rightarrow B\operatorname{Aut}M\leftarrow\operatorname{Spec}R)$, as desired.
To find such a map $[[M]]$, we notice that, in general, for any idempotent complete exact category $\mathcal{A}$ and any object $a$ of $\mathcal{A}$ the space $N\overline{\operatorname{Aut}_{\mathcal{A}}a}$ admits a canonical map to the space $\Omega\abs{iS_{\bullet}(\mathcal{A})}=\Omega^{\infty}\mathbb{K}(\mathcal{A})$, where $S_{\bullet}$ denotes Waldhausen's $S_{\bullet}$-construction (\cite{waldhausen}, 1.3), $i(-)$ the subcategory of isomorphisms, and $\abs{-}$ the geometric realization. This is the composition of the map $N\overline{\operatorname{Aut}_{\mathcal{A}}a}\to Ni\mathcal{A}$ (recall that we write $i\mathcal{A}$ for the subcategory of isomorphisms) with the first structure map $Ni\mathcal{A}\to\Omega \abs{iS_{\bullet}(\mathcal{A})}$ of Waldhausen's connective algebraic $K$-theory spectrum (\cite{waldhausen}, 1.3). Applying this construction to $M\otimes_RR^{\prime}\in\operatorname{ob}(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R^{\prime}))^{\natural}$ for \'{e}tale $R$-algebras $R^{\prime}$, we get a map of simplicial presheaves $N\overline{\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural}}M\otimes_R(-)}\to\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural})$. Via the equivalence $\theta$ this defines a map $N\overline{\operatorname{Aut}M}\to\mathcal{K}_{\operatorname{Tate}}$ in $\operatorname{Preshv}_{(\operatorname{Spaces})}(N\operatorname{Spec}R_{\operatorname{Nis}})$, which in turn induces a map $[[M]]:B\operatorname{Aut}M\cong a(N\overline{\operatorname{Aut}M})\to\mathcal{K}_{\operatorname{Tate}}\cong B\mathcal{K}$. Note that the precomposition of the map of simplicial presheaves $N\overline{\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural}}M\otimes_R(-)}\to\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural})$ with the canonical pointing $\operatorname{Spec}R\to N\overline{\operatorname{Aut}_{(\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(-))^{\natural}}M\otimes_R(-)}$ corresponds to the point $[M]\in\Omega^{\infty}\mathbb{K}((\displaystyle\lim_{\longleftrightarrow}\mathcal{P}(R))^{\natural})$, so that the map $[[M]]$ satisfies the desired property $[[M]]\circ(\text{base-point})=[M]$. The proof of Theorem \ref{autmaction} is complete.
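\begin{remark} As a consistency check (a sketch under the running assumptions, not used in the proofs above), the homotopy sheaves of $\mathcal{K}_{\operatorname{Tate}}$ can be read off from Theorem \ref{abstractdelooping} and Definition \ref{homotopysheaves}: for every $n\geq0$, $$\pi_n\mathcal{K}_{\operatorname{Tate}}\;=\;\text{the sheafification of }R^{\prime}\mapsto K_{n-1}(R^{\prime}).$$ In particular $\pi_0\mathcal{K}_{\operatorname{Tate}}$ is trivial by Drinfeld's theorem on the Nisnevich local vanishing of $K_{-1}$, as in Lemma \ref{pointedconnected}, while $\pi_1\mathcal{K}_{\operatorname{Tate}}$ is the sheafification of $K_0(-)$, whose stalks at the henselian local rings of the Nisnevich site are $\mathbb{Z}$, given by the rank, since every finitely generated projective module over a local ring is free. This is consistent with the picture from the introduction that the stack $\operatorname{Pic}^{\mathbb{Z}}$ of $\mathbb{Z}$-graded line bundles appears as a truncation of the $K$-theory sheaf, the grading corresponding to the locally constant $\mathbb{Z}$ just described. \end{remark}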
\section{Introduction} Let $\nu$ be a Borel probability measure on $\mathbb{R}^q$. The quantization problem for $\nu$ is concerned with the approximation of $\nu$ by discrete measures of finite support in $L_r$-metrics. This problem has a deep background in information theory and engineering technology such as signal processing and pattern recognition \cite{BW:82,GN:98}. We refer to \cite{GL:00,GL:04} for rigorous mathematical theory of the quantization problem. In the past decades, this problem has attracted great interest from mathematicians (cf. \cite{GL:00,GL:04,GL:08,GL:12,GPM:02,KZ:15,KZ:16,Kr:13,PG:97,PK:01,PK:04}). \subsection{Some definitions and basic facts} Let $r\in [0,\infty)$ and $k\in\mathbb{N}$. Let $d$ denote the Euclidean metric on $\mathbb{R}^q$. For every $k\geq 1$, let $\mathcal{D}_k:=\{\alpha\subset\mathbb{R}^q:1\leq {\rm card}(\alpha)\leq k\}$. For $x\in\mathbb{R}^q$ and $\alpha\subset\mathbb{R}^q$, let $d(x,\alpha):=\inf_{a\in\alpha}d(x,a)$. The $k$th quantization error for $\nu$ of order $r$ can be defined by \begin{eqnarray}\label{quanerror} e_{k,r}(\nu)=\left\{\begin{array}{ll}\big(\inf\limits_{\alpha\in\mathcal{D}_k}\int d(x,\alpha)^{r}d\nu(x)\big)^{1/r}&r>0\\ \inf\limits_{\alpha\in\mathcal{D}_k}\exp\big(\int\log d(x,\alpha)d\nu(x)\big)&r=0\end{array}\right.. \end{eqnarray} One may see \cite{GL:00} for some equivalent definitions and interpretations in various contexts. For $r\in[1,\infty)$, $e_{n,r}(\nu)$ is equal to the minimum error when approximating $\nu$ by discrete probability measures supported on at most $n$ points in the $L_r$-metric. A set $\alpha\in\mathcal{D}_k$ is called a $k$-optimal set for $\nu$ of order $r$ if the infimum in (\ref{quanerror}) is attained at $\alpha$. The points in such an $\alpha$ are called $k$-\emph{optimal points}. As in \cite{GL:00,GL:04}, we denote the collection of all the $k$-optimal sets for $\nu$ of order $r$ by $C_{k,r}(\nu)$ and simply write $C_k(\nu)$ for $C_{k,0}(\nu)$. For $r>0$, $C_{k,r}(\nu)\neq\emptyset$ if $\int |x|^rd\nu(x)<\infty$; and $C_k(\nu)$ is non-empty if the following condition is satisfied: \[ \int_0^1 s^{-1}\sup_{x\in\mathbb{R}^q}\nu(B(x,s))ds<\infty. \] In particular, $C_k(\nu)\neq\emptyset$ if for some constants $C,t>0$, we have \[ \sup_{x\in\mathbb{R}^q}\nu(B(x,\epsilon))\leq C\epsilon^t \] for every $\epsilon>0$. Furthermore, whenever the support $K_\nu$ of $\nu$ is an infinite set, we have that $e_{n,r}(\nu)$ is strictly decreasing as $n$ increases and ${\rm card}(\alpha_n)=n$ for every $\alpha_n\in C_{n,r}(\nu)$. One can see Theorem 4.12 of \cite{GL:00} and Theorem 2.4 of \cite{GL:04} for more detailed information. Let $\alpha$ be a non-empty finite subset of $\mathbb{R}^q$. For each $a\in\alpha$, the Voronoi region generated by $a$ with respect to $\alpha$ is defined by \begin{equation}\label{vregion} W(a|\alpha):=\{x\in\mathbb{R}^q:d(x,\alpha)=d(x,a)\}. \end{equation} A Voronoi partition (VP) with respect to the set $\alpha$ is defined to be a Borel partition $\{P_a(\alpha)\}_{a\in\alpha}$ of $\mathbb{R}^q$ such that \[ P_a(\alpha)\subset W(a|\alpha),\;a\in\alpha. \] Let us call a VP with respect to an $\alpha\in C_{k,r}(\nu)$ a $k$-\emph{optimal Voronoi partition}. For a Borel set $A\subset\mathbb{R}^q$ and a non-empty finite subset $\alpha$ of $\mathbb{R}^q$, we define \begin{eqnarray} I_{\nu,r}(A,\alpha):=\left\{\begin{array}{ll}\int_{A}d(x,\alpha)^rd\nu(x)&r>0\\ \int_{A}\log d(x,\alpha)d\nu(x)&r=0\end{array}\right..
\end{eqnarray} In the following, we simply write $I_\nu(A,\alpha)$ for $I_{\nu,0}(A,\alpha)$ for convenience. \subsection{A significant concern about the Voronoi partition} Let $\nu$ be an absolutely continuous probability measure on $\mathbb{R}^q$. In \cite{Ger:79}, Gersho conjectured that for $\alpha_n\in C_{n,r}(\nu)$ and an arbitrary VP $\{P_a(\alpha_n)\}_{a\in\alpha_n}$ with respect to $\alpha_n$, we have \begin{equation}\label{gersho} \lim_{n\to\infty}\frac{I_{\nu,r}(P_a(\alpha_n),\{a\})}{n^{-1}e_{n,r}^r(\nu)}=1,\;\;a\in\alpha_n. \end{equation} This conjecture is also significant for singular Borel probability measures. In \cite{GL:12}, Graf, Luschgy and Pag\`{e}s proved that, for a large class of absolutely continuous probability measures $\nu$, there exist constants $A_1,A_2>0$ such that \begin{equation}\label{g5} A_1n^{-1}e_{n,r}^r(\nu)\leq I_{\nu,r}(P_a(\alpha_n),\{a\})\leq A_2n^{-1}e_{n,r}^r(\nu),\;\;a\in\alpha_n. \end{equation} This is a weak version of (\ref{gersho}). One may see \cite{Kr:13,Zhu:20} for some other related work. We remark that for general probability measures, it is very difficult even to examine whether (\ref{g5}) holds. It is known from \cite{GL:04} that $e_{n,r}(\nu)\to e_{n,0}(\nu)$ as $r$ decreases to zero. Thus, by letting $r\to 0$ in (\ref{g5}), it is natural to conjecture that, for a Borel probability measure $\nu$, there exist some constants $B_1,B_2$ such that, for an arbitrary $\alpha_n\in C_n(\nu)$ and an arbitrary VP $(P_a(\alpha_n))_{a\in\alpha_n}$, the following holds: \begin{equation}\label{wgc} B_1n^{-1}\leq\min_{a\in\alpha_n}\nu(P_a(\alpha_n))\leq\max_{a\in\alpha_n}\nu(P_a(\alpha_n))\leq B_2n^{-1}. \end{equation} This can be regarded as a limiting case of the weak version (\ref{g5}). \subsection{Statement of the main result} A Borel measure $\mu$ on $\mathbb{R}^q$ is called an $s_0$-dimensional Ahlfors-David measure if there exist constants $C_1,C_2>0$ and some $\epsilon_0>0$ such that, for every $x\in{\rm supp}(\mu)$, \begin{equation}\label{AD} C_1\epsilon^{s_0}\leq\mu(B(x,\epsilon))\leq C_2\epsilon^{s_0},\;\epsilon\in(0,\epsilon_0). \end{equation} The asymptotics of the quantization errors for Ahlfors-David measures have been studied in detail by Graf and Luschgy (cf. \cite[Theorem 12.18]{GL:00}). One can also see \cite{GL:00,Mattila:95} for various examples of such measures. In the remaining part of the paper, we always denote by $\mu$ a probability measure satisfying (\ref{AD}). In addition, by Lemma 12.3 of \cite{GL:00}, we may assume that the second inequality in (\ref{AD}) holds for all $x\in\mathbb{R}^q$ and all $\epsilon>0$. For a set $B\subset\mathbb{R}^q$, we denote the diameter of $B$ by $|B|$. We will prove \begin{theorem}\label{mthm} Let $\mu$ be an Ahlfors-David probability measure on $\mathbb{R}^q$ with support $K_\mu$. There exist positive constants $d_1, d_2, d_3$, such that for each $n\geq 1$, every $\alpha_n\in C_n(\mu)$ and an arbitrary VP $\{P_a(\alpha_n)\}_{a\in\alpha_n}$, we have \[ d_1n^{-1}\leq\min_{a\in\alpha_n}\mu(P_a(\alpha_n))\leq\max_{a\in\alpha_n}\mu(P_a(\alpha_n))\leq d_2n^{-1}. \] Moreover, for every $a\in\alpha_n$, $P_a(\alpha_n)$ contains a ball of radius $d_3|P_a(\alpha_n)\cap K_\mu|$ which is centered at $a$.
\end{theorem} Our main idea for the proof of Theorem \ref{mthm} is to reduce the quantization problem for $\mu$ with respect to an arbitrarily large $n$ to that for some conditional measures of $\mu$ with respect to some bounded integers, and then we apply Theorem 2.4 of \cite{GL:04}, which says that a subset $\beta$ of a $k$-optimal set is ${\rm card}(\beta)$-optimal for the corresponding conditional measure of $\mu$. In order to accomplish the above-mentioned reduction, we will select some auxiliary integers and establish a characterization for $n$-optimal sets for $\mu$ with respect to the geometric mean error. In order to complete the proof of Theorem \ref{mthm} by using \cite[Theorem 2.4]{GL:04}, we will prove some weak estimates for the measures and geometrical size of elements of an optimal Voronoi partition. These results will be given in a more general context and allow us to drop an additional condition in \cite{Zhu:13} that the considered measure vanishes on every hyperplane. Unlike the $L_r$-quantization problem, in the study of the geometric mean error, the involved integrals are usually negative and the integrands are in logarithmic form. It turns out that the methods used to characterize the optimal sets in the $L_r$-quantization problem are often not applicable. For instance, if $A\supset B$ are Borel sets and $\alpha$ is a non-empty finite set, then we have $I_{\nu,r}(A,\alpha)\geq I_{\nu,r}(B,\alpha)$ for $r>0$; while for $r=0$, we usually have an inequality in the reverse direction: $I_\nu(A,\alpha)\leq I_\nu(B,\alpha)$, because the integrands are usually negative. For this reason, the arguments in the present paper are substantially different from those in \cite{Zhu:20}, which are for the $L_r$-quantization for $\mu$. \section{Preliminaries} For a probability measure $\nu$ on $\mathbb{R}^q$, we always denote the support of $\nu$ by $K_\nu$. Let $m$ be the smallest integer with $m>2(C_1^{-1}C_2)^{1/s_0}$. Let $k_0$ be the smallest integer such that $2m^{-k_0}<\epsilon_0$. Note that, for the Ahlfors-David probability measure $\mu$, $K_\mu$ is compact. Thus, for every $k\geq k_0$, we denote by $\phi_k$ the largest number of closed balls of radii $m^{-k}$ which are pairwise disjoint and centered in $K_\mu$. We fix such $\phi_k$ closed balls and denote them by $E_{k,i},1\leq i\leq\phi_k$. We define \[ \Omega_k:=\{(k,i):1\leq i\leq \phi_k\}. \] By the definition of $m$, one can show that $\phi_k<\phi_{k+1}$ by using (\ref{AD}) and the arguments in the proof of \cite[Lemma 2.1]{Zhu:20}. For $\sigma\in\Omega_k$, we denote the center of $E_\sigma$ by $c_\sigma$ and define $A_\sigma:=B(c_\sigma,2m^{-k})$. Then we have $K_\mu\subset\bigcup_{\sigma\in\Omega_k}A_\sigma$. The following lemma is a consequence of (\ref{AD}). \begin{lemma}\label{lem5} There exist constants $\eta_1,\eta_2>0$, such that for every $\sigma\in\Omega_k$, \[ \eta_1\phi_k^{-1}\leq\mu(A_\sigma)\leq\eta_2\phi_k^{-1}. \] \end{lemma} \begin{proof} Note that $K_\mu\subset\bigcup_{\sigma\in\Omega_k}A_\sigma$. By (\ref{AD}), for every $\sigma\in\Omega_k$, we have \begin{eqnarray*} &&1\leq\sum_{\tau\in\Omega_k}\mu(A_\tau)\leq\phi_k C_2C_1^{-1}\mu(A_\sigma); \\&&\mu(A_\sigma)\leq C_2C_1^{-1}2^{s_0}\mu(E_\sigma). \end{eqnarray*} It follows that $\mu(A_\sigma)\geq C_2^{-1}C_1\phi_k^{-1}$. Because the sets $E_\tau,\tau\in\Omega_k$, are pairwise disjoint, by (\ref{AD}), we have \[ 1\geq\sum_{\tau\in\Omega_k}\mu(E_\tau)\geq\phi_k C_1C_2^{-1}\mu(E_\sigma)\geq\phi_k C_1^2C_2^{-2}2^{-s_0}\mu(A_\sigma).
\] Hence, we have $\mu(A_\sigma)\leq (C_1^{-1}C_2)^22^{s_0}\phi_k^{-1}$. It suffices to define \[ \eta_1:=C_2^{-1}C_1\;{\rm and}\;\eta_2:=(C_1^{-1}C_2)^22^{s_0}. \] \end{proof} Let $C_1, C_2$ be as given in (\ref{AD}). We define \begin{eqnarray} &&\delta:=\frac{1}{16}\big(C_1C_2^{-1}\big)^{\frac{1}{s_0}};\label{g3}\\ &&\mathcal{A}_\sigma:=\{\tau\in\Omega_k: (A_\tau)_{2\delta|A_\tau|}\cap(A_\sigma)_{2\delta|A_\sigma|}\neq\emptyset\};\label{z8}\\ &&M_\sigma:={\rm card}(\mathcal{A}_\sigma);\;A_\sigma^*:=\bigcup_{\tau\in\mathcal{A}_\sigma}A_\tau,\;\;\sigma\in\Omega_k.\label{z7} \end{eqnarray} \begin{remark} The number $\delta$ is defined as above for two reasons. First, $\delta$ should be small enough so that the set $E_\omega\setminus B(x_0,2^{-1}\delta|A_\omega|)$ is large enough. Second, it will be convenient for us to estimate the $\mu$-measure of a ball $B(x_0,2^{-1}\delta|A_\omega|)$ by using (\ref{AD}). One may see Lemma \ref{lem2} below for more details. \end{remark} For $x\in\mathbb{R}$, let $[x]$ denote the largest integer not exceeding $x$. For $t>0$ and a set $A\subset\mathbb{R}^q$, we denote the closed $t$-neighborhood of $A$ by $(A)_t$. \begin{remark}\label{rem0} Let $L_0:=[(2\delta^{-1}+10)^q]$. By estimating the volumes, we know that for each $\sigma\in\Omega_k$, the set $(A_\sigma)_{2\delta|A_\sigma|}$ can be covered by $L_0$ closed balls of radii $2^{-1}\delta|A_\sigma|$ which are centered in $(A_\sigma)_{2\delta|A_\sigma|}$. This can be seen as follows. First, we consider the largest number of pairwise disjoint closed balls of radii $4^{-1}\delta|A_\sigma|$ which are centered in $(A_\sigma)_{2\delta|A_\sigma|}$; and then we double the radii of the balls and get a cover for $(A_\sigma)_{2\delta|A_\sigma|}$. In the remaining part of the paper, we always denote by $B_\sigma$ the set of the centers of such $L_0$ closed balls. \end{remark} In the next lemma, we collect some basic facts regarding $A_\sigma^*$. These facts will allow us to adjust the number of prospective optimal points in $(A_\sigma)_{\delta|A_\sigma|}$ without affecting the points in $K_\mu\setminus A_\sigma^*$ unfavorably. One may apply Lemma 8 of \cite{Zhu:08} to obtain an alternative proof. \begin{lemma}\label{lem9} Let $\sigma\in\Omega_k$ and let $\emptyset\neq\beta\subset\mathbb{R}^q$ be a finite set. Then \begin{enumerate} \item[(a1)] there exists an integer $M_0$ such that $M_\sigma\leq M_0$. \item[(a2)] for every $\tau\in\Omega_k\setminus\mathcal{A}_\sigma$ and every $x\in A_\tau$, we have \[ d(x,(\beta\setminus (A_\sigma)_{\delta|A_\sigma|})\cup B_\sigma)\leq d(x,\beta). \] \end{enumerate} \end{lemma} \begin{proof} (a1) Note that $A_\sigma^*\subset B(c_\sigma,2(1+2\delta)|A_\sigma|)$ and that the sets $E_\tau,\tau\in\mathcal{A}_\sigma$, are pairwise disjoint. By estimating the volumes, one can see that \[ M_\sigma (4^{-1}|A_\sigma|)^q\leq(2(1+2\delta)|A_\sigma|)^q. \] Hence, it is sufficient to define $M_0:=[(8(1+2\delta))^q]+1$. (a2) Let $\tau\in\Omega_k\setminus\mathcal{A}_\sigma$ and $x\in A_\tau$. Then we have $(A_\sigma)_{2\delta|A_\sigma|}\cap(A_\tau)_{2\delta|A_\tau|}=\emptyset$. Therefore, $x\in\mathbb{R}^q\setminus(A_\sigma)_{2\delta|A_\sigma|}$. We have two cases: Case 1: $d(x,\beta)=d(x,\beta\setminus(A_\sigma)_{\delta|A_\sigma|})$, then (a2) is clearly true. Case 2: $d(x,\beta)=d(x,\beta\cap(A_\sigma)_{\delta|A_\sigma|})$. We denote the boundary of a set $B$ by $\partial B$. Note that $(A_\sigma)_{2\delta|A_\sigma|}$ is compact with non-empty interior.
We may select a $z_0\in \partial(A_\sigma)_{2\delta|A_\sigma|}$ such that \[ d(x,z_0)=d(x,(A_\sigma)_{2\delta|A_\sigma|})=d(x,\partial(A_\sigma)_{2\delta|A_\sigma|}). \] By the definition of $B_\sigma$, there exists some $b\in B_\sigma$ such that $d(z_0,b)\leq 2^{-1}\delta|A_\sigma|$. For every $a\in \beta\cap(A_\sigma)_{\delta|A_\sigma|}$, we have \begin{equation*} d(x,a)\geq d(x,z_0)+\delta|A_\sigma|>d(x,z_0)+d(z_0,b)\geq d(x,b). \end{equation*} Hence, $d(x,\beta\cap(A_\sigma)_{\delta|A_\sigma|})\geq d(x,b)$. It follows that \begin{eqnarray*} d(x,\beta)=d(x,\beta\cap(A_\sigma)_{\delta|A_\sigma|})\geq d(x,B_\sigma)\geq d(x,(\beta\setminus (A_\sigma)_{\delta|A_\sigma|})\cup B_\sigma). \end{eqnarray*} This completes the proof of the lemma. \end{proof} \section{Weak estimates for measures and geometrical size of $P_a(\alpha_k)$} Let $C,t>0$. We consider compactly supported measures $\nu$ satisfying \begin{equation}\label{holder} \sup_{x\in\mathbb{R}^q}\nu(B(x,\epsilon))\leq C\epsilon^t \;\;{\rm for\; every}\; \epsilon>0. \end{equation} Without loss of generality, we may assume that $C\geq1$. As in \cite{GL:04}, we write \[ \hat{e}_k(\nu)=\log e_{k,0}(\nu)=\inf_{\alpha\in\mathcal{D}_k}I_\nu(\mathbb{R}^q,\alpha). \] The following lemma can be seen as an analogue of \cite[Lemma 2.1]{Zhu:20}. \begin{lemma}\label{lem1} Let $\nu$ be a Borel probability measure on $\mathbb{R}^q$ with compact support $K_\nu$. Assume that $|K_\nu|\leq 1$ and (\ref{holder}) is satisfied. Then for every $k\geq 2$, there exists a real number $\zeta_k>0$, which depends only on $C,t,k$ and $q$, such that \[ \hat{e}_{k-1}(\nu)-\hat{e}_k(\nu)\geq\zeta_k. \] \end{lemma} \begin{proof} Let $\alpha_{k-1}=\{a_i\}_{i=1}^{k-1}\in C_{k-1}(\nu)$. We define \begin{eqnarray*} &&\delta_{k,1}:=(4C(k-1))^{-\frac{1}{t}};\;\delta_{k,2}:=(2C(k-1))^{-\frac{1}{t}};\\ &&\delta_k:=2^{-1}\min\{\delta_{k,1},\delta_{k,2}-\delta_{k,1}\}. \end{eqnarray*} By (\ref{holder}), we have $\nu(K_\nu\setminus\bigcup_{i=1}^{k-1}B(a_i,\delta_{k,2}))\geq 2^{-1}$. Let $l_k:=[(2\delta_k^{-1}+2)^q]+1$. Note that $|K_\nu|\leq 1$. Hence, $K_\nu\setminus\bigcup_{i=1}^{k-1}B(a_i,\delta_{k,2})$ can be covered by $l_k$ closed balls $B_i\;(1\leq i\leq l_k)$ of radii $\delta_k$ which are centered in the set $K_\nu\setminus\bigcup_{i=1}^{k-1}B(a_i,\delta_{k,2})$ (cf. Remark \ref{rem0}). Thus, there exists some ball $B_i$ such that $\nu(B_i)\geq (2l_k)^{-1}$. Let $b_i$ denote the center of $B_i$. Then \begin{eqnarray*} \hat{e}_{k-1}(\nu)-\hat{e}_k(\nu)&\geq& I_\nu(\mathbb{R}^q,\alpha_{k-1})-I_\nu(\mathbb{R}^q,\alpha_{k-1}\cup\{b_i\})\\ &\geq&I_\nu(B_i,\alpha_{k-1})-I_\nu(B_i,\alpha_{k-1}\cup\{b_i\})\\&\geq&I_\nu(B_i,\alpha_{k-1})-I_\nu(B_i,\{b_i\})\\&\geq&\nu(B_i)(\log\delta_{k,1}-\log\delta_k) \\&\geq&(2l_k)^{-1}\log 2. \end{eqnarray*} The proof of the lemma is complete by defining $\zeta_k:=(2l_k)^{-1}\log 2$. \end{proof} In the next lemma, we establish a lower bound for the $\nu$-measure of the elements of a VP with respect to a $k$-optimal set for $\nu$ of order zero. \begin{lemma}\label{lem11} Assume that the hypothesis of Lemma \ref{lem1} is satisfied. For each $k\geq 2$, there exists a positive real number $\underline{d}_k$ such that, for every $\alpha_k\in C_k(\nu)$ and an arbitrary VP $\{P_a(\alpha_k)\}_{a\in\alpha_k}$ with respect to $\alpha_k$, we have \[ \min_{a\in\alpha_k}\nu(P_a(\alpha_k))\geq \underline{d}_k. \] \end{lemma} \begin{proof} Let $\alpha_k\in C_k(\nu)$ and $\{P_a(\alpha_k)\}_{a\in\alpha_k}$ a VP. Let $a\in\alpha_k$ be fixed.
By Theorem 2.4 of \cite{GL:04}, $\nu(P_a(\alpha_k))>0$. We define $\beta:=\alpha_k\setminus\{a\}$. Then for every $x\in\bigcup_{b\in\beta}P_b(\alpha_k)$, we have $d(x,\beta)=d(x,\alpha_k)$. Thus, \begin{eqnarray}\label{s10} \hat{e}_{k-1}(\nu)-\hat{e}_k(\nu)&\leq& I_\nu(\mathbb{R}^q,\beta)-I_\nu(\mathbb{R}^q,\alpha_k)\nonumber\\&=&I_\nu(P_a(\alpha_k),\beta)-I_\nu(P_a(\alpha_k),\{a\}). \end{eqnarray} Note that $\sup_{x\in K_\nu}d(x,\alpha_k)\leq 2|K_\nu|\leq 2$ (cf. \cite[Lemma 5.8]{GL:04}). Therefore, for every $x\in P_a(\alpha_k)\cap K_\nu$, we have $d(x,\beta)\leq 3|K_\nu|\leq 3$. It follows that \begin{equation}\label{s8} I_\nu(P_a(\alpha_k),\beta)\leq\nu(P_a(\alpha_k))\log 3. \end{equation} Now by \cite[Lemma 3.4]{GL:04}, we have \begin{equation}\label{s9} I_\nu(P_a(\alpha_k),\{a\})\geq\frac{1}{t}\big(\nu(P_a(\alpha_k))\log\nu(P_a(\alpha_k))-C\nu(P_a(\alpha_k))\big). \end{equation} We define $h(x):=-x\log x$ for $x>0$. Then $h(x)\to 0$ as $x$ decreases to zero. Thus, there exists some $\eta_k>0$ such that $0<x<\eta_k$ implies $-x\log x<2^{-1}t\zeta_k$. Therefore, if $\nu(P_a(\alpha_k))<\eta_k$, using Lemma \ref{lem1} and (\ref{s10})-(\ref{s9}), we deduce \begin{eqnarray*} \zeta_k\leq\hat{e}_{k-1}(\nu)-\hat{e}_k(\nu)\leq\nu(P_a(\alpha_k))\log 3+\frac{1}{2}\zeta_k+\frac{C}{t}\nu(P_a(\alpha_k)). \end{eqnarray*} Thus, we obtain $\nu(P_a(\alpha_k))\geq 2^{-1}(\log3+Ct^{-1})^{-1}\zeta_k$. It suffices to define \[ \underline{d}_k:=\min\big\{\eta_k,2^{-1}(\log3+Ct^{-1})^{-1}\zeta_k\big\}. \] \end{proof} Next, we establish an upper bound for $\nu(P_a(\alpha_k)),a\in\alpha_k$. \begin{lemma}\label{l1} Assume that the hypothesis of Lemma \ref{lem1} is satisfied. For each $k\geq 1$, there exists a positive real number $\overline{d}_k$ such that, for every $\alpha_k\in C_k(\nu)$ and an arbitrary VP $\{P_a(\alpha_k)\}_{a\in\alpha_k}$ with respect to $\alpha_k$, we have \[ \max_{a\in\alpha_k}\nu(P_a(\alpha_k))\leq \overline{d}_k. \] \end{lemma} \begin{proof} Let $\alpha_k\in C_k(\nu)$ and $a\in\alpha_k$. Set \[ \delta_{k,3}:=(2C)^{-\frac{1}{t}}\nu(P_a(\alpha_k))^{\frac{1}{t}}. \] Then we have $\delta_{k,3}<1$, since $C\geq 1$. By (\ref{holder}), we deduce that \[ \nu(B(a,\delta_{k,3}))\leq C\delta_{k,3}^t=2^{-1}\nu(P_a(\alpha_k)). \] It follows that \begin{equation}\label{z1} \nu(P_a(\alpha_k)\setminus B(a,\delta_{k,3}))\geq\nu(P_a(\alpha_k))-\nu(B(a,\delta_{k,3}))\geq\frac{1}{2}\nu(P_a(\alpha_k)). \end{equation} Let $N_k:=[(8\delta_{k,3}^{-1})^q]+3$. One can easily see that \begin{equation}\label{zsz1} N_k\leq (16\delta_{k,3}^{-1})^{q}\;\; {\rm and}\;\;N_k^{-1}\geq16^{-q}\delta_{k,3}^q. \end{equation} Note that $|P_a(\alpha_k)\cap K_\nu|\leq |K_\nu|\leq 1$. By estimating volumes, one can see that \[ (P_a(\alpha_k)\cap K_\nu)\setminus B(a,\delta_{k,3}) \] can be covered by $N_k$ closed balls $B_i\;(1\leq i\leq N_k)$ of radii $4^{-1}\delta_{k,3}$ which are centered in $(P_a(\alpha_k)\cap K_\nu)\setminus B(a,\delta_{k,3})$. Thus, by (\ref{z1}) and (\ref{zsz1}), there exists some ball $B_i$ such that \begin{eqnarray}\label{z3} \nu(B_i\cap P_a(\alpha_k))&\geq& \frac{1}{2N_k}\nu(P_a(\alpha_k))\geq32^{-q}\delta_{k,3}^q\nu(P_a(\alpha_k))\nonumber \\&\geq&32^{-q}(2C)^{-\frac{q}{t}}\nu(P_a(\alpha_k))^{1+\frac{q}{t}}\nonumber\\ &=:&D_1\nu(P_a(\alpha_k))^{1+\frac{q}{t}}. \end{eqnarray} Now we define $\beta:=\alpha_k\cup\{b_i\}$, where $b_i$ denotes the center of $B_i$.
Then we have the following estimate: \begin{eqnarray}\label{z2} \hat{e}_k(\nu)-\hat{e}_{k+1}(\nu)&\geq& I_\nu(\mathbb{R}^q,\alpha_k)-I_\nu(\mathbb{R}^q,\beta)\nonumber\\ &\geq& I_\nu(B_i\cap P_a(\alpha_k),\alpha_k)-I_\nu(B_i\cap P_a(\alpha_k),\beta)\nonumber\\&\geq& I_\nu(B_i\cap P_a(\alpha_k),\{a\})-I_\nu(B_i\cap P_a(\alpha_k),\{b_i\}). \end{eqnarray} By the definition of $B_i$, for every $x\in B_i$, we have \begin{equation}\label{g4} d(x,a)\geq \frac{1}{2}\delta_{k,3},\;\;d(x,b_i)\leq \frac{1}{4}\delta_{k,3}. \end{equation} Now by the proof of \cite[Lemma 5.8]{GL:04}, for every $n\geq 1$, we have \[ \hat{e}_n(\nu)-\hat{e}_{n+1}(\nu)\leq\frac{1}{n+1}\log3+C^{\frac{1}{2}}\frac{2}{t}\big(\frac{1}{n+1}\big)^{1/2}=:\chi_n. \] Using this and (\ref{z3})-(\ref{g4}), we deduce \[ \chi_k\geq\hat{e}_k(\nu)-\hat{e}_{k+1}(\nu)\geq\nu(B_i\cap P_a(\alpha_k))\log2\geq D_1\log 2\;\nu(P_a(\alpha_k))^{1+\frac{q}{t}}. \] The proof of the lemma is complete by defining $\overline{d}_k:=\big(\chi_k (D_1\log 2)^{-1}\big)^{\frac{t}{t+q}}$. \end{proof} We end this section with an estimate for the geometrical size of the elements of a VP with respect to a $k$-optimal set $\alpha_k\in C_k(\nu)$. \begin{lemma}\label{l2} Assume that the hypothesis of Lemma \ref{lem1} is satisfied. For each $k\geq 2$, there exists a number $g_k>0$ such that, for every $\alpha_k\in C_k(\nu)$, every VP $\{P_a(\alpha_k)\}_{a\in\alpha_k}$ with respect to $\alpha_k$ and every $a\in\alpha_k$, the set $P_a(\alpha_k)$ contains a closed ball of radius $g_k|P_a(\alpha_k)\cap K_\nu|$ which is centered at $a$. \end{lemma} \begin{proof} Let $d_H$ denote the Hausdorff metric. We define $\phi: C_k(\nu)\to\mathbb{R}$ by \[ \phi(\alpha_k):=\min_{a\in\alpha_k}\min_{b\in\alpha_k\setminus\{a\}}d(a,b). \] We first show that $\phi$ is continuous on $C_k(\nu)$. To see this, it is sufficient to consider an arbitrary accumulation point (if any) $\alpha_k=\{a_i\}_{i=1}^k$ of $C_k(\nu)$. Assume that $\beta_{k,n}=\{b_{i,n}\}_{i=1}^k\in C_k(\nu)$ and $d_H(\beta_{k,n},\alpha_k)\to 0$ as $n\to\infty$. Without loss of generality, we assume that $\phi(\alpha_k)=d(a_1,a_2)$. Let $\eta_0:=4^{-1}\phi(\alpha_k)$. Then for every $\epsilon\in (0,\eta_0)$, there exists some $N_0\geq 1$, such that for all $n\geq N_0$, we have $d_H(\beta_{k,n},\alpha_k)<\epsilon$. Thus, for every $1\leq i\leq k$, there exists a unique $1\leq j(i)\leq k$ such that $d(b_{j(i),n},a_i)<\epsilon$. Thus, we rewrite $\beta_{k,n}$ as $\{b_{j(i),n}\}_{i=1}^k$. For $1\leq i_1\neq i_2\leq k$, by the triangle inequality, we have \begin{eqnarray}\label{ss1} d(b_{j(i_1),n},b_{j(i_2),n})&\geq& d(a_{i_1},a_{i_2})-d(a_{i_1},b_{j(i_1),n})-d(a_{i_2},b_{j(i_2),n})\nonumber\\ &\geq&d(a_1,a_2)-2\epsilon. \end{eqnarray} It follows that $\phi(\beta_{k,n})\geq \phi(\alpha_k)-2\epsilon$ for every $n\geq N_0$. Also, we have \begin{eqnarray}\label{ss2} \phi(\beta_{k,n})&\leq& d(b_{j(1),n},b_{j(2),n})\nonumber\\&\leq& d(b_{j(1),n},a_1)+d(a_1,a_2)+d(a_2,b_{j(2),n})\nonumber\\&<&\phi(\alpha_k)+2\epsilon. \end{eqnarray} From (\ref{ss1}) and (\ref{ss2}), we obtain that $|\phi(\beta_{k,n})-\phi(\alpha_k)|<2\epsilon$ for every $n\geq N_0$. It follows that $\phi(\beta_{k,n})\to\phi(\alpha_k)$ as $n\to\infty$. Thus, $\phi$ is continuous on $C_k(\nu)$. By \cite[Theorem 2.5]{GL:04}, $C_k(\nu)$ is $d_H$-compact. Thus, by the continuity of $\phi$, there exists some $\alpha_{k,1}\in C_k(\nu)$ such that \[ \underline{\lambda}_k(\nu):=\min_{\alpha\in C_k(\nu)}\phi(\alpha)=\phi(\alpha_{k,1}).
\] Clearly, we have $\underline{\lambda}_k(\nu)>0$. Now let $\alpha_k=\{a_i\}_{i=1}^k$ be an arbitrary $k$-optimal set for $\nu$ and $\{P_a(\alpha_k)\}_{a\in\alpha_k}$ an arbitrary VP with respect to $\alpha_k$. Then \begin{eqnarray*} B(a,3^{-1}\underline{\lambda}_k(\nu))\subset P_a(\alpha_k)\;{\rm and}\;|P_a(\alpha_k)\cap K_\nu|\leq|K_\nu|\leq 1. \end{eqnarray*} Let $\eta_k,\zeta_k$ be as defined in the preceding lemmas. Next, we establish a lower bound for $\underline{\lambda}_k(\nu)$ in terms of $\eta_k$ and $\zeta_k$ which depend only on $C,t,k,q$. Set \[ B_k:=\min\big\{\frac{t\zeta_k}{4C},\eta_k\big\};\;\epsilon_k:=\min\big\{\big(\frac{B_k}{2C}\big)^{1/t},2^{-1}\big\}; \;s_k:=\frac{1}{2}(e^{\frac{\zeta_k}{4}}-1)\epsilon_k. \] Note that $\zeta_k<1$, we have that $s_k<\epsilon_k$. We are going to show that $\underline{\lambda}_k(\nu)\geq s_k$. Suppose that $\underline{\lambda}_k(\nu)=d(a_1,a_2)<s_k$, we deduce a contradiction. Write \[ A_{\epsilon_k}:=B(a_1,\epsilon_k)\cap P_{a_1}(\alpha_k);\;\;\beta:=\alpha_k\setminus\{a_1\}. \] Then we have $\nu(A_{\epsilon_k})\leq C\epsilon_k^t<\min\{\eta_k,(4C)^{-1}t\zeta_k\}$. By the proof of Lemma \ref{lem11}, we know that $-\frac{1}{t}(\nu(A_{\epsilon_k})\log \nu(A_{\epsilon_k})<2^{-1}\zeta_k$. Further, one can easily see that for every $b\in\beta$ and $x\in P_b(\alpha_k)$, we have $d(x,\alpha_k)=d(x,b)=d(x,\beta)$. Thus, \begin{eqnarray}\label{append1} \hat{e}_{k-1}(\nu)-\hat{e}_k(\nu)&\leq& I_\nu(\mathbb{R}^q,\beta)-I_\nu(\mathbb{R}^q,\alpha_k)\nonumber\\&=&I_\nu(P_{a_1}(\alpha_k),\beta)-I_\nu(P_{a_1}(\alpha_k),\alpha_k). \end{eqnarray} Note that for $x\in A_{\epsilon_k}$, we have $d(x,\alpha_k)=d(x,a_1)$ and \[ d(x,\beta)\leq d(x,a_2)\leq d(x,a_1)+d(a_1,a_2)<\epsilon_k+s_k<2\epsilon_k<1. \] Using this and \cite[Lemma 3.6]{GL:04}, we deduce \begin{eqnarray*} \Delta_1:&=&I_\nu(A_{\epsilon_k},\beta)-I_\nu(A_{\epsilon_k},\alpha_k)\\&\leq&I_\nu(A_{\epsilon_k},\{a_2\})-I_\nu(A_{\epsilon_k},\{a_1\}) \\&\leq&\nu(A_{\epsilon_k})\log(2\epsilon_k)-\frac{1}{t}\big(\nu(A_{\epsilon_k})\log \nu(A_{\epsilon_k})-C\nu(A_{\epsilon_k})\big)\\ &\leq&-\frac{1}{t}\nu(A_{\epsilon_k})\log \nu(A_{\epsilon_k})+\frac{C}{t}\nu(A_{\epsilon_k})\\ &<&2^{-1}\zeta_k+4^{-1}\zeta_k. \end{eqnarray*} For every $x\in P_{a_1}(\alpha_k)\setminus A_{\epsilon_k}=:B_{\epsilon_k}$, we have \[ d(x,\alpha_k)=d(x,a_1)>\epsilon_k,\;d(x,\beta)\leq d(x,a_2)<d(x,a_1)+s_k. \] From this, we deduce that \[ \frac{d(x,\beta)}{d(x,\alpha_k)}\leq\frac{d(x,a_2)}{d(x,a_1)}\leq \frac{d(x,a_1)+s_k}{d(x,a_1)}=1+\frac{s_k}{\epsilon_k}<e^{\frac{\zeta_k}{4}}. \] By the preceding inequality and the fact that $\nu(B_{\epsilon_k})<1$, we obtain \begin{eqnarray*} \Delta_2:=I_\nu(B_{\epsilon_k},\beta)-I_\nu(B_{\epsilon_k},\alpha_k)\leq\nu(B_{\epsilon_k})\log e^{\frac{\zeta_k}{4}}<\frac{\zeta_k}{4}. \end{eqnarray*} From this and (\ref{append1}), we deduce that $\hat{e}_{k-1}(\nu)-\hat{e}_k(\nu)\leq\Delta_1+\Delta_2<\zeta_k$, contradicting Lemma \ref{lem1}. Thus, $\underline{\lambda}_k(\nu)\geq s_k$ and the proof of the lemma is complete by defining $g_k=3^{-1}s_k$. \end{proof} \section{Auxiliary measures and auxiliary integers} \subsection{Some subsets of $A_\omega^*$ and auxiliary measures} For a finite subset $\alpha$ of $\mathbb{R}^q$, let $W(a|\alpha),a\in\alpha$, be as defined in (\ref{vregion}). Let $\delta$ be as defined in (\ref{g3}). Let $\omega,\sigma\in\Omega_k$ with $\sigma\neq\omega$. Let $x_0\in A_\omega\cap K_\mu$. 
The following three types of subsets of $A_\omega^*$ will be considered in the characterization for the optimal sets for $\mu$: \begin{eqnarray*} &&D_{\omega,1}^{(\alpha)}:=E_\omega\cup\bigg(\bigcup_{a\in\alpha\cap (A_\omega)_{\delta|A_\omega|}}(W(a|\alpha)\cap A_\omega^*)\bigg)\setminus B(x_0,2^{-1}\delta|A_\omega|);\\ &&D_{\omega,2}^{(\alpha)}(\sigma):=E_\omega\cup\bigg(\bigcup_{a\in\alpha\cap (A_\omega)_{\delta|A_\omega|}}(W(a|\alpha)\cap A_\omega^*)\bigg)\setminus E_\sigma;\\ &&D_{\omega,3}^{(\alpha)}:=E_\omega\cup\bigg(\bigcup_{a\in\alpha\cap (A_\omega)_{\delta|A_\omega|}}(W(a|\alpha)\cap A_\omega^*)\bigg). \end{eqnarray*} If no confusion arises, we write $D_{\omega,i}$ for $D_{\omega,i}^{(\alpha)}$ and write $D_{\omega,2}$ for $D_{\omega,2}^{(\alpha)}(\sigma)$. For $\omega\in\Omega_k$, recall that $c_\omega$ is the center of $E_\omega$. We define \[ D_{\omega,4}:=B(c_\omega,(2^{-1}-\delta)|E_\omega|)\subset E_\omega. \] For $1\leq i\leq 4$, let $\mu(\cdot|D_{\omega,i})$ denote the conditional measure of $\mu$ on $D_{\omega,i}$: \begin{equation}\label{auxmeasure} \mu(\cdot|D_{\omega,i})(A)=\frac{\mu(A\cap D_{\omega,i})}{\mu(D_{\omega,i})},\;A\;{\rm is\;a\; Borel\;set}. \end{equation} Let $f_{D_{\omega,i}}$ be a similarity mapping of similarity ratio $|D_{\omega,i}|$ and define \begin{equation} \nu_{D_{\omega,i}}:=\mu(\cdot|D_{\omega,i})\circ f_{D_{\omega,i}},\;K_{\nu_{D_{\omega,i}}}:={\rm supp}(\nu_{D_{\omega,i}}). \end{equation} Then $\nu_{D_{\omega,i}}$ is a probability measure satisfying $|K_{\nu_{D_{\omega,i}}}|\leq 1$. In a similar manner, we define the measures $\nu_{E_\sigma},\sigma\in\Omega_k$. We have \begin{lemma}\label{lem2} There exists a constant $C$ such that, for $B\in \{D_{\omega,i}\}_{i=1}^4\cup\{E_\omega\}$, we have $\sup_{x\in\mathbb{R}^q}\nu_B(B(x,\epsilon))\leq C\epsilon^{s_0}$ for every $\epsilon>0$. \end{lemma} \begin{proof} By the definition, $D_{\omega,i}\subset A_\omega^*$ and $A_\omega^*\subset B(c_\omega, 2(1+2\delta)|A_\omega|)$. Hence, \begin{equation}\label{g6} |D_{\omega,i}|\leq |A_\omega^*|\leq 4(1+2\delta)|A_\omega|,\;1\leq i\leq 3. \end{equation} Since the diameter of $B(x_0,2^{-1}\delta|A_\omega|)$ is equal to $\delta|A_\omega|$, we have \begin{equation}\label{zz1} (1-2\delta)|E_\omega|\leq\bigg|E_\omega\setminus B\big(x_0,2^{-1}\delta|A_\omega|\big)\bigg|\leq|D_{\omega,1}|\leq4(1+2\delta)|A_\omega|. \end{equation} It follows that $|E_\omega|=2^{-1}|A_\omega|\geq(8(1+2\delta))^{-1}|D_{\omega,1}|$. This and (\ref{AD}) yield that \begin{eqnarray*} \mu(D_{\omega,1})&\geq&\mu(E_\omega\setminus B\big(x_0,2^{-1}\delta|A_\omega|)\\&\geq&\mu(E_\omega)-\mu(B\big(x_0,2^{-1}\delta|A_\omega|))\\&\geq& C_1(2^{-1}|E_\omega|)^{s_0}-C_2(2^{-1}\delta|A_\omega|)^{s_0}\\&\geq&C_1(2^{-1}|E_\omega|)^{s_0}-C_1(16^{-1}|E_\omega|)^{s_0} \\&=&C_1(2^{-s_0}-16^{-s_0})|E_\omega|^{s_0}\\&\geq&C_1(2^{-s_0}-16^{-s_0})(8(1+2\delta))^{-s_0}|D_{\omega,1}|^{s_0}. \end{eqnarray*} We write $\xi_1:=C_1(2^{-s_0}-16^{-s_0})(8(1+2\delta))^{-s_0}$. On the other hand, by (\ref{zz1}), \begin{eqnarray*} \mu(D_{\omega,1})&\leq&\mu(A_\omega^*)\leq C_2(2(1+2\delta)|A_\omega|)^{s_0}\\&\leq& C_2(4(1+2\delta))^{s_0}(1-2\delta)^{-s_0}|D_{\omega,1}|^{s_0}=:\xi_2|D_{\omega,1}|)^{s_0}. \end{eqnarray*} Note that for distinct words $\sigma,\omega\in\Omega_k$, we have $E_\sigma\cap E_\omega=\emptyset$. Thus, for $i=2,3$, we have $E_\omega\subset D_{\omega,i}\subset A_\omega^*$. 
Using these facts and (\ref{g6}), we deduce \begin{eqnarray*} \mu(D_{\omega,i})&\leq&\mu(A_\omega^*)\leq C_2(2(1+2\delta)|A_\omega|)^{s_0}\\&\leq& C_24^{s_0}(1+2\delta)^{s_0}|E_\omega|^{s_0}\leq C_24^{s_0}(1+2\delta)^{s_0}|D_{\omega,i}|^{s_0}=:\xi_3|D_{\omega,i}|^{s_0};\\ \mu(D_{\omega,i})&\geq&\mu(E_\omega)\geq C_1(2^{-1}|E_\omega|)^{s_0}\geq C_1 4^{-s_0}|A_\omega|^{s_0}\\ &\geq& C_1 4^{-s_0}(4(1+2\delta))^{-s_0}|D_{\omega,i}|^{s_0}=:\xi_4|D_{\omega,i}|^{s_0}. \end{eqnarray*} For every $\omega\in\Omega_k$, we have \begin{eqnarray*} C_1(2^{-1}|E_\omega|)^{s_0})\leq\mu(E_\omega)\leq C_2(2^{-1}|E_\omega|)^{s_0});\\ C_1(2^{-1}|D_{\omega,4}|)^{s_0})\leq\mu(D_{\omega,4})\leq C_2(2^{-1}|D_{\omega,4}|)^{s_0}) \end{eqnarray*} We define $\xi:=\max\{\xi_1^{-1},\xi_4^{-1},\xi_2,\xi_3\}$. Then by the above analysis, we obtain \begin{equation}\label{xi} \xi^{-1}|B|^{s_0}\leq\mu(B)\leq \xi|B|^{s_0}. \end{equation} for $B\in\{D_{\omega,i}\}_{i=1}^4\cup\{E_\omega\}$. Thus, the lemma follows from \cite[Lemma 2.5]{Zhu:20}. \end{proof} \begin{remark}\label{rem2} Let $\xi$ be as defined in (\ref{xi}). For $1\leq i\leq 3$, we have \begin{eqnarray*} \mu(D_{\omega,i})&\leq&\xi|D_{\omega,i}|^{s_0}\\&\leq& \xi|A_\omega^*|^{s_0}\leq \xi(4(1+2\delta)|A_\omega|)^{s_0}\\&\leq& \xi(8(1+2\delta))^{s_0}|E_\omega|^{s_0}\\&\leq&\xi(8(1+2\delta))^{s_0}C_1^{-1}(2^{-1}-\delta)^{-s_0}\min_{\sigma\in\Omega_k}\mu(D_{\sigma,4}). \end{eqnarray*} Let $\zeta:=\xi(8(1+2\delta))^{s_0}C_1^{-1}(2^{-1}-\delta)^{-s_0}$. Then for every $\sigma\in\Omega_k$, we have \[ \max_{1\leq i\leq 3}\mu(D_{\omega,i})\leq\zeta\mu(D_{\sigma,4})\leq \zeta\mu(E_\sigma). \] \end{remark} In the following we denote by $f_B$ the similarity mapping in the definition of the measure $\nu_B$. The subsequent two lemmas will be very important for the characterization for the optimal sets. One of them is a consequence of the definition of the auxiliary measures $\nu_B$, and the other is based on Lemma \ref{lem9}. \begin{lemma}\label{l3} Let $B\in\{D_{\omega,i}\}_{i=1}^4\cup\{E_\omega\}$. Let $\alpha$ be a non-empty finite subset of $\mathbb{R}^q$ with ${\rm card}(\alpha)=l_\alpha$. Then $I_\mu(B,\alpha)\geq\mu(B)\log |B|+\mu(B)\hat{e}_{l_\alpha}(\nu_B)$, and equality holds if $f_B^{-1}(\alpha)\in C_{l_\alpha}(\nu_B)$. \end{lemma} \begin{proof} By the definition of $\nu_B$ (cf. (\ref{auxmeasure})), we have \begin{eqnarray*} I_\mu(B,\alpha)&=&\int_{B}\log d(x,\alpha)d\mu(x)\\ &=&\mu(B)\int_{B}\log d(x,\alpha)d\mu(\cdot|B)(x)\\&=&\mu(B)\int_{B}\log d(x,\alpha)d\nu_B\circ f_B^{-1}(x)\\ &=&\mu(B)\log |B|+\mu(B)\int_{f_B^{-1}(B)}\log d(x,f^{-1}_B(\alpha))d\nu_B(x)\\&\geq&\mu(B)\log |B|+\mu(B)\hat{e}_{l_\alpha}(\nu_B). \end{eqnarray*} This completes the proof of the lemma. \end{proof} Let $\omega,\tau,\sigma\in\Omega_k$ with $\sigma\neq\omega$ and $\tau\in\Omega_k\setminus\mathcal{A}_\omega$. Let $D_{\omega,i},1\leq i\leq 3$, be as defined above. We write \[ F_{\omega,i}=\left\{\begin{array}{ll}D_{\omega,1}\cup B(x_0,2^{-1}\delta|A_\omega|)&\;\;\;\;i=1\\ D_{\omega,2}\cup E_\sigma&\;\;\;\;i=2 \\D_{\omega,3}\cup D_{\tau,4}&\;\;\;\;i=3\end{array}\right.. \] \begin{lemma}\label{compare2} Let $\alpha,\gamma$ be non-empty finite subsets of $\mathbb{R}^q$. Let $B_\omega$ be as defined in Remark \ref{rem0}. We define $\beta:=(\alpha\setminus(A_\omega)_{\delta|A_\omega|})\cup B_\omega\cup\gamma$. Then \[ I_\mu(\mathbb{R}^q\setminus F_{\omega,i},\beta)\leq I_\mu(\mathbb{R}^q\setminus F_{\omega,i},\alpha),\;\;1\leq i\leq 3. \] \end{lemma} \begin{proof} Let $1\leq i\leq 3$ be fixed. 
By the definition of $D_{\omega,i}$ and $F_{\omega,i}$, we have \[ \{x\in A_\omega^*: d(x,\alpha)=d(x,\alpha\cap(A_\sigma)_{\delta|A_\sigma|})\}\subset F_{\omega,i}. \] Therefore, for every $x\in \mathbb{R}^q\setminus F_{\omega,i}$, we have the following two cases: \begin{enumerate} \item[(b1)] $x\in A_\omega^*$ and $d(x,\alpha)=d(x,\alpha\setminus(A_\sigma)_{\delta|A_\sigma|})$; then clearly $d(x,\beta)\leq d(x,\alpha)$; \item[(b2)] $x\in K_\mu\setminus A_\omega^*$. Note that $K_\mu\subset\bigcup_{\tau\in\Omega_k}A_\tau$. Thus, $x\in A_\tau$ for some $\tau\in\Omega_k\setminus\mathcal{A}_\omega$. By Lemma \ref{lem9} (a2), we also have, $d(x,\beta)\leq d(x,\alpha)$. \end{enumerate} Thus, $d(x,\beta)\leq d(x,\alpha)$ for every $x\in \mathbb{R}^q\setminus F_{\omega,i}$, which implies the lemma. \end{proof} \subsection{Selection of some auxiliary integers} Let $L_0$ be as defined in Remark \ref{rem0}. We define \begin{eqnarray*} L_1:=[(2\delta^{-1}+1)^q]+1,\;\;L_2:=6^q;\;\;n_0:=[(4\delta^{-1}+1)^q]+1. \end{eqnarray*} \begin{remark}\label{rem4} By estimating the volumes, one can see the following facts: \begin{enumerate} \item[(c1)] for each $\sigma\in\Omega_k$, the set $E_\sigma$ can be covered by $L_1$ closed balls of radii $2^{-1}\delta|E_\sigma|$ which are centered in $E_\sigma$. We denote by $\gamma_{E_\sigma}$ the set of the centers of such $L_1$ balls. \item[(c2)] for $x\in A_\sigma\cap K_\mu$, the ball $B(x,2^{-1}\delta|A_\sigma|)$ can always be covered by $L_2$ closed balls of radii $4^{-1}\delta|A_\sigma|$ which are centered in $B(x,2^{-1}\delta|A_\sigma|)$. We will denote by $G_x$ the set of the centers of such $L_2$ closed balls. \item[(c3)]$A_\sigma$ can be covered by $n_0$ closed balls of radii $4^{-1}\delta|A_\sigma|$ which are centered in $A_\sigma$. We denote by $H_\sigma$ the set of the centers of such $n_0$ balls. \end{enumerate} \end{remark} With the above preparations, we are able to define an integer $n_1$ which will be used to give a lower estimate for the number of optimal points in $(A_\sigma)_{\delta|A_\sigma|}$. \begin{lemma}\label{lem10} Let $\zeta$ be as defined in Remark \ref{rem2}. There exists a smallest integer $n_1$ such that for every $\omega\in\Omega_k$ and $n\geq n_1$, we have \[ \hat{e}_{n-L_0-L_2}(\nu_{D_{\omega,2}})-\hat{e}_{n+L_1}(\nu_{D_{\omega,2}})<\zeta^{-1}C_1C_2^{-1}\delta^{s_0}\log 2. \] \end{lemma} \begin{proof} By Lemma \ref{lem2} and \cite[Lemma 5.8]{GL:04}, we deduce, \begin{eqnarray*} &&\lim_{n\to\infty}(\hat{e}_{n-L_0-L_2}(\nu_{D_{\omega,2}})-\hat{e}_{n+L_1}(\nu_{D_{\omega,2}}))\\ &&\;\;\;\;\;\;\;\;=\sum_{h=-(L_0+L_2)}^{L_1-1}\lim_{n\to\infty}(\hat{e}_{n+h}(\nu_{D_{\omega,2}})-\hat{e}_{n+h+1}(\nu_{D_{\omega,2}}))=0. \end{eqnarray*} This implies the lemma. \end{proof} By \cite[Lemma 2.1]{Zhu:20}, there exists an integer $N$ such that $\phi_{k+1}\leq N\phi_k$. Next, we select three more integers $n_2,n_3, n_4$. These integers will be used to establish an upper bound for the number of optimal points in $(A_\sigma)_{\delta|A_\sigma|}$. \begin{lemma}\label{lem6} Let $\zeta$ and $M_0$ be as defined in Remark \ref{rem2} and Lemma \ref{lem9}. 
Then (d1) there exists a smallest integer $n_2>n_1+L_0+L_1$, such that for every $n\geq n_2$, $\sigma,\omega\in\Omega_k$, the following holds: \[ \hat{e}_{n-L_0-n_1-L_1}(\nu_{D_{\omega,2}})-\hat{e}_{n+L_1}(\nu_{D_{\omega,2}}) <\zeta^{-1}\big(\hat{e}_{n_1+L_1-1}(\nu_{E_\sigma})-\hat{e}_{n_1+L_1}(\nu_{E_\sigma})\big); \] (d2) let $n_3:=(n_2+n_0)N$; there exists a smallest integer $n_4>M_0n_3+L_0+L_1$, such that for $n\geq n_4$ and every pair $\sigma,\omega\in\Omega_k$, the following holds: \[ \hat{e}_{n-L_0-n_3-L_1}(\nu_{D_{\sigma,3}})-\hat{e}_{n+L_1}(\nu_{D_{\sigma,3}}) <\zeta^{-1}\big(\hat{e}_{n_3+L_1-1}(\nu_{D_{\omega,4}})-\hat{e}_{n_3+L_1}(\nu_{D_{\omega,4}})\big). \] \end{lemma} \begin{proof} This is a consequence of \cite[Lemma 5.8]{GL:04} and Lemmas \ref{lem1} and \ref{lem2}. \end{proof} \section{A characterization for the $n$-optimal sets} Our first lemma in this section is analogous to \cite[Lemma 4.1]{Zhu:20}. \begin{lemma}\label{lc} We have $L_c:={\rm card}(\alpha_n\setminus\bigcup_{\sigma\in\Omega_k}(A_\sigma)_{\delta|A_\sigma|})\leq n_0\phi_k$. \end{lemma} \begin{proof} Assume that $L_c>n_0\phi_k$. Let $H_\sigma$ be as defined in Remark \ref{rem4} (c3). Set \[ \beta:=\bigg(\alpha_n\cap \bigcup_{\sigma\in\Omega_k}(A_\sigma)_{\delta|A_\sigma|}\bigg)\cup\bigg(\bigcup_{\sigma\in\Omega_k}H_\sigma\bigg). \] Then ${\rm card}(\beta)\leq n$. For $x\in K_\mu\subset\bigcup_{\sigma\in\Omega_k}A_\sigma$, we have $d(x,\beta)\leq d(x,\alpha_n)$. We choose an arbitrary $x\in K_\mu $ with $d(x,\alpha_n)=d(x,\alpha_n\setminus\bigcup_{\sigma\in\Omega_k}(A_\sigma)_{\delta|A_\sigma|})$. Then we have, $d(x,\alpha_n)>\delta|A_\sigma|$ for some $\sigma\in\Omega_k$. Thus, for every $y\in B_x:=B(x,4^{-1}\delta|A_\sigma|)$, we have $d(y,\alpha_n)\geq \frac{3}{4}\delta|A_\sigma|$. Hence, \begin{eqnarray*} I_\mu(\mathbb{R}^q,\alpha_n)-I_\mu(\mathbb{R}^q,\beta)&\geq& I_\mu(B_x,\alpha_n)-I_\mu(B_x,\beta)\\&\geq&\mu(B_x)\big(\log(\frac{3}{4}\delta|A_\sigma|)-\log(\frac{1}{4}\delta|A_\sigma|)\big)\\&=&\mu(B_x)\log 3>0. \end{eqnarray*} It follows that $I_\mu(\mathbb{R}^q,\alpha_n)>I_\mu(\mathbb{R}^q,\beta)$, contradicting the optimality of $\alpha_n$. \end{proof} For every $n\geq (n_0+n_2)\phi_{k_0}$, there exists a unique $k\geq k_0$, such that \begin{equation}\label{g2} (n_0+n_2)\phi_k\leq n<(n_0+n_2)\phi_{k+1}. \end{equation} Recall that $n_3=(n_0+n_2)N$. By \cite[Lemma 2.1]{Zhu:20}, we have \begin{equation}\label{ncomp} (n_0+n_2)\phi_k\leq n<(n_0+n_2)N\phi_k=n_3\phi_k. \end{equation} From now on, we assume that $n,k$ satisfy (\ref{g2}). We fix an $\alpha_n\in C_n(\mu)$ and an arbitrary VP $\{P_a(\alpha_n)\}_{a\in\alpha_n}$. We write $D_{\omega,i}$ for $D_{\omega,i}^{(\alpha_n)}$. We define \[ L_\sigma:={\rm card}(\alpha_n\cap(A_\sigma)_{\delta|A_\sigma|}),\;\sigma\in\Omega_k. \] Using the subsequent two lemmas, we establish a lower bound for $L_\sigma$. \begin{lemma}\label{key1} Let $\omega\in\Omega_k$ and $B\in\{D_{\omega,i}\}_{i=1}^3\cup\{E_\omega\}$. Then \[ I_\mu(B,\alpha_n)\geq \mu(B)(\log|B|+\hat{e}_{L_\omega+L_1}(\nu_B)). \] \end{lemma} \begin{proof} We divide $B$ into two parts: \[ B(1):=\{x\in B:d(x,\alpha_n)=d(x,\alpha_n\cap(A_\omega)_{\delta|A_\omega|)}\};\;B(2):=B\setminus B(1). \] By the definition, we have $B(2)\subset E_\omega$. Let $\gamma_{E_\omega}$ be as defined in Remark \ref{rem4}. We define $\gamma(\omega):=\big(\alpha_n\cap(A_\omega)_{\delta|A_\omega|}\big)\cup \gamma_{E_\omega}$. 
Then ${\rm card}(\gamma(\omega))\leq L_\omega+L_1$ and clearly $d(x,\alpha_n)\geq d(x,\gamma(\omega))$ for every $x\in B(1)$. For $x\in B(2)$, we have \[ d(x,\alpha_n))>\delta|A_\omega|=2\delta|E_\omega|>d(x,\gamma_{E_\omega})\geq d(x,\gamma(\omega)). \] Thus, for every $x\in B$, we have $d(x,\alpha_n)\geq d(x,\gamma(\omega))$. Thus, by Lemma \ref{l3}, \begin{eqnarray*} I_\mu(B,\alpha_n)\geq I_\mu(B,\gamma(\omega)) \geq\mu(B)(\log|B|+\hat{e}_{L_\omega+L_1}(\nu_B)). \end{eqnarray*} This completes the proof of the lemma. \end{proof} Next, we give a lower bound for $\min\limits_{\sigma\in\Omega_k}L_\sigma$. \begin{lemma}\label{lem7} For every $\sigma\in\Omega_k$, we have $L_\sigma\geq n_1$. \end{lemma} \begin{proof} Assume that $L_\sigma<n_1$ for some $\sigma\in\Omega_k$. We deduce a contradiction. By the assumption and Lemma \ref{lc}, we obtain \[ \sum_{\tau\in\Omega_k\setminus\{\sigma\}}L_\tau>n-L_c-n_1\geq(n_2+n_0)\phi_k-n_0\phi_k-n_1>(\phi_k-1)n_2. \] Hence, there exists some $\omega\in\Omega_k\setminus\{\sigma\}$ such that $L_\omega>n_2$. We consider \[ D_{\omega,2}=E_\omega\cup\bigg(\bigcup_{a\in\alpha_n\cap (A_\omega)_{\delta|A_\omega|}}(W(a|\alpha_n)\cap A_\omega^*)\bigg)\setminus E_\sigma. \] Note that it is possible that $E_\sigma\cap (A_\omega)_{\delta|A_\omega|}=\emptyset$. Let \begin{eqnarray*} &&\gamma_{L_\omega-L_0-n_1-L_1}(D_{\omega,2})\in C_{L_\omega-L_0-n_1-L_1}(\nu_{D_{\omega,2}}),\;\gamma_{n_1+L_1}(E_\sigma)\in C_{n_1+L_1}(\nu_{E_\sigma}); \\&&\beta:=\big(\alpha_n\setminus (A_\omega)_{\delta|A_\omega|}\big)\cup B_\omega\cup f_{D_{\omega,2}}(\gamma_{L_\omega-L_0-n_1-L_1}(D_{\omega,2}))\cup f_{E_\sigma}(\gamma_{n_1+L_1}(E_\sigma)). \end{eqnarray*} Then by applying Lemma \ref{compare2} to $F_{\omega,2}=D_{\omega,2}\cup E_\sigma$, we obtain \begin{equation}\label{s4} I_\mu(\mathbb{R}^q\setminus F_{\omega,2},\beta)\leq I_\mu(\mathbb{R}^q\setminus F_{\omega,2},\alpha_n). \end{equation} Next, we focus on the sets $D_{\omega,2}$ and $E_\sigma$. By the assumption, we have $L_\sigma\leq n_1-1$. Hence, by applying Lemmas \ref{l3} and \ref{key1} with $B=E_\sigma$, we deduce \begin{eqnarray} \Delta_{E_\sigma}:&=&I_\mu(E_\sigma,\alpha_n)-I_\mu(E_\sigma,\beta)\nonumber\\ &\geq&I_\mu(E_\sigma,\alpha_n)-I_\mu(E_\sigma,f_{E_\sigma}(\gamma_{n_1+L_1}(E_\sigma)))\nonumber\\ &\geq&\mu(E_\sigma)\big(\hat{e}_{n_1+L_1-1}(\nu_{E_\sigma})-\hat{e}_{n_1+L_1}(\nu_{E_\sigma})\big). \end{eqnarray} On the other hand, we apply Lemmas \ref{l3} and \ref{key1} with $B=D_{\omega,2}$, we have \begin{eqnarray} \Delta_{D_{\omega,2}}:&=&I_\mu(D_{\omega,2},\beta)-I_\mu(D_{\omega,2},\alpha_n)\nonumber\\ &\leq&I_\mu(D_{\omega,2},f_{D_{\omega,2}}(\gamma_{L_\omega-L_0-n_1-L_1}(D_{\omega,2})))-I_\mu(D_{\omega,2},\alpha_n)\nonumber\\ &\leq&\mu(D_{\omega,2})\big(\hat{e}_{L_\omega-L_0-n_1-L_1}(\nu_{D_{\omega,2}})-\hat{e}_{L_\omega+L_1}(\nu_{D_{\omega,2}})\big). \end{eqnarray} Note that $L_\omega>n_2$. By Lemma \ref{lem6} (d1) and Remark \ref{rem2}, we obtain that $\Delta_{E_\sigma}>\Delta_{D_{\omega,2}}$. This and (\ref{s4}), yield that $I_\mu(\mathbb{R}^q,\alpha_n)>I_\mu(\mathbb{R}^q,\beta)$, contradicting the optimality of $\alpha_n$. \end{proof} Our next lemma is very helpful for us to characterize the geometrical structure of an optimal VP. \begin{lemma}\label{lem8} For every $\sigma\in\Omega_k$, the following holds: \[ \sup_{x\in A_\sigma\cap K_\mu}d(x,\alpha_n)\leq \delta|A_\sigma|. 
\] \end{lemma} \begin{proof} Assume that for some $\sigma\in\Omega_k$ and some $x_0\in A_\sigma\cap K_\mu$, we have, $d(x_0,\alpha_n)>\delta|A_\sigma|$. We will deduce a contradiction. By the assumption, for $x\in B(x_0,2^{-1}\delta|A_\sigma|)$, we have $d(x,\alpha_n)>2^{-1}\delta|A_\sigma|$. Let $G_{x_0}$ be as defined in Remark \ref{rem4} (c2). We consider \[ D_{\sigma,1}=E_\sigma\cup\bigg(\bigcup_{b\in\alpha_n\cap(A_\sigma)_{\delta|A_\sigma|}}(W_b(\alpha_n)\cap A_\sigma^*)\bigg)\setminus B(x_0,2^{-1}\delta|A_\sigma|). \] Let $\gamma_{L_\sigma-L_0-L_2}(D_{\sigma,1})\in C_{L_\sigma-L_0-L_2}(\nu_{D_{\sigma,1}})$. We define \begin{eqnarray*} \gamma:=\big(\alpha_n\setminus(A_\sigma)_{\delta|A_\sigma|}\big)\cup G_{x_0}\cup B_\sigma\cup f_{D_{\sigma,1}}(\gamma_{L_\sigma-L_0-L_2}(D_{\sigma,1})). \end{eqnarray*} Then by applying Lemma \ref{compare2} to $F_{\sigma,1}=D_{\sigma,1}\cup B(x_0,2^{-1}\delta|A_\sigma|)$, we obtain \begin{equation}\label{s1} I_\mu(\mathbb{R}^q\setminus F_{\sigma,1},\gamma)\leq I_\mu(\mathbb{R}^q\setminus F_{\sigma,1},\alpha_n). \end{equation} For every $x\in B(x_0,2^{-1}\delta|A_\sigma|)$, we have $d(x,\gamma)\leq 4^{-1}\delta|A_\sigma|$. It follows that \begin{eqnarray}\label{s2} \Delta_{x_0}:&=&I_\mu(B(x_0,2^{-1}\delta|A_\sigma|),\alpha_n)-I_\mu(B(x_0,2^{-1}\delta|A_\sigma|),\gamma)\nonumber\\&\geq&\mu(B(x_0,2^{-1}\delta|A_\sigma|))\log 2\nonumber\\&\geq&C_1(2^{-1}\delta|A_\sigma|)^{s_0}\log 2\nonumber\\&\geq& C_1C_2^{-1} \delta^{s_0}\mu(A_\sigma)\log2. \end{eqnarray} By applying Lemmas \ref{l3} and \ref{key1} with $B=D_{\sigma,1}$, we have \begin{eqnarray}\label{s3} \Delta_{D_{\sigma,1}}:&=&I_\mu(D_{\sigma,1},\gamma)-I_\mu(D_{\sigma,1},\alpha_n) \nonumber\\&\leq&I_\mu(D_{\sigma,1},f_{D_{\sigma,1}}(\gamma_{L_\sigma-L_0-L_2}(D_{\sigma,1})))-I_\mu(D_{\sigma,1},\alpha_n)\nonumber\\&\leq& \mu(D_{\sigma,1})\big(\hat{e}_{L_\sigma-L_0-L_2})(\nu_{D_{\sigma,1}})-\hat{e}_{L_\sigma+L_1}(\nu_{D_{\sigma,1}})\big). \end{eqnarray} From Lemma \ref{lem7}, we know that $L_\sigma\geq n_1$. Thus, by Lemmas \ref{lem2}, \ref{lem10} and Remark \ref{rem2}, we obtain that $\Delta_{x_0}>\Delta_{D_{\sigma,1}}$. This and (\ref{s1}) imply that $I_\mu(\mathbb{R}^q,\gamma)<I_\mu(\mathbb{R}^q,\alpha_n)$, which contradicts the optimality of $\alpha_n$. \end{proof} \begin{remark}\label{rem5} By Lemma \ref{lem8}, we obtain that, whenever $n\geq(n_0+n_2)\phi_k$, we have $L_c={\rm card}(\alpha_n\setminus\bigcup_{\sigma\in\Omega_k}(A_\sigma)_{\delta|A_\sigma|})=0$. Therefore, we have \[ \alpha_n\subset\bigcup_{\sigma\in\Omega_k}(A_\sigma)_{\delta|A_\sigma|}. \] \end{remark} \begin{lemma}\label{lemma1} Let $\emptyset\neq\beta\subset\mathbb{R}^q$ be a finite set and $l_\beta(\omega):={\rm card}(\beta\cap E_\omega)$ for $\omega\in\Omega_k$. Then the following estimate holds: \[ I_\mu(D_{\omega,4},\beta)\geq\mu(D_{\omega,4})(\log|D_{\omega,4}|+\hat{e}_{l_\beta(\omega)+L_1}). \] \end{lemma} \begin{proof} Let $\gamma_{E_\omega}$ be as defined in Remark \ref{rem4} (c1). Since $D_{\omega,4}\subset E_\omega$, we have $d(x,\gamma_{E_\omega})\leq2^{-1}\delta|E_\omega|$ for every $x\in D_{\omega,4}$. We define \[ \gamma(\omega):=(\beta\cap E_\omega)\cup\gamma_{E_\omega}. \] Then ${\rm card}(\gamma(\omega))\leq l_\beta(\omega)+L_1$. Let $x\in D_{\omega,4}$. If $d(x,\beta)=d(x,\beta\cap E_\omega)$, then it is clear that $d(x,\gamma(\omega))\leq d(x,\beta)$. Otherwise, we have \[ d(x,\beta)=d(x,\beta\setminus E_\omega)\geq\delta|E_\omega|>d(x,\gamma_{E_\omega})\geq d(x,\gamma). 
\] Thus, $I_\mu(D_{\omega,4},\beta)\geq I_\mu(D_{\omega,4},\gamma(\omega))$. The lemma follows by Lemma \ref{l3}. \end{proof} Now we are able to give an upper bound for $\max_{\sigma\in\Omega_k}L_\sigma$. \begin{lemma}\label{lem4} For every $\sigma\in\Omega_k$, we have $L_\sigma\leq n_4$. \end{lemma} \begin{proof} Assume that, for some $\sigma\in\Omega_k$, we have $L_\sigma>n_4(>M_0n_3)$. Next, we deduce a contradiction. By the assumption and (\ref{ncomp}), we deduce \[ {\rm card}(\alpha_n\setminus (A_\sigma)_{\delta|A_\sigma|})\leq n-n_4<n_3\phi_k-M_0n_3\leq(\phi_k-M_0)n_3. \] By Lemma \ref{lem9}, we have ${\rm card}(\Omega_k\setminus\mathcal{A}_\sigma)\geq \phi_k-M_0$. Note that $E_\rho,\rho\in\Omega_k$, are pairwise disjoint. There exists some $\omega\in\Omega_k\setminus\mathcal{A}_\sigma$ such that ${\rm card}(\alpha_n\cap E_\omega)<n_3$. We consider \[ D_{\sigma,3}=E_\sigma\cup\bigg(\bigcup_{a\in\alpha_n\cap(A_\sigma)_{\delta|A_\sigma|}}(W_a(\alpha_n)\cap A_\sigma^*)\bigg). \] Using Lemma \ref{lem8} and the triangle inequality, for every $x\in E_\omega$, we have \begin{eqnarray*} d(x,\alpha_n)\leq \frac{1}{2}|E_\omega|+\delta|A_\omega|<d(x,(A_\sigma)_{\delta|A_\sigma|}). \end{eqnarray*} It follows that $E_\omega\cap D_{\sigma,3}=\emptyset$. We define \begin{eqnarray*} &&\gamma_{n_3+L_1}(D_{\omega,4})\in C_{n_3+L_1}(\nu_{D_{\omega,4}}),\;\gamma_{L_\sigma-L_0-n_3-L_1}(D_{\sigma,3})\in C_{L_\sigma-L_0-n_3-L_1}(\nu_{D_{\sigma,3}});\\ &&\beta:=\big(\alpha_n\setminus(A_\sigma)_{\delta|A_\sigma|}\big)\cup B_\sigma\cup f_{D_{\sigma,3}}(\gamma_{L_\sigma-L_0-n_3-L_1}(D_{\sigma,3}))\cup f_{D_{\omega,4}}(\gamma_{n_3+L_1}(D_{\omega,4})). \end{eqnarray*} Then ${\rm card}(\beta)\leq n$. By applying Lemma \ref{compare2} to $F_{\sigma,3}=D_{\sigma,3}\cup D_{\omega,4}$, we obtain \begin{eqnarray}\label{s6} I_\mu(\mathbb{R}^q\setminus F_{\sigma,3},\beta)\leq I_\mu(\mathbb{R}^q\setminus F_{\sigma,3},\alpha_n). \end{eqnarray} This allows us to focus on integrals over the sets $D_{\sigma,3}$ and $D_{\omega,4}$. Note that for every $x\in D_{\omega,4}$, we have $d(x,\beta)\leq d(x,\gamma_{n_3+L_1}(D_{\omega,4}))$. Applying Lemma \ref{l3} with $B=D_{\omega,4}$ and Lemma \ref{lemma1} , we have \begin{eqnarray*} \Delta_{D_{\omega,4}}:&=&I_\mu(D_{\omega,4},\alpha_n)-I_\mu(D_{\omega,4},\beta)\nonumber\\ &\geq&I_\mu(D_{\omega,4},\alpha_n)-I_\mu(D_{\omega,4},f_{D_{\omega,4}}(\gamma_{n_3+L_1}(D_{\omega,4})))\nonumber \\&=&\mu(D_{\omega,4})\big(\hat{e}_{n_3-1+L_1}(\nu_{D_{\omega,4}})-\hat{e}_{n_3+L_1}(\nu_{D_{\omega,4}})\big). \end{eqnarray*} Similarly, for every $x\in D_{\sigma,3}$, we have $d(x,\beta)\leq d(x,\gamma_{L_\sigma-L_0-n_3-L_1}(D_{\sigma,3}))$. Thus, we apply Lemmas \ref{l3} and \ref{key1} with $B=D_{\sigma,3}$ and obtain \begin{eqnarray*} \Delta_{D_{\sigma,3}}:&=&I_\mu(D_{\sigma,3},\beta)-I_\mu(D_{\sigma,3},\alpha_n)\nonumber\\ &\leq&I_\mu(D_{\sigma,3},f_{D_{\sigma,3}}(\gamma_{L_\sigma-L_0-n_3-L_1}(D_{\sigma,3})))-I_\mu(D_{\sigma,3},\alpha_n)\nonumber \\&=&\mu(D_{\sigma,3})\big(\hat{e}_{L_\sigma-L_0-n_3-L_1}(\nu_{D_{\sigma,3}})-\hat{e}_{L_\sigma+L_1}(\nu_{D_{\sigma,3}})\big). \end{eqnarray*} By the assumption, we have $L_\sigma>n_4$. Thus, from Lemmas \ref{lem2}, \ref{lem6} (d2) and Remark \ref{rem2}, we deduce that $\Delta_{D_{\omega,4}}>\Delta_{D_{\sigma,3}}$. Combining this with (\ref{s6}), we obtain that $I_\mu(\mathbb{R}^q,\beta)<I_\mu(\mathbb{R}^q,\alpha_n)$, contradicting the optimality of $\alpha_n$. \end{proof} \section{Proof of Theorem \ref{mthm} } Let $a\in\alpha_n$. 
By Remark \ref{rem5}, we have $a\in (A_\sigma)_{\delta|A_\sigma|}$ for some $\sigma\in\Omega_k$. Fix an arbitrary word $\tau_0\in\mathcal{A}_\sigma$. We define \begin{eqnarray*} &&\Gamma(\tau):=\alpha_n\cap (A_\tau)_{\delta|A_\tau|},\; \tau\in\mathcal{A}_\sigma;\\ &&G(a):=A_{\tau_0}\cup\bigcup_{\tau\in\mathcal{A}_\sigma}\bigcup_{b\in\Gamma(\tau)}(P_b(\alpha_n)\cap K_\mu);\\&&H(a):=\bigcup_{\tau\in\mathcal{A}_\sigma}\Gamma(\tau);\;T_a:={\rm card}(H(a)). \end{eqnarray*} Let $f_a$ be a similarity mapping of similarity ratio $|G(a)|$. We define \[ \nu_{G(a)}=\mu(\cdot|G(a))\circ f_a=\mu\bigg(\cdot\bigg|\bigcup_{\tau\in\mathcal{A}_\sigma}\bigcup_{b\in\Gamma(\tau)}(P_b(\alpha_n)\cap K_\mu)\bigg)\circ f_a. \] \begin{lemma}\label{lem3} Let $G(a)$ and $\nu_{G(a)}$ be as defined above. Then we have \begin{enumerate} \item[(i)]$P_a(\alpha_n)\cap K_\mu\subset G(a)$ and $n_1\leq T_a\leq M_0n_4=:n_5$; \item[(ii)] there exists some constant $C$, such that \[ \sup_{x\in\mathbb{R}^q}\nu_{G(a)}(B(x,\epsilon))\leq C\epsilon^{s_0}\;{\rm for\; every}\;\epsilon>0. \] \end{enumerate} \end{lemma} \begin{proof} The first part of (i) is an easy consequence of the definition of $G(a)$. By Lemma \ref{lem9}, ${\rm card}(\mathcal{A}_\sigma)\leq M_0$. Further, for every $\tau\in\mathcal{A}_\sigma$, by Lemmas \ref{lem7} and \ref{lem4}, we have, $n_1\leq{\rm card}(\Gamma(\tau))\leq n_4$ for every $\tau\in\mathcal{A}_\sigma$. Hence, $n_1\leq T_a\leq n_5$. Next, we show (ii). By the definitions of $G(a),\mathcal{A}_\rho$ and $A_\rho,\rho\in\Omega_k$ and Lemma \ref{lem8}, we have \begin{equation}\label{g1} A_{\tau_0}\subset G(a)\subset\bigcup_{\tau\in\mathcal{A}_\sigma}\bigcup_{\rho\in\mathcal{A}_\tau}A_\rho\subset B\big(c_\sigma,(8\delta+\frac{5}{2})|A_{\tau_0}|\big) \end{equation} Thus, we have the following estimate: \begin{equation}\label{z4} |A_{\tau_0}|\leq|G(a)|\leq (5+16\delta)|A_{\tau_0}|. \end{equation} Let $\eta_3:= C_2(3+8\delta)^{s_0}$ and $\eta_4:=C_12^{-s_0}$. By (\ref{g1}), (\ref{z4}) and (\ref{AD}), \begin{eqnarray}\label{compare1} &&\mu(G(a))\leq C_2(3+8\delta)^{s_0}|A_{\tau_0}|^{s_0}\leq\eta_3|G(a)|^{s_0};\label{compare11}\\ &&\mu(G(a))\geq C_12^{-s_0}|A_{\tau_0}|^{s_0}\geq \eta_4(5+16\delta)^{-s_0}|G(a)|^{s_0}.\nonumber \end{eqnarray} Thus, from \cite[Lemma 2.5]{Zhu:20}, we obtain (ii). \end{proof} \emph{Proof of Theorem \ref{mthm}} By (\ref{AD}), Lemmas \ref{lem11} and \ref{l1}, it is sufficient to consider $n\geq(n_0+n_2)\phi_{k_0}$. Let $a\in\alpha_n$ and let $G(a),H(a),\nu_{G(a)}$ be as defined above. By Theorem 2.4 and Lemma 2.3 of \cite{GL:04} and the similarity of $f_a$, we know that $f_a^{-1}(H(a))\in C_{T_a}(\nu_{G(a)})$. From Lemma \ref{lem3} (i), we have that $n_1\leq T_a\leq n_5$. Because of Lemma \ref{lem3} (ii), we may apply Lemmas \ref{lem11}, \ref{l1} to the measure $\nu_{G(a)}$. We define \[ \underline{d}:=\min_{2\leq h \leq n_5}\underline{d}_h,\;\overline{d}:=\max_{2\leq h \leq n_5}\overline{d}_h,\;d_3:=\min_{2\leq h \leq n_5}g_h. \] Thus, using the similarity of $f_a$ and Lemmas \ref{lem11} and \ref{l1}, we obtain \[ \mu(G(a))\underline{d}\leq\mu(P_a(\alpha_n))=\mu(G(a))\nu_{G(a)}\big(P_{f_a^{-1}(a)}(f_a^{-1}(H(a)))\big)\leq \mu(G(a))\overline{d}. \] By Lemma \ref{lem5}, (\ref{compare11}) and (\ref{g2}), we have \begin{eqnarray*} &&\mu(G(a))\leq \eta_3|A_{\tau_0}|^{s_0}\leq\eta_3C_1^{-1}2^{s_0}\eta_2\phi_k^{-1}\leq n_3\eta_2\eta_3\eta_4^{-1}n^{-1};\\ &&\mu(G(a))\geq \mu(A_{\tau_0})\geq\eta_1\phi_k^{-1}\geq (n_0+n_2)\eta_1n^{-1}. 
\end{eqnarray*} It suffices to define $d_1:=\underline{d}\eta_1(n_0+n_2)$ and $d_2:=\overline{d}n_3\eta_2\eta_3\eta_4^{-1}$. By Lemma \ref{l2}, we know that for every $b\in H(a)\setminus\{a\}$, \[ d(f_a^{-1}(b),f_a^{-1}(a))\geq 3d_3|P_{f^{-1}(a)}(f_a^{-1}(H(a)))\cap K_{\nu_{G(a)}}|. \] Thus, using the similarity of $f_a$, we obtain that \[ \min_{b\in H(a)\setminus\{a\}}d(a,b)\geq 3d_3|P_a(\alpha_n)\cap K_\mu|. \] By Lemma \ref{lem8}, we know that $|P_a(\alpha_n)\cap K_\mu|\leq 2\delta|A_\sigma|$. On the other hand, for every $b\in\alpha_n\setminus H(a)$, there exists some $\tau\in\Omega_k\setminus\mathcal{A}_\sigma$ such that $b\in (A_\tau)_{\delta|A_\tau|}$. Note that $(A_\tau)_{2\delta|A_\tau|}\cap(A_\sigma)_{2\delta|A_\sigma|}=\emptyset$ and $a\in (A_\sigma)_{\delta|A_\sigma|}$, we deduce \[ d(b,a)\geq2\delta|A_\tau|=2\delta|A_\sigma|\geq |P_a(\alpha_n)\cap K_\mu|. \] Note that $3d_3<2^{-1}$. It follows that $d(b,a)\geq 3d_3|P_a(\alpha_n)\cap K_\mu|$ for every $b\in\alpha_n\setminus\{a\}$. Thus, the set $P_a(\alpha_n)$ contains a closed ball of radius $d_3|P_a(\alpha_n)\cap K_\mu|$ which is centered at $a$.
1,116,691,501,379
arxiv
\section*{Introduction} Supersymmetry (SUSY) is a compelling idea that is motivated by both phenomenological (Beyond the Standard Model) and theoretical (String Theory) point of view. If nature indeed uses supersymmetry it must be spontaneously broken. In the simplest scenario SUSY breaking happens in the hidden sector and is mediated to the visible sector (Supersymmetric Standard Model) by gravitational interactions. It is therefore of interest to study SUSY breaking in the context of $N=1$ four-dimensional supergravity (SUGRA). On the other hand, according to observations the Universe is currently expanding with acceleration \cite{Riess:1998cb,Perlmutter:1997zf}. The simplest way to describe such a universe is by introducing a (very) small positive cosmological constant. In supergravity the task of adding a positive cosmological constant is known to be non-trivial because of the restrictions on the scalar potential imposed by supersymmetry. For example in pure (standard) supergravity one can only have zero (Minkowski vacuum) or negative (anti-de Sitter vacuum) cosmological constant \cite{Townsend:1977qa}. It is possible to generate a positive cosmological constant if we allow other (non-gravitational) multiplets. One interesting possibility is that the same field(s) that breaks SUSY can also generate the cosmological constant. This is possible, for example, in the simplest Polonyi model \cite{Polonyi:1977pj,Linde:2016bcz,Aldabergenov:2017bjt}. In this work we will focus on the supergravity non-linear $\sigma$-model with $SU(1,1)/U(1)$ target space. This coset manifold, known as the Poincar\'e plane, describes hyperbolic K\"ahler geometry, and often arises in superstring-derived effective SUGRA models where the corresponding scalars are the compactification moduli. Our goal is to find a Poincar\'e plane model that spontaneously breaks supersymmetry in de Sitter vacuum, i.e. allowing for a positive (tunable) cosmological constant. It turns out, one such class of models is available if we introduce linearly realized gauged $U(1)_R$ symmetry. This, of course, adds a gauge (vector) multiplet with its $D$-term contribution to the scalar potential and SUSY breaking. This paper is organized as follows. In Section 1 we recall basic properties of $N=1$ four-dimensional supergravity as well as the $SU(1,1)/U(1)$ non-linear $\sigma$-model. We discuss the two equivalent coordinate choices -- one covering the whole Poincar\'e plane (disk) while the other covering its upper half. In Section 2 we use the fact that the two parametrizations of the plane reveal two different types of $U(1)$ symmetries (linearly and non-linearly realized), to construct new models where the $U(1)$ is linearly realized local R-symmetry. In Section 3 we show that for suitable parameter choices our models spontaneously break SUSY and R-symmetry, and generate tunable cosmological constant. We find that in two particular cases the scalar potential becomes flat with positive height (de Sitter no-scale supergravity). Some generalizations of the our models are discussed in Section 4, while Section 5 is devoted for further discussion and conclusion. \section{\texorpdfstring{$N=1$ $D=4$}{Lg} supergravity and the Poincar\'e plane} Let us briefly review the general features of the standard four-dimensional $N=1$ supergravity. Its bosonic sector is described by the action (we use Planck units, $\kappa=1$, unless otherwise stated)~\footnote{A derivation of this action can be found in Refs. 
\cite{Wess:1992cp,Freedman:2012zz}} \begin{equation} e^{-1}{\cal L}=\frac{1}{2}R-K_{i\bar{j}}D_m\Phi^i\overbar{D^m\Phi}^j-\frac{1}{4}f^R_{AB}F_{mn}^AF^{B,mn}-\frac{i}{4}f^I_{AB}\tilde{F}_{mn}^AF^{B,mn}-V_F-V_D~,\label{standardaction} \end{equation} whose the F- and D- type scalar potentials are given by \begin{gather} V_F=e^K\left[K^{i\bar{j}}(W_i+K_iW)(\overbar{W}_{\bar{j}}+K_{\bar{j}}\overbar{W})-3|W|^2\right]~,\label{VF}\\ V_D=\frac{g^2}{2}f_R^{AB}\mathscr{D}_A\mathscr{D}_B~,\label{VD} \end{gather} where $K=K(\Phi_i,\overbar{\Phi}_i)$ is a (real) K\"ahler potential depending upon chiral scalar fields $\Phi_i$, $W=W(\Phi_i)$ is a (holomorphic) superpotential, $f_{AB}=f_{AB}(\Phi_i)$ is a (holomorphic) gauge kinetic function with $f^R_{AB}\equiv {\rm Re}f_{AB}$ and $f^I_{AB}\equiv {\rm Im}f_{AB}$; $R$ is the spacetime scalar curvature, $F_{mn}^A=\partial_mA_n^A-\partial_nA_m^A+gf^{ABC}A^B_mA^C_n$ is the field strength of a vector (gauge) field $A_m^A$, $g$ is the gauge coupling, and $\mathscr{D}_A$ are Killing potentials of the gauged isometries of the K\"ahler manifold. We use the notation $K^{i\bar{j}}\equiv K_{i\bar{j}}^{-1}$, where $K_{i\bar{j}}\equiv\frac{\partial^2K}{\partial\Phi_i\partial\overbar{\Phi}_j}$, $W_i\equiv\frac{\partial W}{\partial\Phi_i}$, and $f^{AB}\equiv f_{AB}^{-1}$ with $A,B$ as the gauge group indices. The gauge-covariant derivatives of the charged scalars are \begin{equation} D_m\Phi^i=\partial_m\Phi^i-gA_m^AX_A^i~,\label{DPhi} \end{equation} where $X^i_A$ are the corresponding Killing vectors. The action \eqref{standardaction} is invariant under combined K\"ahler-Weyl transformations \begin{equation} K\rightarrow K+\Sigma+\overbar{\Sigma}~,~~~W\rightarrow We^{-\Sigma}~,\label{KWtransform} \end{equation} where $\Sigma$ is an arbitrary chiral scalar field. Killing potentials can be related to Killing vectors by the expression \begin{equation} \mathscr{D}_A=i\left(K_i+\frac{W_i}{W}\right)X^i_A~,\label{Killingpot} \end{equation} where the superpotential-dependent term is present whenever R-symmetry is gauged, and is known as the Fayet-Iliopoulos term (of gauged R-symmetry) in supergravity. SUSY is spontaneously broken whenever auxiliary $F$ and/or $D$ fields, satisfying \begin{gather} F^i=-e^{K/2}K^{i\bar{j}}(\overbar{W}_{\bar{j}}+K_{\bar{j}}\overbar{W})~,\label{F_aux}\\ D_A=-g\mathscr{D}_A~,\label{D_aux} \end{gather} acquire non-vanishing VEVs. When SUSY is broken gravitino becomes massive absorbing the goldstino. In the Lagrangian the gravitino effective mass appears as \begin{equation} m_{3/2}^2=e^K|W|^2~.\label{m_32} \end{equation} In Minkowski background the VEV of $m_{3/2}$ is the physical gravitino mass, however in more complicated backgrounds physical mass differs from the "Lagrangian" mass given by Eq. \eqref{m_32}. Throughout the paper we will use the term "gravitino mass" in the sense of Eq. \eqref{m_32}.~\footnote{One can borrow the notion of the physical gravitino mass from AdS supergravity as $m_{3/2,{\rm phys}}^2=\langle m_{3/2}\rangle^2 +V_0/3$ (see e.g. \cite{Freedman:2012zz} and Refs. therein). In (pure) AdS supergravity the cosmological constant is $V_0=-3\langle m^2_{3/2}\rangle$ and the physical mass vanishes.} Then, $\langle m_{3/2}\rangle$ can be zero even when SUSY is broken. 
As regards the Poincar\'e plane, it can be described by the K\"ahler metric in terms of the half-plane coordinate $T$ (a complex scalar in spacetime) as \begin{equation} K_{T\overbar{T}}=\frac{\alpha}{(T+\overbar{T})^2}~,\label{metricT} \end{equation} with some positive real number $\alpha$ that determines the K\"ahler curvature, $R_K=-2/\alpha$. Alternatively, the same metric can be defined using the disk coordinate $Z$, \begin{equation} K_{Z\overbar{Z}}=\frac{\alpha}{(1-Z\overbar{Z})^2}~.\label{metricZ} \end{equation} The two metrics are related by the Cayley transformation \begin{equation} Z=\frac{T-1}{T+1}~.\label{ZT} \end{equation} From string theory point-of-view, the Poincar\'e plane models corresponding to compactification moduli have (positive) integer values of $\alpha$. In principle, the available values are $\alpha=1,2,...,7$ according to Refs. \cite{Duff:2010ss,Duff:2010vy,Ferrara:2016fwe}. The metric \eqref{metricZ} can be obtained from the K\"ahler potential $K=-\alpha\log(1-Z\overbar{Z})$. Under the transformation \eqref{ZT} it becomes \begin{equation} K=-\alpha\left[\log(T+\overbar{T})-\log(T+1)-\log(\overbar{T}+1)\right]~, \end{equation} plus an irrelevant constant. The last two terms can be absorbed into the superpotential by the K\"ahler-Weyl transformation \eqref{KWtransform} with $\Sigma=-\alpha\log(T+1)$. To summarize, assuming the general superpotential $W=W(Z)$, the transformation \eqref{ZT} followed by the K\"ahler-Weyl rescaling takes the $Z$-parametrization of the Poincar\'e plane to the (equivalent) $T$-parametrization as follows \begin{equation} \begin{cases} K=-\alpha\log(1-Z\overbar{Z})\\ W=W(Z) \end{cases}~\Longrightarrow~\begin{cases} K=-\alpha\log(T+\overbar{T})\\ W=W\left(\frac{T-1}{T+1}\right)(T+1)^\alpha~. \end{cases}\label{ZTKahler} \end{equation} The Poincar\'e plane has a wide range of applications in phenomenology. For example, the choice $K=-3\log(T+\overbar{T})$ and $W=W_0$ ($W_0$ is a constant) corresponds to the simplest no-scale supergravity \cite{Cremmer:1983bf,Ellis:1983sf,Ellis:1983ei}. Using the inverse transformation of Eq. \eqref{ZT} the no-scale model can be expressed in terms of the disk coordinate $Z$ as $K=-3\log(1-Z\overbar{Z})$ and $W=W_0 (Z-1)^3$. In the both coordinate choices ($T$ and $Z$) the complex scalars can be parametrized in such a way that one of their two real components is canonical. $T$ can be parametrized as \begin{equation} T=\frac{1}{2}e^{-\sqrt{\frac{2}{\alpha}}\varphi}+it~,\label{Tpar} \end{equation} where the real scalar $\varphi$ is canonical, while $t$ (also real) is not -- its kinetic term is coupled to $\varphi$. The disk coordinate $Z$ can be parametrized e.g. in a polar form, \begin{equation} Z=e^{-i\zeta}\tanh{\frac{\phi}{\sqrt{2\alpha}}}~,\label{Zpar} \end{equation} where $\phi$ is the canonical scalar controlling the absolute value of $Z$, and $\zeta$ is the scalar controlling its angle. This parametrization of $Z$ will be useful in the following sections. \section{Gauged R-symmetry in \texorpdfstring{$SU(1,1)/U(1)$}{Lg} models} $U(1)$ gauge theories in the context of $SU(1,1)/U(1)$ models are often considered as half-plane models with the K\"ahler potential \begin{equation} K=-\alpha\log(T+\overbar{T})~, \end{equation} where the symmetry under imaginary shifts of $T$ is gauged. The local shifts can be written as $T\rightarrow T+iq_T\theta$, where $\theta=\theta(x)$ is the gauge parameter and $q_T$ is the corresponding $U(1)$ charge of $T$. 
The Killing vector must satisfy the relation $\delta T=\theta X^T$, thus $X^T=iq_T$. If we want to promote this gauge transformation to a local R-transformation, superpotential must transform as \begin{equation} W\rightarrow We^{-iq\theta}~,\label{W(T)transform} \end{equation} where $q$ is the $U(1)_R$ charge of the superpotential. If there are no other chiral fields in the model, the superpotential is fixed as $W=\mu e^{-\xi T}$ with some real constant $\xi$ and complex constant $\mu$. From the transformation property \eqref{W(T)transform} we obtain the relation $\xi=q/q_T$. Eq. \eqref{Killingpot} in this case yields \begin{equation} {\mathscr D}=q_T\left(\frac{\alpha}{T+\overbar{T}}+\xi\right)~, \end{equation} which makes it clear that $\xi$ is exactly the FI term of gauged R-symmetry that we mentioned earlier. If we switch to the $Z$-parametrization of the Poincar\'e plane with \begin{equation} K=-\alpha\log(1-Z\overbar{Z})~, \end{equation} the phase symmetry of $Z$ becomes the simplest choice for gauging. I.e. we can introduce the gauge transformation $Z\rightarrow Ze^{-iq_Z\theta}$, where $q_Z$ is the $U(1)$ charge of $Z$, with the corresponding Killing vector $X^Z=-iq_ZZ$. Promoting this transformation to an R-transformation, as usual, requires that the superpotential transforms as in Eq. \eqref{W(T)transform}. This fixes the superpotential as $W=\mu Z^n$ where $n=q/q_Z$. To avoid negative powers of $Z$ in the action $n$ must be greater or equal to one (unlike negative powers of $T$ in the half-plane case, negative powers of $Z$ lead to singularities as can be seen from parametrizations \eqref{Tpar} and \eqref{Zpar}). The Killing potential now takes the form \begin{equation} {\mathscr D}=q_Z\left(\frac{\alpha Z\overbar{Z}}{1-Z\overbar{Z}}+n\right)~,\label{KillingZ} \end{equation} with $n$ as the FI term. Let us investigate this setup in more detail. \section{Properties of the scalar potential} Our model of interest is defined by~\footnote{Similar setup was considered in Ref. \cite{Pallis:2018xmt} in the context of SUSY breaking, but without gauging the R-symmetry.} \begin{gather} K=-\alpha\log(1-Z\overbar{Z})~,\\ W=\mu Z^n~.\label{WZn} \end{gather} The superpotential is fixed by requiring R-symmetry, and for simplicity we put $n=1$ and $q=q_Z=1$ (the notation is the same as in the previous section). Also, without loss of generality we can consider $\mu$ to be real. Upon gauging the R-symmetry the Killing potential \eqref{KillingZ} is generated. After choosing the simplest gauge kinetic function $f=1$, we calculate the full scalar potential $V=V_F+V_D$, \begin{gather} V_F=\mu^2\frac{(\alpha-1)^2z^4-(\alpha+2)z^2+1}{\alpha(1-z^2)^\alpha}~,\\ V_D=\frac{g^2}{2}\left(\frac{\alpha z^2}{1-z^2}+1\right)^2~, \end{gather} where for convenience we introduced the notation $z\equiv |Z|$. When using the parametrization \eqref{Zpar} the angular mode $\zeta$ conveniently drops out of the scalar potential, and $z=\tanh\frac{\phi}{\sqrt{2\alpha}}$. 
We can find critical points of the potential by studying the equation \begin{equation} \frac{dV}{dz}=2z\frac{\left[(\alpha-1)z^2+1\right]\left[\alpha^2g^2(1-z^2)^\alpha+\mu^2(1-z^2)^2\left((\alpha-2)(\alpha-1)z^2-2\right)\right]}{\alpha(1-z^2)^{\alpha+3}}=0~.\label{dVdz} \end{equation} Regardless of the value of $\alpha$ there is always a critical point at $z=0$, where the scalar potential reduces to \begin{equation} V(z=0)=\frac{\mu^2}{\alpha}+\frac{g^2}{2}~.\label{V_z=0} \end{equation} The equation for critical points other than $z=0$ can be reduced from Eq. \eqref{dVdz} to \begin{equation} \alpha^2g^2(1-z^2)^\alpha+\mu^2(1-z^2)^2\left((\alpha-2)(\alpha-1)z^2-2\right)=0~,\label{zcrit} \end{equation} because the expression in the first square brackets of \eqref{dVdz} is non-vanishing even when $\alpha<1$, thanks to the canonical normalization $z^2=\tanh^2(\phi/\sqrt{2\alpha})<1$. The existence of consistent solutions to Eq. \eqref{zcrit} depends on the choice of $\alpha$. First, let us consider the cases $\alpha=1,2,3,4$, as they can be studied analytically (we will comment on more general $\alpha$ in the next section). $\bm{\alpha=1}$. Here the solution for Eq. \eqref{zcrit} is $z^2=1-\frac{g^2}{2\mu^2}$. This solution is valid if $2\mu^2>g^2$ in which case it corresponds to two minima (with $Z_2$ symmetry) while $z=0$ is a local maximum. Then the R-symmetry is spontaneously broken due to non-vanishing superpotential, while SUSY is broken due to~\footnote{For convenience we dropped the minus signs on the RHS in Eqs. \eqref{F_aux} and \eqref{D_aux}.} \begin{gather} \langle F\rangle=g/\sqrt{2}~,~~~\langle D\rangle=2\mu^2/g~,\label{FD_alpha=1}\\ \langle m_{3/2}\rangle^2=\frac{2\mu^4}{g^2}\left(1-\frac{g^2}{2\mu^2}\right)~, \end{gather} and the following cosmological constant is generated, \begin{equation} V_0=\frac{\mu^2}{g^2}(3g^2-2\mu^2)~,\label{CC_alpha=1} \end{equation} so that we have AdS minimum if $3g^2<2\mu^2$, Minkowski minimum if $3g^2=2\mu^2$, and dS minimum if $6\mu^2>3g^2>2\mu^2$ (the first inequality ensures $z^2>0$). These conditions show that if we want Minkowski or de Sitter vacuum, both $F$- and $D$-term contributions \eqref{FD_alpha=1} to SUSY breaking must be comparable in magnitude. As $U(1)_R$ is spontaneously broken, the Killing vector $X^Z=-iZ$ is non-vanishing at the minimum. This generates a mass term for the gauge boson proportional to $g^2\langle Z\rangle^2$, as can be seen from Eq. \eqref{DPhi}, while the goldstone mode $\zeta$ can be gauged away. As for the mass of the canonical scalar $\phi$, after introducing its excitation $\delta\phi\equiv\phi-\phi_0$ and expanding the potential around the minimum, it reads \begin{equation} m^2_{\delta\phi}=\frac{8\mu^4}{g^2}\left(1-\frac{g^2}{2\mu^2}\right)~,\label{m_deltaphi} \end{equation} which is positive since $2\mu^2>g^2$, and is twice the gravitino mass, $m_{\delta\phi}=2\langle m_{3/2}\rangle$. In order to describe dark energy, $V_0$ must be positive and very small, namely $V_0\sim 10^{-120}$ in Planck units. From Eq. \eqref{CC_alpha=1} it is clear that this can be achieved in two ways. The first option is to set $\mu^2\sim 10^{-120}$, which will also force $g^2\sim 10^{-120}$ as required by the dS condition $6\mu^2>3g^2>2\mu^2$. This is phenomenologically problematic, as it means that SUSY breaking scale is of the same order as the dark energy scale. A more viable option is the fine tuning of the difference $3g^2-2\mu^2$ so that it almost vanishes. 
This does not require the individual parameters $g$ and $\mu$ -- and thus the SUSY breaking scale -- to be small. The relation $3g^2\approx 2\mu^2$ then simplifies the gravitino and scalar masses as \begin{equation} \langle m_{3/2}\rangle^2\approx 3g^2~,~~~m_{\delta\phi}^2\approx 12g^2~. \end{equation} When $2\mu^2\leq g^2$ the solution $z^2=1-g^2/(2\mu^2)$ does not exist and the point $z=0$ is the global minimum (with no other critical points). In such case SUSY is broken by $\langle F\rangle=\mu$ and $\langle D\rangle=g$ while R-symmetry is restored at the minimum since the superpotential vanishes. This means that the gravitino mass $\langle m_{3/2}\rangle$, as well as the masses of the $U(1)_R$ gauge boson and the $\zeta$ scalar, are zero. This scenario is not viable from phenomenological point of view because there is a massless scalar in the spectrum, and the scales of SUSY breaking and the cosmological constant are identified. $\bm{\alpha=2}$. In this case $z=0$ is the only critical point: if $2g^2>\mu^2$ it is a de Sitter minimum (with broken SUSY and unbroken R-symmetry), if $2g^2<\mu^2$ it is a maximum and the potential is unbounded from below. When $2g^2=\mu^2$, however, the potential is flat -- we have a no-scale model in de Sitter spacetime with the cosmological constant $V=3g^2/2$. The VEVs of $F$- and $D$-terms are \begin{equation} \langle F\rangle=\frac{g}{\sqrt{2}}(1+z_0^2)~,~~~\langle D\rangle=g\frac{1+z_0^2}{1-z_0^2}~, \end{equation} where $z_0$ (the VEV of $z$) is arbitrary at the classical level. Thus, SUSY and R-symmetry are broken (as long as $z_0\neq 0$). The fact that $z^2=\tanh^2{(\phi/\sqrt{2\alpha})}$ has the range $0\leq z^2<1$ implies that \begin{align} \frac{g}{\sqrt{2}}\leq \langle F\rangle &<\sqrt{2}g~,\\ g\leq \langle D\rangle &<\infty~. \end{align} Small cosmological constant requires proportionally small $g^2$. Then $\langle F\rangle$ must also be small because it is proportional to $g$, but $\langle D\rangle$ can take large values if $z_0^2$ is close to one. The same is true for the gravitino mass, \begin{equation} \langle m_{3/2}\rangle^2=\frac{2g^2z_0^2}{(1-z_0^2)^2}~. \end{equation} $\bm{\alpha=3}$. Similarly to the $\alpha=2$ case, when $\alpha=3$ there is only one critical point, $z=0$, and if $9g^2>2\mu^2$ it is a dS minimum, whereas if $9g^2<2\mu^2$ it is a maximum. If $9g^2=2\mu^2$ we once again arrive at a no-scale de Sitter model, this time with the cosmological constant $V=2g^2$. The auxiliary fields and the gravitino mass at the minimum are \begin{gather} \langle F\rangle=\frac{g}{\sqrt{2}}\frac{1+2z_0^2}{\sqrt{1-z^2_0}}~,~~~\langle D\rangle=g\frac{1+2z^2_0}{1-z_0^2}~,\\ \langle m_{3/2}\rangle^2=\frac{9g^2z_0^2}{2(1-z_0^2)^3}~. \end{gather} and have the following range \begin{align} \frac{g}{\sqrt{2}}\leq \langle F\rangle &<\infty~,\\ g\leq \langle D\rangle &<\infty~, \end{align} while $\langle m_{3/2}\rangle$ can take any value from zero (when $z_0=0$) to infinity (when $|z_0|\rightarrow 1$). Unlike the previous case, here both $\langle F\rangle$ and $\langle D\rangle$ can be large regardless of the value of $g$, if $z_0$ is close to one. However, in both $\alpha=2$ and $\alpha=3$ cases the $D$-term VEV necessarily dominates, $\langle D\rangle\gtrsim\langle F\rangle$. $\bm{\alpha=4}$. In this case Eq. 
\eqref{zcrit} is solved by \begin{equation} z^2=\frac{1}{2A}\left(2A-3+\sqrt{9-8A}\right)~,~~~A\equiv\frac{8g^2}{\mu^2}~.\label{z0_and_A} \end{equation} This is complemented by the condition \begin{equation} 0<A<1~\Longrightarrow~0<g^2<\mu^2/8~,\label{A_condition} \end{equation} that ensures that $z^2>0$. The cosmological constant corresponding to this minimum reads \begin{equation} V_0=\frac{g^2}{2\mu^2}(9\mu^2-32g^2)~. \end{equation} If we require $V_0$ to be very small, the only choice is $g\ll 1$, because the cancellation $9\mu^2-32g^2\approx 0$ is incompatible with the condition \eqref{A_condition}. F-/D-terms and the gravitino mass are non-vanishing, \begin{gather} \langle F\rangle=\frac{\mu}{4}\sqrt{9-8A}~,~~~\langle D\rangle=g\sqrt{9-8A}~,\\ \langle m_{3/2}\rangle^2=8\mu^2A^3\frac{2A-3+\sqrt{9-8A}}{(-3+\sqrt{9-8A})^4}~.\label{m32_alpha=4} \end{gather} Since $A$ ranges from zero to one, we have \begin{align} \frac{\mu}{4}<\langle F\rangle &<\frac{3\mu}{4}~,\\ g<\langle D\rangle &<3g~. \end{align} Also $\langle F\rangle>\langle D\rangle/\sqrt{2}$, due to the condition \eqref{A_condition}. If $g\ll 1$, as required to describe dark energy, $\langle D\rangle$ becomes small, but there is still a freedom to control $\langle F\rangle$ and $\langle m_{3/2}\rangle$ by choosing the parameter $\mu$. In particular, the gravitino mass \eqref{m32_alpha=4} can be expanded in the limit $g\rightarrow 0$ (or $A\rightarrow 0$) as \begin{equation} \langle m_{3/2}\rangle^2\approx \frac{27}{16}\mu^2~. \end{equation} As regards the scalar mass, it reads \begin{equation} m^2_{\delta\phi}=\frac{\mu^2}{32}(9-8A)(3-4A+\sqrt{9-8A})~,\label{m_deltaphi_2} \end{equation} where $A$ is defined in Eq. \eqref{z0_and_A}. In the limit of vanishing $g$, it becomes $m^2_{\delta\phi}\approx\langle m_{3/2}\rangle^2\approx 27\mu^2/16$. For illustration purposes we provide the plots of the scalar potential for $\alpha=1,2,3,4$ in Figure \ref{Fig}. \begin{figure} \centering \begin{subfigure}{.47\textwidth} \centering \includegraphics[width=1\linewidth]{V_a_1.pdf} \caption{The case $\alpha=1$ and $g=0.5$. Solid line corresponds to $\mu=0.6$, dashed line to $\mu=0.65$, and dotted line to $\mu=0.1$.} \label{alpha1} \end{subfigure} \hspace{1em} \begin{subfigure}{.47\textwidth} \centering \includegraphics[width=1\linewidth]{V_a_4.pdf} \caption{The case $\alpha=4$ and $g=0.1$. Solid line corresponds to $\mu=1.4$, dashed line to $\mu=1.7$, and dotted line to $\mu=0.1$.} \label{alpha4} \end{subfigure} \begin{subfigure}{.47\textwidth} \centering \includegraphics[width=1\linewidth]{V_a_2.pdf} \caption{The case $\alpha=2$ and $g=0.5$. Solid line corresponds to $\mu=\sqrt{2}g\approx 0.707$ (no-scale choice), dashed line to $\mu=0.6$, and dotted line to $\mu=0.8$.} \label{alpha2} \end{subfigure} \hspace{1em} \begin{subfigure}{.47\textwidth} \centering \includegraphics[width=1\linewidth]{V_a_3.pdf} \caption{The case $\alpha=3$ and $g=0.4$. 
Solid line corresponds to $\mu=3g/\sqrt{2}\approx 0.849$ (no-scale choice), dashed line to $\mu=0.6$, and dotted line to $\mu=1$.} \label{alpha3} \end{subfigure} \captionsetup{width=1\linewidth} \caption{Scalar potential $V(\phi)$, where $\phi$ is the canonical scalar, for $\alpha=1,2,3,4$ and different choices of the parameters $\mu$ and $g$.} \label{Fig} \end{figure} \section{Generalizations} Let us generalize $\alpha$, and recall the equation for critical points \eqref{zcrit}, \begin{equation} \alpha^2g^2(1-z^2)^\alpha+\mu^2(1-z^2)^2\left((\alpha-2)(\alpha-1)z^2-2\right)=0~.\label{zcrit2} \end{equation} It is convenient to introduce the notation \begin{gather} 1-z^2\equiv Y~,\nonumber\\ (\alpha-1)(\alpha-2)-2\equiv B_1~,\\ (\alpha-1)(\alpha-2)\equiv B_2~,\nonumber \end{gather} and rewrite Eq. \eqref{zcrit2} as \begin{equation} \alpha^2g^2Y^\alpha+\mu^2 B_1 Y^2-\mu^2 B_2 Y^3=0~.\label{zcrit3} \end{equation} The no-scale structure can arise when (a) $B_1$ (or $B_2$) vanishes and (b) the remaining powers of $Y$ coincide, namely $\alpha=3$ (or $\alpha=2$). Then, since $Y$ cannot vanish (because $Y=1-z^2$ and $z=\tanh(\phi/\sqrt{2\alpha})$), Eq. \eqref{zcrit3} reduces to a relation between the parameters $\mu$ and $g$, that, if satisfied, leads to flatness of the potential. $B_1$ vanishes for $\alpha=0,3$, while $B_2$ vanishes for $\alpha=1,2$. Thus, for $\alpha=2,3$ the both conditions (a) and (b) are satisfied, and no-scale potential can be obtained. For other values of $\alpha$ flatness of the potential cannot be achieved (as long as $\mu,g\neq 0$) because all three powers of $Y$ in Eq. \eqref{zcrit3} are present and distinct. However, SUSY may still be broken by fixed VEVs of $z$ (or $Y$) as in the cases $\alpha=1,4$ that we studied. In Figure \ref{Fig2} we include plots of scalar potentials with three critical points, obtained for $\alpha=5,6,7$ (Figure \ref{alpha567}) and also fractional values $\alpha=1/2,3/2,5/2$ (Figure \ref{alphafrac}). As can be seen, certain parameter values of $\mu$ and $g$ allow for double-well potentials (with tunable minimum $V_0$) in all the above cases except $\alpha=5/2$ where the two $z\neq 0$ critical points become maxima rather than minima, and the potential is unbounded from below. \begin{figure} \centering \begin{subfigure}{.46\textwidth} \centering \includegraphics[width=1\linewidth]{alpha_567.pdf} \caption{$\alpha=5$ (solid line), $\alpha=6$ (dashed line), and $\alpha=7$ (dotted line). The parameters values are $\mu=1$ and $g=0.1$ in all three cases.} \label{alpha567} \end{subfigure} \hspace{1em} \begin{subfigure}{.49\textwidth} \centering \includegraphics[width=1\linewidth]{alpha_frac.pdf} \caption{Solid line: $\alpha=1/2$, $\mu=0.4$, $g=0.4$. Dashed line: $\alpha=3/2$, $\mu=0.68$, $g=0.5$. Dotted line: $\alpha=5/2$, $\mu=0.6$, $g=0.4$.} \label{alphafrac} \end{subfigure} \captionsetup{width=1\linewidth} \caption{Scalar potential for $\alpha=5,6,7$ (a) and $\alpha=1/2,3/2,5/2$ (b).} \label{Fig2} \end{figure} As regards the generalization of $n$ in the superpotential \eqref{WZn}, it leads to the following equation for critical points, \begin{multline} \alpha^2g^2(1-z^2)^\alpha+\mu^2(1-z^2)^2z^{2n-4}[n(1-z^2)(n-1-z^2-nz^2)\\+\alpha z^2(2n-2-z^2-2nz^2)+\alpha^2z^4]=0~,\label{zcritn} \end{multline} that is a generalization of Eq. \eqref{zcrit2}. This introduces more diversity to the vacuum structure of the models. For example, taking $\alpha=1$ and $n=2$ we demonstrate in Figure \ref{Fig3} the case with five critical points (i.e. with Eq. 
We fix $\mu=0.35$ and consider three values of $g$. When $g=0.171$ (solid line in Figure \ref{Fig3}) we have two maxima, one metastable minimum (false vacuum) at $z=\phi=0$ with preserved SUSY and $U(1)_R$, and two stable minima (true vacua) at $z\neq 0$ with broken SUSY and $U(1)_R$. In such a scenario, domain walls may form that separate the vacua with broken and unbroken SUSY and $U(1)_R$, depending on the relative height of the stable and metastable minima. The domain wall ``bubbles'' would be metastable and eventually decay~\footnote{This can leave stable domain walls that divide true vacua with $z=+|z_0|$ and $z=-|z_0|$.}, as the true vacuum with $z\neq 0$ is energetically favoured. For $g=0.19$ (dashed line in Figure \ref{Fig3}), on the other hand, the $z=0$ minimum becomes stable while the $z\neq 0$ minima become metastable. In this case, the decay of the domain walls would restore SUSY and R-symmetry. Finally, for $g=0.213$ (dotted line in Figure \ref{Fig3}) we have a single stable minimum at $z=0$, and two inflection points. When $g>0.213$, Eq. \eqref{zcritn} does not admit real solutions with $0<|z|<1$, so the $z\neq 0$ critical points disappear. \begin{figure} \centering \includegraphics[width=.5\linewidth]{n_2.pdf} \caption{Scalar potential for $\alpha=1$, $n=2$, and $\mu=0.35$. Solid line represents $g=0.171$, dashed line $g=0.19$, and dotted line $g=0.213$.} \label{Fig3} \end{figure} \section{Discussion and conclusion} We constructed new models of spontaneous supersymmetry and R-symmetry breaking, based on $N=1$ four-dimensional supergravity coupled to a chiral multiplet with $SU(1,1)/U(1)$ (Poincar\'e plane) target space. The crucial part of our construction is the gauged $U(1)_R$ symmetry that acts linearly on the Poincar\'e disk variable $Z$. This allows for SUSY breaking in a de Sitter vacuum for appropriate parameter ranges. More specifically, we considered the K\"ahler potential and superpotential \begin{equation} K=-\alpha\log(1-Z\overbar{Z})~,~~~W=\mu Z~,\label{KW_setup} \end{equation} with integer values of $\alpha$ motivated by string theory constructions. We found that when $\alpha=1,4$, SUSY and R-symmetry are spontaneously broken provided that $2\mu^2>g^2$ (if $\alpha=1$) and $\mu^2>8g^2$ (if $\alpha=4$). In both cases a positive cosmological constant can be generated. For $\alpha=2$ and $\alpha=3$ the situation is different -- for the specific choices $\mu^2=2g^2$ and $2\mu^2=9g^2$, respectively, we have flat potentials with positive tunable height. Consequently, the VEV of $Z$ is classically undetermined (to be fixed by perturbative corrections), and the SUSY breaking scale is arbitrary (with some restrictions), i.e. these two cases are examples of de Sitter no-scale supergravity. We also demonstrated that other values of $\alpha$ (including fractional ones) may lead to spontaneous SUSY and R-symmetry breaking as well, but the no-scale structure remains unique to $\alpha=2,3$. We discussed the generalization of $n$ in the superpotential $W=\mu Z^n$, and showed that it can generate potentials with more than two local minima, which can lead to interesting implications such as the formation of metastable domain wall bubbles that can decay into true vacua with broken or unbroken supersymmetry and R-symmetry, depending on the values of $\mu$ and $g$.
The tree-level spectrum of the models (after SUSY and R-symmetry breaking) consists of a massive vector, a massive spin-1/2 field, and a massive real scalar (except for the no-scale cases, where the potential is to be generated at one loop). The spin-1/2 field is a linear combination of the chiral fermion $\chi$ (superpartner of $Z$) and the gaugino $\lambda$, orthogonal to the goldstino. The $\chi$ and $\lambda$ have $U(1)_R$ charges $q(\chi)=q(\lambda)=1/2$, and therefore the pure model contains anomalies that must be cancelled after including the Supersymmetric Standard Model (SSM) and other possible fields. Also, the $U(1)_R$ gauge symmetry introduces the non-trivial task of assigning appropriate R-charges to the fields. For example, if the full superpotential is the sum $\mu Z+W_{\rm SSM}$, then the Standard Model R-charge assignments can be done along the lines of Ref. \cite{Chamseddine:1995gb}. Alternatively, $W_{\rm SSM}$ can be coupled to some power of $Z$ and thus carry a different R-charge, or even be neutral. We also checked whether or not viable single-field (hilltop) inflation can be realized with the models where $\alpha=1$ and $\alpha=4$ (with $n=1$). Unfortunately, it does not seem to be possible because the curvature of the potential around its maximum is too large. To be specific, for $\alpha=1$ the slow-roll parameter $\eta_*$ is \begin{equation} \eta_*\equiv\frac{V''(\phi_*)}{V(\phi_*)}\approx -1~, \end{equation} taken at the initial value of $\phi$, which we assume to be $\phi_*\approx 0$ (close to the maximum of the potential). Meanwhile the parameter \begin{equation} \epsilon_*\equiv\frac{1}{2}\left(\frac{V'(\phi_*)}{V(\phi_*)}\right)^2~ \end{equation} can be made small if the initial value of $\phi$ is close enough to zero. This means that the spectral tilt $n_s=1+2\eta_*-6\epsilon_*$ takes the value $n_s\approx -1$, which is incompatible with CMB data, $n_s\approx 0.965$ (see e.g. PLANCK 2018 results \cite{Akrami:2018odb}). On the other hand, the $\alpha=4$ case predicts a smaller value of $\eta_*$, namely $\eta_*\approx -0.5$, but the tilt becomes $n_s\approx 0$, which is still unsatisfactory.~\footnote{For the values $\alpha=5,6,7$ the scalar $\phi$ cannot be identified with the inflaton, because requiring $V_0\sim 10^{-120}$ would imply an unacceptably small inflationary (Hubble) scale of the same order as $V_0$, while for $\alpha=1/2,3/2$ the problem of large $\eta_*$ remains.} The situation is somewhat similar to the construction of Refs. \cite{Antoniadis:2017gjr,Antoniadis:2019dpm}, where the K\"ahler potential is canonical (plus a quartic term), while the superpotential is linear due to the requirement of local R-symmetry. In this model, viable hilltop inflation becomes possible only after including certain higher-order corrections to the K\"ahler potential. It is therefore of interest to continue the investigation of inflationary scenarios in our models after including corrections/modifications to the K\"ahler potential, compatible with local R-symmetry. \section*{Acknowledgements} Y.A. was supported by the CUniverse research promotion project of Chulalongkorn University under the grant reference CUAASC, and the Ministry of Education and Science of the Republic of Kazakhstan under the grant reference BR05236322.
\section{Introduction} A major breakthrough in understanding the nature of fast radio bursts \citep[FRBs,][]{2007Sci...318..777L, 2013Sci...341...53T} came when the repeater FRB121102 was precisely localized to a dwarf galaxy at redshift $z=0.193$ \citep{2014ApJ...790..101S, 2016Natur.531..202S, 2017Natur.541...58C, 2017ApJ...834L...7T, 2017ApJ...834L...8M, 2017ApJ...843L...8B}. Confirmation of the cosmological origin of FRBs means that they are highly energetic events seen in the radio band. The bursts from the repeater show a power-law distribution of isotropic equivalent luminosities $\mathrm{d} N/\mathrm{d} L\propto L^{-\beta}$ in the range from $\sim$$10^{40}$ to $\sim$$10^{43}\rm\,erg\,s^{-1}$ and $\beta\sim 1.7$ \citep{2016ApJ...833..177S, 2017ApJ...850...76L, 2017MNRAS.472.2800H, 2017ATel10693....1O}. The luminosity distribution of other so-far non-repeating FRBs is less certain due to poor localization and unknown distances. If the Milky-Way-subtracted dispersion measures (DMs) are dominated by the intergalactic medium, their isotropic equivalent luminosities range from $\sim$$10^{42.5}$ to $\sim$$10^{44.5}\rm\,erg\,s^{-1}$ \citep[see the FRB catalog by][]{2016PASA...33...45P}, with FRB160102 \citep{2018MNRAS.475.1427B} and FRB170107 \citep{2017ApJ...841L..12B} being the brightest ones detected so far. We note that these luminosities may not correspond to intrinsic values because (i) the reported peak fluxes in most cases are based on the assumption that the burst occurred at the beam center, (ii) many FRBs are temporally broadened due to multi-path propagation \citep{2017arXiv171008026R}, and (iii) lensing by plasma structures in the host galaxy could further introduce magnification biases \citep{2017ApJ...842...35C}. Many models have been proposed to explain FRBs based on considerations of their event rate, duration and energetics. They generally fall into two categories \citep[see][for recent reviews of these models]{2016MPLA...3130013K, 2018arXiv180409092K}: emission from a relativistic outflow which dissipates its energy at large distances from the central compact object (a black hole or neutron star); emission from the magnetospheric plasma of a neutron star (NS). The high brightness temperatures $T_{\rm b}\gtrsim 10^{35}\,$K of FRBs mean that the emission mechanism must be coherent. \citet{2018MNRAS.477.2470L} showed that models in the first category, i.e. an outflow undergoing internal dissipation or interacting with the surrounding medium, cannot reach typical FRB brightness temperatures before the waves lose energy by induced Compton scattering. On the other hand, if FRBs are produced within the magnetosphere of NSs, the emission process is most likely powered by the dissipation of magnetic energy, instead of rotational energy \citep{2017ApJ...838L..13L, 2017ApJ...841...14M}. The energy density of the FRB electromagnetic (EM) waves at radius $r$ from the source (in the limit $r\gg$ source size) is $U_{\rm EM} = L/(4\uppi r^2 c)$, where $L$ is the isotropic equivalent luminosity, and $c$ is the speed of light. The magnetospheric B-field configuration at radius $r\gg R_*$ ($R_*\approx 10\,$km being the NS radius) is largely dipolar $B(r) \simeq B_*(r/R_*)^{-3}$, where $B_*$ is the surface dipolar field.
We require the energy density of the B-field $B^2/8\uppi$ to be higher than $U_{\rm EM}$ and obtain an upper limit for the radius of emission \begin{equation} \label{eq:1} r\lesssim (6.2\times10^{7}\mathrm{\,cm})\, B_{*,15}^{1/2} L_{\rm 45}^{-1/4}, \end{equation} where $B_{*,15} = B_*/10^{15}\,$G and we use the highest inferred isotropic equivalent luminosity of $L = 10^{45}L_{45}\rm\,erg\,s^{-1}$ as a fiducial value \citep{2017ApJ...841L..12B, 2018MNRAS.475.1427B}. If the EM waves are powered only by particles' kinetic energy, the number density needs to be extremely high $n\gtrsim (3\times10^{25}\mathrm{\,cm^{-3}})\,L_{45}r_7^{-2} \gamma^{-1}(m/m_{\rm e})^{-1}$, where $r_7 = r/10^7\,$cm, $\gamma$ is the mean Lorentz factor, and $m/m_{\rm e}$ is the rest mass of the particles divided by the electron mass. For any reasonable Lorentz factor, this number density would make the source plasma extremely optically thick due to free-free and/or curvature absorption \citep{2017MNRAS.468.2726K, 2017arXiv170807507G}, and radio waves could not escape. To circumvent this problem, we assume that the FRB waves are emitted by the coherent curvature process when particles are continuously accelerated by a quasi-static E-field parallel to the local magnetospheric B-field, following \citet{2017MNRAS.468.2726K}. In this \textit{Letter}, we show that FRBs should have a maximum luminosity $L_{\rm max}$ because this parallel E-field must not exceed $\sim$5\% of the quantum critical field $E_{\rm c} = m_{\rm e}^2 c^3/(e\hbar)\simeq 4.4\times10^{13}\,$esu, where $m_{\rm e}$ and $e$ are the electron mass and charge, and $\hbar$ is the reduced Planck's constant. Since the strength of the parallel E-field depends on the location of the source plasma in the magnetosphere, we can use $L_{\rm max}$ to constrain the source properties. In \S2, we derive the upper limit of the parallel E-field and then calculate the maximum luminosity of FRBs. In \S3, we discuss the effects of the maximum luminosity on the observed flux distributions for repeating bursts from the same object and for the entire population of FRBs. In \S4, we discuss some caveats of our simplified picture. Our main conclusions are summarized in \S5. We use CGS units and the convention $Q = 10^nQ_n$. All luminosities are in the isotropic equivalent sense, unless otherwise explicitly stated. We use the {\it Planck} best-fit cosmology \citep{2016A&A...594A..13P}. \section{Luminosity Upper Limit due to Schwinger Pair Production} We consider the situation of a quasi-static and uniform E-field and B-field near the surface of a strongly magnetized NS, with $B\gg E$ and $\boldsymbol{E}\cdot\boldsymbol{B}/B\ll E_{\rm c}$. It is possible to find an inertial frame where the E-field is parallel to the B-field by applying a non-relativistic Lorentz transform (in the original $\boldsymbol{E}\times\boldsymbol{B}$ direction). In this new frame, the B-field strength is nearly unchanged and the E-field strength is given by $E_{\parallel}\simeq \boldsymbol{E}\cdot\boldsymbol{B}/B$. It is well known that, when $E_{\parallel}/E_{\rm c}\gtrsim 5\%$, the E-field will get quickly shielded by copious Schwinger pairs and most of the energy in the E-field gets converted into kinetic/rest-mass energy of pairs \citep{Sauter1931, HeisenbergEuler36, Schwinger51}. For completeness, we first re-derive the limiting E-field strength \citep[following][]{2015arXiv150506400S} and then discuss the implications for the maximum FRB luminosity.
The volumetric rate of pair production is given by \citep[e.g.][]{2006PhRvD..73f5020K, 2010PhR...487....1R} \begin{equation} \label{eq:4} \Gamma \simeq \alpha B E_{\parallel}/(\uppi \hbar) \coth(\uppi B/E_{\parallel})\mathrm{exp}(-\uppi E_{\rm c}/E_{\parallel}), \end{equation} where $\alpha \simeq 1/137$ is the fine structure constant and $\coth(\uppi B/E_{\parallel})\simeq 1$ when $E_{\parallel}\ll \uppi B$. Since $\partial^2 E_{\parallel}/\partial t^2 = -4\uppi \partial J/\partial t \simeq -8\uppi ec\Gamma$ (where $J$ is the current density), the timescale over which the E-field is shielded is given by $\Delta t \simeq (\hbar/8ec\alpha B)^{1/2} \mathrm{exp}(\uppi E_{\rm c}/2E_{\parallel})$. When $E_{\parallel}\ll E_{\rm c}$, this timescale is an extremely sensitive function of $E_{\parallel}$, and the limiting E-field is \begin{equation} \label{eq:3} E_{\parallel,\rm lim} \simeq {\uppi E_{\rm c}\over \mathrm{ln}(8ec\alpha B\Delta t^2/\hbar)} \simeq {2.5\times10^{12}\mathrm{\,esu} \over 1 + 0.018\mathrm{ln}(\Delta t_{-9}^2 B_{15})}, \end{equation} where $\Delta t_{-9} = \Delta t/1\,$ns and $B_{15} = B/10^{15}\,$G. We can see that the parallel E-field is quickly shielded on a sub-ns timescale when it exceeds $2.5\times10^{12}\rm\,esu$. In the following, we use simple arguments based on energy conservation and source coherence to show that the strength of the parallel E-field is directly related to the FRB luminosity. To generate waves of frequency $\nu$, the maximum source length in the longitudinal direction is $\sim$$\lambdabar \equiv c/(2\uppi \nu)$ in the NS rest-frame. Consider a source of longitudinal size $\lambdabar$ and transverse size $\ell_\perp$, moving along the local magnetospheric B-field towards the observer at a Lorentz factor $\gamma$ in the NS rest-frame. The local curvature radius of the B-field line is denoted as $\rho$. For a fixed line of sight, the radiation formation length in the NS rest-frame is $\rho/\gamma$, which corresponds to a radiation formation time of $\rho/(\gamma^2c)$ in the comoving frame of the source. During this time, the EM fields or the influence by one particle on another travels a distance of $\rho/\gamma^2$ in the comoving frame, so the transverse size of the source (which is the same in the comoving frame and NS rest-frame) is limited by \begin{equation} \label{eq:9} \ell_\perp\lesssim \rho/\gamma^2. \end{equation} The emitting power of the source in the NS rest-frame is a factor of $\sim$$\gamma^{-4}$ smaller\footnote{A factor of $\gamma^{-2}$ comes from relativistic beaming, and another factor of $\gamma^{-2}$ is because the difference between the speeds of photons and emitting particles is $\sim c/\gamma^2$ in the limit $\gamma\gg 1$.} than the isotropic equivalent luminosity $L$ seen by the observer. This emitting power is supplied by $N\sim n\lambdabar\ell_\perp^2$ particles in the coherent volume, where $n$ is the number density of radiating particles in the NS rest-frame. From energy conservation, the emitting power of each particle in the NS rest-frame is given by $E_\parallel e c$. Thus, we obtain \begin{equation} \label{eq:10} \gamma^{-4}L \sim n\lambdabar\ell_\perp^2 E_\parallel e c,\ \mathrm{or}\ L\sim (ne\lambdabar) (\ell_\perp\gamma^2)^{2} E_\parallel c.
\end{equation} Since all \textit{radiating} particles in the coherent volume are of the same charge sign (we ignore other background particles that do not contribute to the observed FRB waves), we require that their Coulomb field does not exceed and shield the parallel E-field --- the source of energy, i.e., \begin{equation} \label{eq:11} ne\lambdabar\lesssim E_\parallel. \end{equation} We insert inequalities (\ref{eq:9}) and (\ref{eq:11}) into eq. (\ref{eq:10}) and obtain \begin{equation} \label{eq:12} L\lesssim E_\parallel^2\rho^2 c. \end{equation} Using the upper limit of the parallel E-field $E_{\rm lim}$, we obtain the maximum isotropic equivalent luminosity of an FRB \begin{equation} \label{eq:5} L< L_{\rm max,1}\sim (2\times10^{47}\mathrm{\,erg\,s^{-1}})\, \rho_6^2. \end{equation} We note that the above maximum luminosity has no dependence on the Lorentz factor of the emitting particles. Below, we show that there is another, Lorentz-factor-dependent maximum luminosity. We assume that the emitting particles move close to the speed of light ($\gamma\gg 1$) along the magnetospheric B-field, and hence there is a current density $nec$ parallel to the B-field. This current induces a transverse magnetic field $B_{\rm ind}\sim ne \ell_\perp$, which must not perturb (or twist) the original B-field by more than a fraction of $\gamma^{-1}$ (the beaming angle): \begin{equation} \label{eq:13} ne \ell_\perp\lesssim B/\gamma. \end{equation} We insert the above inequality into eq. (\ref{eq:10}) and obtain \begin{equation} \label{eq:14} L\lesssim BE_\parallel \gamma\lambdabar (\ell_\perp\gamma^2)c\lesssim BE_\parallel\gamma\lambdabar\rho c, \end{equation} where eq. (\ref{eq:9}) has been used in the second step. In the coherent curvature emission model, the radiation formation length is $\rho/\gamma\simeq \gamma^2\lambdabar$ in the NS rest-frame, so we obtain the typical Lorentz factor of emitting particles to be $\gamma\simeq (\rho/\lambdabar)^{1/3}$. We plug this Lorentz factor into eq. (\ref{eq:14}) and make use of $E_\parallel<E_{\rm \parallel,lim} = 2.5\times10^{12}\,$esu, and then obtain \begin{equation} \label{eq:15} L\lesssim L_{\rm max,2} \sim (2\times10^{46}\mathrm{\,erg\,s^{-1}})\, B_{15} \rho_6^{4/3}\nu_9^{-2/3}, \end{equation} where $B_{15} = B/10^{15}\,$G is the B-field strength in the source region and $\nu_9 = \nu/$GHz. The strongest B-fields of NSs are believed to be produced due to amplification by the $\alpha${--}$\Omega$ dynamo and may reach $\mathrm{a\ few}\times 10^{17}\,$G, limited by the energy budget of the differential rotation \citep{1993ApJ...408..194T}. Thus, the inequality (\ref{eq:15}) may be weaker than (\ref{eq:5}). Nevertheless, we combine these two conditions and obtain \begin{equation} \label{eq:16} L\lesssim L_{\rm max} \sim (2\times10^{47}\mathrm{\,erg\,s^{-1}})\, \mathrm{min}(\rho_6^2, B_{16}\rho_6^{4/3}\nu_9^{-2/3}). \end{equation} \section{Observations} In this section, we discuss the effects of $L_{\rm max}$ on the observed flux distributions for repeating bursts from the same object and for the entire population of FRBs. \subsection{Repeating Bursts from the Same Object} The luminosity function for the repeater FRB121102 is a power-law with $\beta=1.7^{+0.3}_{-0.5}$ \citep{2017ApJ...850...76L} extending from $\sim$$10^{40}$ to $\sim$$10^{43}\rm\,erg\,s^{-1}$, with (so far) the brightest one having peak flux $S=24\pm7\,$Jy \citep{2017ATel10693....1O}.
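Before turning to specific scenarios, we note that eq. (\ref{eq:16}) and the implied flux cut-off are straightforward to evaluate; below is a minimal numerical sketch (Python; astropy's Planck15 cosmology is used as a stand-in for the {\it Planck} best-fit parameters, and a $\sim$GHz bandwidth is assumed in the Jy conversion; both are choices of this illustration, not of the derivation):
\begin{verbatim}
import numpy as np
import astropy.units as u
from astropy.cosmology import Planck15

def L_max(rho6=1.0, B16=0.1, nu9=1.0):
    # Maximum isotropic equivalent luminosity, eq. (16), in erg/s
    return 2e47 * min(rho6**2, B16 * rho6**(4.0/3) * nu9**(-2.0/3))

def peak_flux_Jy(L, z, bandwidth_Hz=1e9):
    # Peak flux density in Jy, spreading L over an assumed bandwidth
    DL = Planck15.luminosity_distance(z).to(u.cm).value
    S = L / (4.0 * np.pi * DL**2) / bandwidth_Hz  # erg/s/cm^2/Hz
    return S / 1e-23                              # 1 Jy = 1e-23 cgs

# Source near the NS surface: rho ~ 10 km (rho6 = 1), B ~ 1e15 G (B16 = 0.1)
L = L_max(rho6=1.0, B16=0.1)          # ~2e46 erg/s
print(peak_flux_Jy(L, z=0.193))       # ~1e4 Jy at the repeater's redshift
\end{verbatim}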
One possible scenario is that the bursts are produced near the surface of a NS where $\rho\sim 10\,$km and that the B-field strength near the source is $\sim$$10^{15}\,$G (the typical dipole surface field strength inferred from Galactic magnetars). In this case, we have $L_{\rm max}\sim 10^{46}\rm\,erg\,s^{-1}$ and the observed flux distribution should have a cut-off at $\sim$$10^4\,$Jy. Note that, if the bursts are produced far away from the NS surface (but within the light cylinder), then the B-field strength in the source region is much weaker than that near the surface, and hence the cut-off should show up at a lower flux level $\ll 10^{4}\,$Jy. In the future, we may detect more repeaters, and then an interesting possibility is that each repeating source may have a different B-field strength and curvature radius and hence a different $L_{\rm max}$. We can see that the cut-off luminosity $L_{\rm max}$ provides a powerful probe of the emission location and the B-field strength near the source. \subsection{The Entire FRB Population} To show the observational effects of $L_{\rm max}$ for the entire FRB population, we assume a global power-law luminosity function in the form \begin{equation} \label{eq:6} {\mathrm{d} \dot{N}\over \mathrm{d} L} = \Phi(z) (\beta -1) L_{\rm 0}^{\beta-1} L^{-\beta}, \end{equation} where $L_{\rm 0}$ is a (fixed) reference luminosity, $\beta$ is the power-law index, and $\Phi(z)$ is the normalization including the cosmological evolution. We do not assume that all FRBs repeat the same way as FRB121102, so the global power-law index may not be the same as that of the repeater. Effectively, we treat each repetition as a separate FRB originating from the same redshift. We leave $\beta$ as a free parameter between 1 and 2.5. The lower and upper limits are motivated by the observations that brighter bursts are rarer than dimmer ones and that the DM distribution of known FRBs is not concentrated near the lowest end. For simplicity, we also assume that FRBs (on average) have a flat spectrum near $\sim$GHz frequencies \citep{2018arXiv180404101G}, otherwise a receiver operating at a certain frequency band will observe different parts of the intrinsic spectrum for sources at different redshifts. This complication can be effectively included in the $\Phi(z)$ factor and does not significantly affect our calculations below. In the ideal case of no propagation effects such as scattering broadening, plasma lensing, absorption and gravitational lensing (these complications will be discussed in \S 4), the flux distribution of the observed bursts is \begin{equation} \label{eq:7} \begin{split} \dot{N}_{\rm det}(>S) =& \int_0^{z_{\rm max}} {\mathrm{d} z\over 1+z} {\mathrm{d} V \over \mathrm{d} z} \int_{4\uppi D_{\rm L}^2 S}^{L_{\rm max}} {\mathrm{d} \dot{N}\over \mathrm{d} L} \mathrm{d} L \\ = &\int_0^{z_{\rm max}} {\mathrm{d} z\over 1+z} {\mathrm{d} V \over \mathrm{d} z} \Phi(z)\, \mathrm{max}\biggl[0, \\ &\left({4\uppi D_{\rm L}^2 S\over L_{\rm 0}}\right)^{1-\beta} - \left({L_{\rm max}\over L_{\rm 0} }\right)^{1-\beta} \biggr], \end{split} \end{equation} where $z_{\rm max}$ is the maximum redshift at which FRBs can be produced, $\mathrm{d} V/\mathrm{d} z$ is the differential comoving volume within the field of view for a certain telescope, $D_{\rm L}(z)$ is the luminosity distance for redshift $z$, and $L_{\rm max}$ is the maximum isotropic equivalent luminosity of FRBs as given by eq. (\ref{eq:16}). In the limit $L_{\rm max}\rightarrow \infty$, the $(L_{\rm max}/L_0)^{1-\beta}$ term in eq.
(\ref{eq:7}) vanishes, so we obtain a power-law flux distribution $\dot{N}_{\rm det}(>S)\propto S^{1-\beta}$, independent of the cosmological evolution of the FRB rate $\Phi(z)$. This is because the redshift distribution of bursts in each flux bin $[S, S+\mathrm{d} S]$ is independent of $S$. The flux distribution of the observed FRBs from the Parkes telescope is consistent with a single power-law, but the power-law index is not well constrained \citep[see the discussions by][]{2016ApJ...830...75V, 2018MNRAS.475.1427B, 2018MNRAS.474.1900M}, due to the lack of a homogeneous sample with a sufficient number of bursts. For the luminosity function in eq. (\ref{eq:6}), since $\beta > 1$, bursts near the cut-off luminosity are very rare and the only way to increase their detection rate is to use telescopes with larger fields of view. The critical flux at which the two terms on the RHS of eq. (\ref{eq:7}) are equal is given by \begin{equation} \label{eq:8} S_{\rm c} = \left[ {\int_0^{z_{\rm max}} {\mathrm{d} z\over 1+z} {\mathrm{d} V \over \mathrm{d} z} \Phi(z) ({4\uppi D_{\rm L}^2})^{1-\beta} \over \int_0^{z_{\rm max}} {\mathrm{d} z\over 1+z} {\mathrm{d} V \over \mathrm{d} z} \Phi(z) }\right]^{{1\over\beta-1}} L_{\rm max}, \end{equation} which is linearly proportional to $L_{\rm max}$ and depends on the power-law index $\beta$, the cosmological rate evolution $\Phi(z)$, and the maximum redshift $z_{\rm max}$. At flux levels much below $S_{\rm c}$, the source count is a power-law $\dot{N}_{\rm det}(>S) \propto S^{1-\beta}$, but above this flux level, the deficit of FRBs with $L\gtrsim L_{\rm max}$ will be seen as a break in the observed flux distribution. From eq. (\ref{eq:7}), one can show that the distribution at $S\gg S_{\rm c}$ approaches $N(>S)\propto S^{-1.5}$ (Euclidean), since bursts with $L\sim L_{\rm max}$ from the nearby Universe will dominate. In Fig. \ref{fig:Fcutoff}, we show the critical flux level $S_{\rm c}$ as a function of the power-law index for four different cases: (i) the FRB rate $\Phi(z)$ either tracks the cosmic star-formation history or is non-evolving throughout the history; (ii) the maximum redshift $z_{\rm max}$ is either 2 or 6. The choices of $z_{\rm max}$ are motivated by the highest redshift of $z\sim 2$ inferred from the DM of FRB 160102 \citep{2018MNRAS.475.1427B}. We find that the value of $S_{\rm c}$ has a weak dependence on the cosmic evolution of the FRB rate and that the dependence on the power-law index $\beta$ is also fairly mild (varying by about one order of magnitude). \begin{figure} \centering \includegraphics[width = 0.48 \textwidth, height=0.23\textheight]{Fcutoff.pdf} \caption{The critical flux $S_{\rm c}$ (eq. \ref{eq:8}) as a function of the power-law index of the global luminosity function, for four different cases. For the red (solid) and blue (long-dashed) curves, we assume that the FRB rate tracks the cosmic star-formation history (SFH) given by \citet{2014ARA&A..52..415M}. For the yellow (dotted) and green (short-dashed) curves, we assume a non-evolving (FLAT) FRB rate history. Two choices of maximum redshift are shown, $z_{\rm max} = 2$ and 6. }\label{fig:Fcutoff} \end{figure} Therefore, for a power-law global luminosity function, we predict the cumulative flux distribution to be $\dot{N}_{\rm det}(>S)\propto S^{1-\beta}$ below the flux level $S_{\rm c}\sim(10^3${--}$10^4)L_{\rm max,47}\,$Jy and $\dot{N}_{\rm det}(>S)\propto S^{-1.5}$ at $S\gg S_{\rm c}$.
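The ratio of integrals in eq. (\ref{eq:8}) behind these numbers is simple to evaluate; a minimal sketch (Python; a non-evolving $\Phi(z)$, astropy's Planck15 cosmology as a stand-in for the {\it Planck} best-fit values, and an assumed $\sim$GHz bandwidth in the Jy conversion, all choices of this illustration):
\begin{verbatim}
import numpy as np
import astropy.units as u
from astropy.cosmology import Planck15

def S_c_Jy(beta, z_max=2.0, L_max=1e47, bandwidth_Hz=1e9):
    # Critical flux of eq. (8) for a non-evolving rate, Phi(z) = const
    z = np.linspace(1e-3, z_max, 2000)
    dVdz = Planck15.differential_comoving_volume(z).to(u.cm**3/u.sr).value
    DL = Planck15.luminosity_distance(z).to(u.cm).value
    w = dVdz / (1.0 + z)                 # (dV/dz)/(1+z); constant Phi cancels
    num = np.trapz(w * (4.0*np.pi*DL**2)**(1.0 - beta), z)
    den = np.trapz(w, z)
    S_c = (num/den)**(1.0/(beta - 1.0)) * L_max   # erg/s/cm^2
    return S_c / bandwidth_Hz / 1e-23             # -> Jy

print(S_c_Jy(beta=1.7))   # of order 1e3 Jy for L_max = 1e47 erg/s
\end{verbatim}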
The deficit of high-flux FRBs should be noticeable with a sufficiently large number of detections near and above $S\sim S_{\rm c}$. The cut-off luminosity $L_{\rm max}$ can be inferred from the critical flux $S_{\rm c}$ via eq. (\ref{eq:8}). Unfortunately, the expected all-sky rate of FRBs near $S_{\rm c}$ is highly uncertain, mainly because the power-law index $\beta$ is only weakly constrained by current data. From the Parkes FRB sample, \citet{2018MNRAS.475.1427B} inferred a rate\footnote{We note that the reported fluxes in their sample are based on the assumption that the bursts occurred within the half-power width of the discovery beam. It was later realized that, at discovery, FRB121102 (the repeater) was in a side lobe where the sensitivity is $\sim$10\% of that at the beam center \citep{2014ApJ...790..101S, 2017Natur.541...58C}. Thus, the locations of some Parkes bursts may also be in the side lobes and hence their true fluxes are higher than those reported. Since the effective field of view (including side lobes) is larger, this will give a lower all-sky rate $\dot{N}_{\rm det}(\gtrsim S_{\rm th, eff})$ above a higher effective completeness threshold flux $S_{\rm th,eff}$. } of $\dot{N}_{\rm det}(\gtrsim S_{\rm th}) \sim 2\times10^{3} \rm\,sky^{-1}\,d^{-1}$ above the completeness threshold flux $S_{\rm th}\sim 3\mathrm{\,Jy\,GHz}$. Taking their rate at face value, we expect the all-sky rate near the flux level $S_{\rm c}\sim3\times10^3L_{\rm max,47}\rm\,Jy\,GHz$ to be $\dot{N}_{\rm exp}(\gtrsim S_{\rm c})\sim 2\times10^{3(2-\beta)}L_{\rm max,47}^{1-\beta}\rm\,sky^{-1}\,d^{-1}$. For $L_{\rm max} = 10^{47}\rm\,erg\,s^{-1}$ and $\beta = 1.7$ (or 2.3), the product of solid angle and observing time per FRB detection with $S\sim S_{\rm c}$ is $\sim$$20\rm\,sr\cdot hr$ (or $1.2\times10^3\rm\, sr\cdot hr$). \section{Discussion} In this section, we discuss some caveats of our simplified picture. As more data accumulate, these may become important issues to examine in detail in future work. (1) The signal-to-noise ratio of an FRB is determined by a combination of flux $S$ and duration $\tau$ as $\mathrm{SNR}\propto S\sqrt{\tau}$ \citep{2015MNRAS.447.2852K}. In eq. (\ref{eq:7}), the luminosity at the detection threshold $4\uppi D_{\rm L}^2 S$ (for a given redshift and flux) should include an additional factor $\propto \tau^{-1/2}$, and then we integrate over the intrinsic distribution of burst durations. We can see that the shape of the flux distribution function $\mathrm{d} \dot{N}_{\rm det}/\mathrm{d} S$ stays the same, as long as the intrinsic distribution of burst durations is not correlated with their luminosities (such a correlation has not been found in the literature). (2) The observed flux $S_{\rm obs}$ may be different from the intrinsic/unattenuated flux $S = L/4\uppi D_{\rm L}^2$ for a given redshift and luminosity. When there is significant scattering broadening, intra-channel dispersion smearing, absorption, insufficient time resolution, or when the location of the burst is far away from the center of the discovery beam, we have $S_{\rm obs}<S$. On the other hand, magnification bias due to lensing of FRBs by plasma structures in the host galaxies \citep{2017ApJ...842...35C} may lead to $S_{\rm obs}>S$ for a fraction of the observed bursts. Thus, the critical flux above which the luminosity function cut-off is noticeable in the source count may be different from the unattenuated flux $S_{\rm c}$ in eq. (\ref{eq:8}).
These effects make it harder to infer the maximum luminosity $L_{\rm max}$ from observations, but the existence of a cut-off in the luminosity function can still be tested. (3) We have assumed the luminosity function to be a single power-law with a cut-off at $L_{\rm max}$ and the power-law index to be in the range (1, 2.5). For other luminosity function models, eqs. (\ref{eq:7}) and (\ref{eq:8}) are generally valid. For instance, an alternative luminosity function is a broken power-law, and in this case $\beta\geq2.5$ is allowed on the high-luminosity end (as is the case for long gamma-ray bursts). Another possibility is that there is another cut-off at the low-luminosity end. In these cases, it is straightforward to solve eq. (\ref{eq:8}) for the critical flux $S_{\rm c}$ (which may be significantly different from that shown in Fig. \ref{fig:Fcutoff}) and determine where the deficit of high-luminosity FRBs above $L_{\rm max}$ will show up in the observed flux distribution $\mathrm{d}\dot{N}_{\rm det}/\mathrm{d} S$. (4) The observed flux distribution suffers from magnification bias due to strong gravitational lensing by intervening galaxies. For FRBs at $z\sim2$ (near the peak of the cosmic star-formation history), the optical depth for large magnification $\mu\gg 1$ is roughly $P(>\mu)\sim 10^{-3}\mu^{-2}$ \citep[e.g.][]{2011ApJ...742...15T}, which should be multiplied by the luminosity function $\mathrm{d}\dot{N}/\mathrm{d} \mathrm{ln}L\propto L^{1-\beta}$ to calculate the contribution to the source count at a given flux. If $\beta < 3$, then the majority of the lensed sources with apparent luminosity $\gg L_{\rm max}$ come from those sources with intrinsic luminosity $L\sim L_{\rm max}$ \citep{1992ARA&A..30..311B}. Thus, the observed flux distribution of lensed (L) FRBs should be $\dot{N}_{\rm det, L}(>S)\propto S^{-2}$ above the critical flux $S_{\rm c}$, which is steeper than $\dot{N}_{\rm det, NL}(>S)\propto S^{-1.5}$ for unlensed (NL) FRBs at $S\gg S_{\rm c}$. Therefore, the unlensed population always dominates at all flux levels, and magnification bias should not be a serious problem for constraining the cut-off luminosity $L_{\rm max}$. \section{Summary} In this \textit{Letter}, we provide a novel way to test the model in which FRBs are produced by coherent curvature emission powered by the dissipation of magnetic energy in the magnetosphere of NSs. In this model, the emitting particles are continuously accelerated by a quasi-static E-field parallel to the local B-field. We use simple arguments based on energy conservation and source coherence to show that the isotropic equivalent luminosity of an FRB is directly related to the parallel E-field strength. When this parallel E-field exceeds about 5\% of the quantum critical field strength, it is quickly shielded by Schwinger pairs on sub-ns timescales (and hence the FRB emission cannot be sustained). Based on this limiting E-field, we show that there is a maximum isotropic equivalent luminosity of $L_{\rm max}\sim (2\times 10^{47}\, \mathrm{erg\,s^{-1}})\, \mathrm{min}(\rho_6^2, B_{16} \rho_6^{4/3}\nu_9^{-2/3})$, where $\rho$ is the curvature radius of the magnetic field lines near the source region. Future observations can measure $L_{\rm max}$ and hence probe the source location and B-field strength. For the repeater FRB121102, this cut-off luminosity corresponds to a maximum flux of $S_{\rm max} = L_{\rm max}/4\uppi D_{\rm L}^2\sim 10^5L_{\rm max,47}\,$Jy.
Each repeating source may have a different $L_{\rm max}$ from the others, depending on the source location and B-field strength. We encourage monitoring the repeater for an extended amount of time with a low-sensitivity telescope. If the entire population of FRBs has a global luminosity function, then the cut-off luminosity $L_{\rm max}$ should be observable as a deficit of high-flux FRBs in the observed flux distribution. Taking the simplest case of a power-law luminosity function $\mathrm{d} N/\mathrm{d} L\propto L^{-\beta}$ as an example, we show that there is a critical flux $S_{\rm c}\sim (10^3${--}$10^4)L_{\rm max,47}\,$Jy, below and above which the cumulative flux distribution will be $\dot{N}_{\rm det}(>S)\propto S^{1-\beta}$ (for $S\ll S_{\rm c}$) and $\dot{N}_{\rm det}(>S)\propto S^{-1.5}$ (for $S\gg S_{\rm c}$). Bright FRBs near or above the critical flux $S_{\rm c}$ have a much lower all-sky rate than those currently detected. Extrapolating the rate of Jy-level FRBs to higher fluxes and assuming $L_{\rm max} = 10^{47}\rm\,erg\,s^{-1}$, we estimate the detection rate of bright FRBs near $S_{\rm c}$ by ASKAP \citep[sky coverage $\Omega/4\uppi\sim 4\times10^{-3}$ at 0.7-1.8 GHz,][]{2017ApJ...841L..12B} to be $0.06\rm\,d^{-1}$ for $\beta = 1.7$ and $0.001\rm\,d^{-1}$ for $\beta = 2.3$. The rate for CHIME \citep[sky coverage $\Omega/4\uppi\sim 7\times10^{-3}$ at 400{--}800 MHz,][]{2018arXiv180311235T} may be slightly higher. We encourage searching for ultra-bright FRBs with low-sensitivity telescopes with large fields of view. \section*{Acknowledgements} We thank Vikram Ravi for useful discussions. We also thank the referee for comments which improved the clarity of the presentation. This research benefited from interactions at the ZTF Theory Network Meeting, funded by the Gordon and Betty Moore Foundation through Grant GBMF5076. W.L. was supported by the David and Ellen Lee Fellowship at Caltech. \bibliographystyle{mnras}
\section{Introduction} Object proposal generation is a common pre-processing step for object detection in images, which is a key challenge in computer vision. Object proposals dramatically decrease the number of detection hypotheses to be assessed. Thus, the use of CNN features \cite{girshick14CVPR}, which are more effective but computationally expensive, has turned out to be feasible for accurate detection. For the detection of video objects, proposals not only need to consider the space-time complexity, but also need to address temporal consistency. We propose generating Video Object Proposals (VOP) by scoring candidate windows based on spatio-temporal edge content, and show that these VOPs help in learning better video object detectors. Further, we propose an efficient online clustering of these proposals in order to process arbitrarily long videos. \begin{figure} \begin{center} \includegraphics[scale=0.45]{OVERLAP.png} \end{center} \setlength{\abovecaptionskip}{12pt} \caption{OVERLAP: Object class labels get propagated by streaming clustering of temporally consistent proposals. The system classifies only those VOPs which belong to a new cluster.} \label{OVERLAP framework} \vspace{-2.5mm} \end{figure} We show that the joint analysis of all such windows provides a way towards multiple object segmentations and helps in reducing test-time object detection complexity. We divide a video into sub-sequences with one-frame overlap in a streaming fashion \cite{XuXiCoECCV2012, StreamGBHppST14}. We analyze all candidate windows jointly within a sub-sequence, followed by affinity-based clustering, to produce temporally consistent clusters of object proposals at every video frame. The advantage of performing a streaming spatio-temporal clustering on the object proposals is that it enables easy label propagation through the video in an online framework. Presumably, all object proposals of a cluster have the same object class type. We propose deep-learning based video object detection through object class label propagation using online clustering of VOPs. As opposed to applying R-CNN \cite{girshick14CVPR}-like approaches, which essentially classify every window of every video frame based on the expensive CNN features, the proposed method of label propagation requires detection/classification only on video frames that have new clusters (Fig. \ref{OVERLAP framework}). Our main contributions are as follows: \vspace{-2.5mm} \begin{itemize} \item We present a simple yet effective Video Object Proposal (VOP) method for detecting moving and static video objects by quantifying the spatio-temporal edge content; \vspace{-2.5mm} \item We present a \textbf{novel algorithm}, ``Objects in Video Enabler thRough LAbel Propagation'' (OVERLAP), that exploits object class label propagation through streaming clustering of VOPs to efficiently detect objects in video with temporal consistency. \end{itemize} \vspace{-2.5mm} We also present the following minor contributions: \vspace{-2.5mm} \begin{itemize} \item Demonstrating VOP's efficacy in learning a better CNN-based video object detector model; \vspace{-2.5mm} \item Object segmentation as a by-product of the object detection framework, OVERLAP. \end{itemize} \section{Related Works} There are several approaches towards video object detection, which broadly fall into three categories: (1) image object proposals for each frame, (2) motion segmentation in video, and (3) supervoxel aggregation.
\textbf{Object Proposals and detection:} Unsupervised category-independent detection proposals have been shown to be effective for object detection in images. Some of these methods are Objectness \cite{Objectness2010CVPR}, category-independent object proposals \cite{Endres10ECCV}, SelectiveSearch \cite{SelectiveSearch2013IJCV}, MCG \cite{APBMM2014}, GOP \cite{GOP2014}, BING \cite{BingObj2014}, and EdgeBoxes \cite{Dollar2014ECCV}. A comparative literature survey on object proposal methods and their evaluations can be found in \cite{Hosang2014Bmvc,Hosang2015pami}. Although there is no ``best'' detection proposal method, EdgeBoxes, which scores windows based on edge content, achieves a better balance between recall and repeatability. Applying image object proposals directly to each frame in a video may be problematic due to time complexity and temporal consistency. In addition, issues like motion blur and compression artifacts can pose significant obstacles to identifying spatial contours, which degrades the object proposal quality. Recent advances like SPPnet \cite{SPP_NET_HeZR014}, Fast R-CNN \cite{fast_RCNN_15}, and Faster R-CNN \cite{Faster_RCNN_RenHG015} have dramatically reduced the running time by computing deep features for all image locations at the same time and snapping them on appropriate proposal boxes. Per-frame object detection still needs classification of proposal windows, and temporal consistency still remains a challenge. The proposed framework dispenses with the need to classify every candidate window of every video frame through spatio-temporal clustering, thus addressing temporal consistency. \textbf{Motion Segmentation in Video:} Motion-based segmentation is the task of separating moving foreground objects from the background. Several popular methods of motion segmentation include the layered Directed Acyclic Graph (DAG) based framework \cite{Zhang13CVPR}, Maximal weight cliques \cite{Ma12CVPR}, fast motion segmentation \cite{Papazoglou13ICCV}, tracking many segments \cite{Li_2013_ICCV}, identifying key segments \cite{key_segments_Lee11}, and many more. Although video motion segmentation can detect moving foregrounds robustly, it is not easy to detect multiple objects, or objects that suddenly stop or change motion abruptly. \textbf{Supervoxel aggregation:} Spatio-temporal object proposals have been considered in the context of aggregating supervoxels with spatio-temporal connectivity between neighboring labels. Jain {\em et al.} \cite{Jain14CVPR} developed an extension of the hierarchical clustering method of SelectiveSearch \cite{SelectiveSearch2013IJCV} to obtain object proposals in video. Even though their independent motion evidence effectively segments moving objects from the background, static objects cannot be recovered. Oneata {\em et al.} \cite{SpTmp-Obj_prop14} presented spatio-temporal object proposals generated by a randomized supervoxel merging process. Sharir {\em et al.} \cite{VidObjProp12} proposed the extension of category-independent object proposals \cite{Endres10ECCV} from images to video by extracting object proposals at each frame and linking them across frames into object hypotheses in a framework of graph-based segmentation using higher-order potentials, leading to a high computational expense. All these supervoxel methods often cannot replace the object proposal step for object detection, either due to their complexity or the associated over-segmentation.
\section{Video Object Proposals} We extend EdgeBoxes \cite{Dollar2014ECCV} from generating image object proposals to video object proposals. In addition to the spatial edge responses, $\mathbf{E_s}$, at every pixel in EdgeBoxes \cite{Dollar2014ECCV}, we consider exploiting temporal edge responses, $\mathbf{E_t}$, at every pixel location using mid-range optical flow analysis: \vspace{-2.5mm} \begin{equation} \label{edge_define} \mathbf{E_s}, \mathbf{E_t} \in \mathbb{R}^{M\times N}_{\geq 0}~, \end{equation} where $M$ and $N$ are the height and width of an image. \subsection{Spatio-Temporal Contours and VOP} \label{spatio-temp-contours} The optical flow field between pairs of consecutive frames provides approximate motion contours. We perform 2-frame forward optical flow \cite{TBroxOF11} for every consecutive frame-pair. At every pixel, the magnitude of the flow field's gradient and the difference in direction of motion from its neighbor \cite{Papazoglou13ICCV} contribute to the measure of the motion contour. To address the incompleteness and inaccuracies of two-frame optical flow estimation, we analyze mid-range optical flow over a subset of video frames. Within a sub-sequence, we approximate which pixels consistently reside inside a moving object using inside-outside maps \cite{Papazoglou13ICCV}. In our experiments, the inside-outside maps, accumulated over 3 - 5 frames, provide good estimates of time-consistent gross location priors for moving objects. In Section \ref{Streaming_Clustering}, we describe how this mid-range accumulation can effectively be exploited for object label propagation through streaming clustering. A simple edge detector applied to this location prior yields the temporal edge, $\mathbf{E_t}$. \begin{figure} \begin{center} \includegraphics[scale=0.42]{spatio-temporal-edgebox-1} \includegraphics[scale=0.42]{spatio-temporal-edgebox-2} \end{center} \caption{Spatio-Temporal EdgeBoxes. Clockwise from top-left: (i) A video frame from the Youtube-Objects dataset, (ii) Temporal edge $\mathbf{E}_t$, \ie the normalized gradient of the location prior from motion analysis, (iii) Spatial edge $\mathbf{E}_s$, \ie structured edge detection on the video frame, (iv) The linear combination of the spatial and temporal edges, $\mathbf{E}$, brings out the prominent contours of both static and moving objects. } \label{fig:spatio-temporal-edgebox} \end{figure} EdgeBoxes \cite{Dollar2014ECCV} employs efficient data structures to score millions of candidates based on the difference between the number of spatial contours that exist in a box and those that straddle the box's boundary. We use a similar scoring strategy, but on the spatio-temporal edge $\mathbf{E} \in \mathbb{R}^{M\times N}_{\geq 0}$, which is formed according to Eq. \eqref{spatio-temp-edge}: \vspace{-2.5mm} \begin{equation} \label{spatio-temp-edge} \mathbf{E} = \lambda \mathbf{E_t} + (1-\lambda)\mathbf{E_s}~,~~~\lambda \in [0,1]~. \end{equation} As the value of $\lambda$ increases, the system favors detecting only moving objects. One example of a spatio-temporal edge map is demonstrated in Figure \ref{fig:spatio-temporal-edgebox}. A linear combination of the spatial and temporal edge responses represents spatio-temporal contours. This enables a simple yet efficient strategy for scoring based on spatio-temporal edge content through edge groups. We find intersecting edge groups along horizontal and vertical boundaries using two efficient data structures \cite{Dollar2014ECCV}. We use the integral image-based implementation to speed up the scoring of boxes in a sliding-window fashion.
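To make the construction concrete, the sketch below (Python/NumPy) shows the edge-combination step of Eq. \eqref{spatio-temp-edge}; the structured spatial edge map $\mathbf{E_s}$ and the accumulated inside-outside map are assumed to be computed elsewhere, and the box score shown is a simplified stand-in for the full edge-group scoring of EdgeBoxes:
\begin{verbatim}
import numpy as np

def temporal_edge(location_prior):
    # Gradient magnitude of the accumulated inside-outside map,
    # normalized to [0, 1]; a simple stand-in for an edge detector
    gy, gx = np.gradient(location_prior.astype(np.float64))
    e = np.hypot(gx, gy)
    return e / (e.max() + 1e-12)

def spatio_temporal_edge(E_s, location_prior, lam=0.2):
    # Spatio-temporal edge: E = lam * E_t + (1 - lam) * E_s
    return lam * temporal_edge(location_prior) + (1.0 - lam) * E_s

def box_score(E, y0, x0, y1, x1):
    # Simplified stand-in score: edge mass well inside the box minus
    # edge mass in a thin band at the box boundary (the real system
    # scores whole edge groups, not raw edge mass)
    I = np.pad(E, ((1, 0), (1, 0))).cumsum(0).cumsum(1)  # summed-area table
    def s(r0, c0, r1, c1):            # sum of E[r0:r1, c0:c1]
        return I[r1, c1] - I[r0, c1] - I[r1, c0] + I[r0, c0]
    m = max(1, (y1 - y0) // 10)       # boundary band width
    inner = s(y0 + m, x0 + m, y1 - m, x1 - m)
    return inner - (s(y0, x0, y1, x1) - inner)
\end{verbatim}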
As described in the next section, object proposals based on these spatio-temporal contours outperform those based on only spatial contours for video object detection. The presence of motion blur in spatial edges affects the performance of spatial-contour-based proposal prediction in video frames. In practice, $\lambda=0.2$ to $0.5$ works well for the Youtube-Objects dataset. \section{Learning Video Object Detector Model} We aim to detect objects in generic consumer videos. Due to the domain shift issues between images and video frames \cite{KalogeitonFS15}, our 10-class video object detection uses supervised pre-training with the ImageNet reference model for classification, and fine-tuning on annotated frames from the Youtube-Objects dataset v2.0 \cite{youtube-Objects, KalogeitonFS15, ObjClsDet2012} for video object detection. \textbf{Youtube-Objects dataset.} The dataset is composed of videos collected from Youtube by querying for the names of 10 object classes of the PASCAL VOC Challenge. It contains 155 videos in total and between 9 and 24 videos for each class. The duration of each video varies between 30 seconds and 3 minutes. However, only $6087$ frames are annotated with a bounding-box around an object instance. Hence, the number of annotated samples is approximately 4 times smaller than in PASCAL VOC. The bottom-up region proposal methods play an important role. Motion blur and compression artifacts affect the quality of spatial edges in video frames; thus, generating good object proposals becomes more challenging. It is to be noted that R-CNN \cite{girshick14CVPR} and Fast R-CNN \cite{fast_RCNN_15} are fine-tuned for the image object detection task, specifically for the 20-class PASCAL VOC image object categories, which are a superset of the Youtube-Objects categories. \textbf{Feature extraction.} We extract a 4096-dimensional feature vector corresponding to each region proposal using the GPU-based (GeForce GTX 680) Caffe \cite{Caffe13} implementation of the CNN described by Krizhevsky \etal \cite{AlexNet12}. Features are computed by forward propagating a mean-subtracted 227 $\times$ 227 R-G-B image through five convolutional layers and two fully connected layers. \textbf{Region Proposals.} We use approximately $2000$ candidate proposals per video frame to be processed for learning detectors. We investigate the object detection model with different region proposal methods such as Selective Search \cite{SelectiveSearch2013IJCV} and EdgeBoxes \cite{Dollar2014ECCV}. As the resolution of different videos varies from VGA to HD, we re-size every video frame to $500\times 500$ before performing the proposal generation task. \textbf{Training.} We discriminatively pre-train the CNN on a large auxiliary dataset (ILSVRC2012 classification) using image-level annotations, followed by domain-specific fine-tuning, replacing the last layer of the AlexNet \cite{AlexNet12} model with a $(10+1)$-way softmax output layer. We use two-step initialization for fine-tuning as described in \cite{InitializationTip14}. As per the PASCAL detection criteria, we treat all region proposals with $\geq$ 0.5 IoU overlap with a ground-truth box as positives for the class of that box and the rest as negatives. Once features are extracted and training labels are applied, we optimize one linear SVM per class. \textbf{Test time detection.} During test time, approximately $500$ to $2000$ VOPs are generated. Then forward propagation is performed through the CNN to compute features.
Finally, we perform scoring using the per-class trained SVMs, similar to \cite{girshick14CVPR}, followed by non-maximum suppression. In Section \ref{OVERLAP} below, we describe an alternative test-time detection scheme, OVERLAP, based on object class label propagation. \section{OVERLAP: Objects in Video Enabler thRough LAbel Propagation } \label{OVERLAP} The classical approach to object localization has traditionally been image window classification, where each window is scored independently of other candidate windows. Recently, more success in object detection has been reported by considering spatial relations to all other windows and their appearance similarity \cite{VezhnevetsF15} with exemplar-based associative embedding. Our approach to video object detection considers spatial relationships and appearance similarity between windows within a frame, and even across nearby video frames, yet in a much simpler way through spatio-temporal clustering, to avoid classifying every candidate window. \subsection{Joint analysis of windows} We aim to detect dissimilarity between VOPs generated within a sub-sequence based on a simple underlying principle: proposals corresponding to the same object exhibit higher statistical dependencies than proposals belonging to different objects. As a motivation for our approach, we consider generating proposal boxes by perturbing the ground-truth locations of PASCAL VOC 2007 objects' bounding boxes and observe the statistical association of those proposal boxes. Let $A$ and $B$ denote generic features of neighboring proposal windows, where neighborhood is characterized by a non-zero Intersection-over-Union ($u$). We investigate the joint distribution over pairings $\{A,B\}$. Let $\mathit{p}(A,B;u)$ be the joint probability of features $A$ and $B$ of windows with spatial overlap value $u$. Then, $P(A,B)$ could ideally be computed as: \vspace{-2.5mm} \begin{equation} \label{joint_density} P(A,B) = \frac{1}{Z} \int_{0}^{1} {w(u) \mathit{p}(A,B; u)du} \end{equation} where $u \in [0,1]$, $w$ is a weighting function with $w(0) = 0$, and $Z$ is a normalization constant. To simplify the process, we use a uniform weighting function and work in the discrete (quantized) space of $u$, replacing the integral with a summation. We take the marginals of the distribution to get $P(A)$ and $P(B)$. Motivated by the analysis presented in the crisp boundary detection work by Isola \textit{\etal} \cite{crisp_boundaries}, we model affinity with a point-wise mutual information like function: \vspace{-2.5mm} \begin{equation} \label{PMI} PMI_{\rho}(A,B) = \log \frac{P(A,B)^{\rho}} {P(A) P(B)} \end{equation} We choose the value of $\rho$ to be $1.2$, which produces the best performance on the PASCAL VOC dataset with perturbed ground-truth proposals. In order to identify the boundary between two features, the model needs to be able to capture the low-probability regions of $P(A,B)$. We use a non-parametric kernel (Epanechnikov) density estimator. The number of sample points equals the number of overlapping candidate windows. We perform affinity-based clustering afterwards. The affinity matrix, $\mathbf{W}$, for a sub-sequence is created from the affinity function, $PMI_{\rho}$, as follows: \vspace{-2.5mm} \begin{equation} \label{affinity_matrix} \mathbf{W}_{i,j} = e^{PMI_{\rho}(\mathbf{f}_i, \mathbf{f}_j)} \end{equation} where $i$ and $j$ are the indices of proposal windows and $\mathbf{f}$ is the feature vector defined for a proposal window.
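A minimal sketch of this affinity computation is given below (Python, with scikit-learn's KernelDensity as the non-parametric Epanechnikov estimator; the per-window feature vectors $\mathbf{f}$, e.g. color histograms, are assumed given, and the marginals are fit directly to single-window features rather than integrated out of the joint, which is a simplification of Eq. \eqref{joint_density}):
\begin{verbatim}
import numpy as np
from sklearn.neighbors import KernelDensity

def iou(a, b):
    # Intersection-over-union of boxes (x0, y0, x1, y1)
    iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = iw * ih
    union = ((a[2]-a[0])*(a[3]-a[1]) + (b[2]-b[0])*(b[3]-b[1]) - inter)
    return inter / union if union > 0 else 0.0

def pmi_affinity(feats, boxes, rho=1.2, bw=0.1):
    # Affinity matrix W = exp(PMI_rho); feats is an (n_windows, d)
    # array, boxes a list of (x0, y0, x1, y1) tuples
    n = len(boxes)
    pairs = [(i, j) for i in range(n) for j in range(i + 1, n)
             if iou(boxes[i], boxes[j]) > 0]
    AB = np.array([np.r_[feats[i], feats[j]] for i, j in pairs] +
                  [np.r_[feats[j], feats[i]] for i, j in pairs])  # symmetrize
    joint = KernelDensity(kernel='epanechnikov', bandwidth=bw).fit(AB)
    marg = KernelDensity(kernel='epanechnikov', bandwidth=bw).fit(feats)
    W = np.eye(n)
    for i, j in pairs:
        log_pab = joint.score_samples(np.r_[feats[i], feats[j]][None, :])[0]
        log_pa = marg.score_samples(feats[i][None, :])[0]
        log_pb = marg.score_samples(feats[j][None, :])[0]
        W[i, j] = W[j, i] = np.exp(rho * log_pab - log_pa - log_pb)
    return W
\end{verbatim}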
Figure \ref{fig:affinity-based-clustering} shows an example of spatial clustering of proposal windows. The boxes drawn in the same color correspond to the same cluster. In the streaming VOP clustering framework for Youtube videos, we perform the joint analysis on all proposals within a sub-sequence. Intuitively, this is a simple yet effective clustering technique which works on actual test proposals (see Figure \ref{fig:Streaming-VOP}), not simply on perturbed ground-truth bounding boxes. It is to be noted that the proposed method measures the affinity between different object proposal windows (of varying sizes) within a video sub-sequence (in different video frames), unlike \cite{crisp_boundaries}, where the affinity is between neighboring pixels in an image. \begin{figure} \begin{center} \includegraphics[scale=0.55]{affinity-based-clustering_old} \end{center} \caption{Synthetic experiment using perturbed ground-truth locations in PASCAL VOC showing affinity-based clustering of candidate windows. The clustering technique works efficiently, with some exceptions where overlapping object instances share very similar colors. } \label{fig:affinity-based-clustering} \end{figure} \begin{figure} \begin{center} \includegraphics[scale = 0.47]{segmentation_mask1-1}\\ \vspace{-0.1cm} \includegraphics[scale = 0.42]{segmentation_mask1-2} \includegraphics[scale = 0.42]{segmentation_mask1-3}\\ \vspace{-0.1cm} \includegraphics[scale = 0.42]{segmentation_mask1-4} \includegraphics[scale = 0.42]{segmentation_mask1-5}\\ \vspace{0.1cm} \includegraphics[scale = 0.47]{segmentation_mask2} \end{center} \caption{Segmentation masks from clustered object proposals. } \label{fig:segmentation_mask1} \vspace{-2.5mm} \end{figure} We demonstrate that object segmentation can be achieved as a by-product of this clustering algorithm. We cast the segmentation problem as random-field based background-foreground segmentation without manual labeling \cite{SegProp12}. The uniformly weighted sum of the locations of all windows corresponding to a unique cluster defines the foreground location prior for that cluster. However, unlike \cite{SegProp12}, in our approach the location prior does not come from the global neighbors of the image but from the image itself, and the clustering allows multiple object segmentations. The segmentation works well if the proposal boxes tightly enclose an object, as shown in Figure \ref{fig:segmentation_mask1}; some failure cases, shown in Figure \ref{fig:segmentation_mask3}, occur where the proposal boxes do not tightly enclose the object in a cluttered background. Figure \ref{fig:youtube_segmentation} shows two segmentation masks generated on individual video frames from the clustered video object proposals produced by the proposed VOP on real videos. \begin{figure} \begin{center} \includegraphics[scale = 0.34]{segmentation_mask3} \end{center} \caption{Segmentation mask generation is not successful where proposal boxes do not tightly enclose the actual object. This happens in cluttered-background cases. } \label{fig:segmentation_mask3} \vspace{-2.5mm} \end{figure} \begin{figure} \begin{center} \includegraphics[scale = 0.48]{youtube_segmentation} \end{center} \caption{Two segmentation masks from clustered video object proposals from the Youtube video ``Bird and Cat'' for frames \#5, \#20 and \#45.
} \label{fig:youtube_segmentation} \vspace{-2.5mm} \end{figure} \subsection{Streaming clustering of proposals} \label{Streaming_Clustering} One of the main contributions of this paper is a simple, principled, and unsupervised approach to spatio-temporal grouping of candidate regions in a streaming fashion. We describe a clustering framework which enforces a Markovian assumption on the video stream to approximate a batch grouping of VOPs. A video is divided into a series of sub-sequences, each with a one-frame overlap with the previous sub-sequence, as described in \cite{XuXiCoECCV2012}. VOP clustering within the current sub-sequence depends on the results from only the previous sub-sequence. We consider a sub-sequence length of 3 to 5 frames as a trade-off between quality and complexity. This is the same sub-sequence volume where mid-range motion analysis is performed for detecting temporal edges (Section \ref{spatio-temp-contours}). The color-histogram features are used for estimating the joint probability between any overlapping window pair, and affinity-based clustering is performed afterwards. There are two important aspects of this streaming clustering method. The first is generic to any clustering algorithm, \ie how to select the number of clusters; the second is specific to the streaming method, \ie how to associate a cluster of the current sub-sequence with any of the clusters of previous and/or future sub-sequences. \vspace{-2.5mm} \subsubsection{Number of clusters.} Common consumer or Youtube videos contain a limited number of moving objects, often fewer than five. The Youtube-Objects dataset contains a maximum of $3$ object instances and quite often a single moving object. We assume the presence of at most 5 objects to keep the computational complexity tractable and amenable to practical applications. We explore two modes of operation. The first uses a fixed number of clusters, $k=5$, with careful initialization of cluster centers using k-means++ \cite{Kmeanspp07} during spectral clustering. The second is spectral clustering with self-tuning \cite{ZPClustering07}. We observe that while self-tuning outperforms the fixed-cluster-number case for hypothetically good object windows (such as the perturbed ground-truth regions for PASCAL VOC), both modes perform similarly in the case of real object proposals generated by a proposal method. \vspace{-2.5mm} \subsubsection{Cluster Label Association.} In the streaming framework, any sub-sequence except the first needs to address the problem of either associating a cluster with a cluster number in the previous sub-sequence or generating a new one. For every cluster, we perform density estimation with an Epanechnikov kernel, using the KD-tree implementation from \cite{KDE}, over the 4-dimensional location (2D center, height, and width) and the 45-bin color histogram (15 bins per color channel) of the proposal regions belonging to that cluster. If the minimum KL-divergence between a distribution of the current cluster and a cluster from the previous sub-sequence is less than a threshold, we perform the cluster assignment. Otherwise, we create a new cluster. It is to be noted that, for detecting primarily moving objects in videos, the weight of the temporal edges could be as high as 0.7 or more, as described in Section \ref{spatio-temp-contours}. In such cases, considering as few as 100 VOPs can potentially detect moving objects.
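A minimal sketch of this association step is given below (Python; scikit-learn's KernelDensity with an Epanechnikov kernel is used here as a stand-in for the KD-tree implementation of \cite{KDE}, with a plug-in sample estimate of the KL-divergence; the feature scaling and dimensionality reduction discussed next are omitted):
\begin{verbatim}
import numpy as np
from sklearn.neighbors import KernelDensity

def cluster_density(X, bw=0.25):
    # Kernel density over the proposal descriptors (4-D location plus
    # 45-bin color histogram) assigned to one cluster; X is kept for
    # the plug-in KL estimate below
    return KernelDensity(kernel='epanechnikov', bandwidth=bw).fit(X), X

def kl_estimate(p, q, floor=-700.0):
    # Plug-in estimate of KL(p||q) from p's own sample points; compact
    # support kernels can give -inf log-densities, hence the floor
    kde_p, Xp = p
    kde_q, _ = q
    lp = np.maximum(kde_p.score_samples(Xp), floor)
    lq = np.maximum(kde_q.score_samples(Xp), floor)
    return float(np.mean(lp - lq))

def associate(curr, prev, thresh, next_label):
    # Give each current cluster the label of the closest previous
    # cluster if min KL < thresh; otherwise open a new cluster label
    labels = {}
    for cid, dens in curr.items():
        kls = {pid: kl_estimate(dens, pd) for pid, pd in prev.items()}
        best = min(kls, key=kls.get) if kls else None
        if best is not None and kls[best] < thresh:
            labels[cid] = best
        else:
            labels[cid] = next_label
            next_label += 1
    return labels, next_label
\end{verbatim}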
Clusters may contain fewer proposal windows than the dimension of the original feature space, which is 49-dimensional. Thus we perform PCA-based dimensionality reduction before estimating the distribution. We also scale the features to ease the process of selecting kernel evaluation points. \subsection{Object Label Propagation} Time-consistent clustering enables object label propagation through the video. We perform CNN-based object detection, \ie classification of every window in the CNN feature space, at a video frame only when we encounter a new cluster label. An assigned cluster label means the object category is the same as what was already detected in the associated cluster of the previous sub-sequence. We still need to perform the localization, however. In order to address the localization, we fit a 4-D Gaussian distribution on the location parameters, \ie center $(x,y)$, height $(h)$ and width $(w)$, of the windows in a cluster. We simply keep track of the distance, $\mathbf{d}$, of the detected final object location (after first-time detection using an R-CNN-like approach) from the mean of the fitted Gaussian for every cluster. We then localize the object by adding $\mathbf{d}$ to the mean of the 4-D Gaussian location distribution of the cluster in the current sub-sequence. In general videos, new objects do not appear in every video frame. Thus, we do not need to detect objects at every video frame. Even when a new object appears, we need to detect/classify only the proposals assigned to the new cluster. Thus, the OVERLAP framework needs to process CNN features for only a small fraction of the number of proposals generated. In some sense, the spatio-temporal clustering for object detection is related to tracking. However, a set of windows is tracked instead of a single region/object. The ability to bypass the critical tracker-initialization step and the possibility of applying R-CNN-like detection at a more frequent and configurable interval to increase the detection accuracy (if needed) are the major advantages. \section{Experimental Results} \subsection{Video Object Detector using VOP} We observe that the proposed VOP helps in learning a better object detector model. Table \ref{table:youtube-detection} shows the per-class detection accuracy and the mean Average Precision (mAP) for the 10-class Youtube-Objects Test set \cite{youtube-Objects}. \begin{figure*}[t] \begin{center} \includegraphics[scale=0.8]{Obj_detect} \end{center} \begin{center} \includegraphics[scale=0.3]{failure-det-1} \includegraphics[scale=0.3]{failure-det-2} \includegraphics[scale=0.3]{failure-det-3} \end{center} \caption{ Sample results of Video Object Detection with VOP.
The first 6 rows show successful detection cases and the last row shows false detection cases.} \label{fig:Obj-det-VOP} \vspace{-2.5mm} \end{figure*} \begin{table}[!h] \centering \begin{tabular}{ |p{1cm}|p{0.8cm}|p{0.8cm}|p{1.1cm}|p{1.1cm}|p {1.1cm}| } \hline classes & R-CNN & DPM & Fine-tune SS & Fine-tune EB & Fine-tune VOP \\ \hline plane & 14.1 & 28.42 & 25.57 & 26.52 & \textbf{29.77}\\ bird & 24.2 & \textbf{48.14} & 27.27 & 27.27 & 28.82\\ boat & 16.9 & 25.50 & 27.52 & 33.69 & \textbf{35.34}\\ car & 27.9 & \textbf{48.99} & 35.18 & 36 & 41\\ cat & 17.9 & 1.69 & 25.02 & 27.05 & \textbf{33.7}\\ cow & 28.6 & 19.24 & 43.01 & 44.76 & \textbf{57.56} \\ dog & 12.2 & 15.84 & 24.05 & 27.07 & \textbf{34.42}\\ horse & 29.4 & 35.10 & 41.84 & 44.82 & \textbf{54.52}\\ mbike & 21.3 & \textbf{31.61} & 26.70 & 27.07 & 29.77\\ train & 13.2 & \textbf{39.58} & 20.48 & 24.93 & 29.23\\ \hline mAP & 20.57 & 29.41 & 29.67 & 31.92 & \textbf{37.413}\\ \hline \end{tabular} \setlength{\abovecaptionskip}{15pt plus 3pt minus 2pt} \caption{Object detection results on the Youtube-Objects test set. The pre-trained R-CNN detector \cite{girshick14CVPR} is downloaded from \cite{R-CNN-model}. Detection results using the Deformable Parts Model (DPM) \cite{DPM10} are from \cite{KalogeitonFS15}. Fine-tune SS uses fine-tuning with Selective Search proposals (similar to R-CNN), Fine-tune EB uses fine-tuning with EdgeBoxes proposals, and Fine-tune VOP uses fine-tuning with the proposed Video Object Proposals ($\lambda = 0.2$).} \label{table:youtube-detection} \vspace{-2.5mm} \end{table} Fine-tuning on the Youtube-Objects training data with Selective Search proposals \cite{SelectiveSearch2013IJCV} improves the detection results by at least $9$\%\ compared with the model fine-tuned for the image dataset PASCAL VOC. The detector learned with EdgeBoxes performs better than the one learned with Selective Search proposals. However, the detector learned with VOP improves the detection rate by another $5.5$\%\ and achieves state-of-the-art detection accuracy ($37.4$\%) on this dataset. Although the detection accuracy for ``cat'', ``cow'', ``dog'' and ``horse'' has improved by a huge margin after using CNN features, categories like ``train'' and ``bird'' are still best detected using the DPM \cite{DPM10} detector. \begin{table*}[t] \centering \begin{tabular}{|c| c|c| c|c| c|c|c|c| c|c|c|c|} \hline \multirow{3}{*}{} & \multirow{3}{*}{PF SS} & \multirow{3}{*}{PF EB} & \multicolumn{2}{|c|}{PF VOP} &\multicolumn{8}{|c|}{OVERLAP}\\ \cline{4-13} & & & \multirow{2}{*}{CPU OF} & \multirow{2}{*}{GPU OF} & \multicolumn{4}{|c|}{CPU Optical Flow} & \multicolumn{4}{|c|}{GPU Optical Flow}\\ \cline{6-13} & & & & &200 V &500 V&1K V &2K V &200 V &500 V &1K V &2K V\\ \hline Prop time & 10 & 0.3 & 3.8 & 1.3 & \multicolumn{4}{|c|}{3.8} & \multicolumn{4}{|c|}{1.3}\\ \hline Overall time & 30 & 20.3 & 23.8 & 21.3 & 6.2 & 9.3& 14.6 & 28.0 & 3.7 & 6.8 &12.1 & 26.5\\ \hline mAP & 29.62 & 31.95 & 37.72 & 37.72 & 28.59 & 33.59 & 35.82 & 36.63 & 28.59 & 33.59 & 35.82 & 36.63\\ \hline \end{tabular} \setlength{\abovecaptionskip}{15pt plus 3pt minus 2pt} \caption{Complexity and accuracy comparison. Proposal generation time (Prop time) and overall per-frame detection time are shown for the per-frame (PF) baseline methods with Selective Search (SS) proposals, EdgeBoxes (EB) proposals, the proposed VOPs, and for OVERLAP. Baseline methods use 2000 proposals per frame. GPU OF and CPU OF denote GPU-based and CPU-based optical flow (OF), respectively. mAP increases as the number of VOPs increases.
About a 3$\times$ speedup is achieved with 500 VOPs, with only a 4\%\ drop in mAP compared to the baseline per-frame detection.} \label{table:OVERLAP-comparison} \end{table*} \subsection{Streaming Clustering of VOP} Figure \ref{fig:Streaming-VOP} shows the results of frame-level clustering vs.\ streaming clustering at the sub-sequence level on arbitrary videos downloaded from Youtube. In these experiments, a sub-sequence contains 3 video frames, with an overlap of one frame with the previous sub-sequence. We use a high $\lambda$ value ($0.8$) to identify only the moving objects with very few proposals. For clear visualization, we use only 50 proposal windows at every frame, which makes fewer than 200 proposals per sub-sequence. We aim to take advantage of a fast approximate spectral clustering algorithm which scales linearly with the problem size. In our current implementation, clustering takes less than 0.1 seconds for a 3-frame streaming volume with 50 VOPs per frame. \begin{figure} \begin{center} \includegraphics[scale = 0.5]{streaming_VOP} \end{center} \caption{Temporal consistency in streaming clustering of VOPs on arbitrary videos ``Horse riding'', ``Bird-cat'' and ``Alaskan bear'' downloaded from the Internet. Windows drawn in the same color belong to the same cluster. The top row of every pair shows the proposals clustered at the individual-frame level (frames \#2, \#10, \#25 in each case); the bottom row shows the results of streaming clustering. } \label{fig:Streaming-VOP} \vspace{-2.5mm} \end{figure} \subsection{Video Object Detection} To investigate the relative detection rate of OVERLAP compared with the frame-wise R-CNN-like approach, we create a subset (955 frames) of the Youtube-Objects test set (1783 frames) where the video frames form a valid video play. We find that for OVERLAP, CNN feature extraction and classification is needed for only $10$\%\ to $30$\%\ of all windows. Table \ref{table:OVERLAP-comparison} corroborates the fact that the detection accuracy improves as we increase the number of VOPs from 200 to 2000 in OVERLAP, at the cost of the increased complexity needed for spectral clustering. The difference in mAP is between $1$\%\ and $9$\%. As an example, compared to per-frame detection, OVERLAP achieves about a 3$\times$ speedup at the cost of only a 4\%\ drop in mean Average Precision (mAP) with 500 VOPs per frame. The non-GPU-based spectral clustering implementation in MATLAB makes the cases with more than 2000 VOPs even slower than per-frame R-CNN. Per-frame proposal generation with Selective Search and EdgeBoxes takes about 10 seconds and 0.3 seconds, respectively \cite{Hosang2015pami}. The overall per-frame object detection time in R-CNN then becomes about 30 seconds and 20.3 seconds with the corresponding methods. Generation of VOPs requires optical flow, which takes 3.5 sec per frame in the CPU implementation and 1 sec per frame in the GPU implementation for frame pairs of about $500\times500$ resolution. ``CPU'' and ``GPU'' in Table \ref{table:OVERLAP-comparison} denote optical flow implementations on the CPU \cite{TBroxOF11} and GPU \cite{GPU_Optical_flow_10}, respectively. The Youtube-Objects dataset mostly contains moving objects. In addition, the test dataset does not contain a significant number of test cases where multiple instances of objects with similar appearance spatially overlap. Thus, the accuracy of OVERLAP approaches the baseline per-frame detection accuracy as we increase the number of VOPs.
For the fastest detection (with 200 VOPs only), we use more weight ($\lambda = 0.6$) for the temporal edges and still manage to get acceptable detection accuracy with over a 5$\times$ speedup, as shown in the column corresponding to 200 VOPs in Table \ref{table:OVERLAP-comparison}. GPU-based spectral clustering can potentially lead to further speedup. \vspace{-2.5mm} \section{Conclusion} Experimental results show that VOP helps in learning a better detector model for moving or static video objects and achieves state-of-the-art detection accuracy on the Youtube-Objects dataset. We show that the proposed OVERLAP framework can detect temporally consistent objects in videos through object class label propagation using streaming clustering of VOPs, with a significant speedup compared with naive per-frame detection at an acceptable loss of accuracy. We also show that multi-object segmentation can be achieved as a by-product of OVERLAP. {\small \bibliographystyle{ieee}
\section{Introduction} In the 1970 paper \cite{Davis:Kahan} Davis and Kahan studied the rotation of spectral subspaces for $2\times 2$ operator matrices under off-diagonal perturbations. In particular, they proved the following result, the celebrated ``Tan $2\Theta$ theorem'': Let $A_\pm$ be strictly positive bounded operators in Hilbert spaces $\fH_\pm$, respectively, and $W$ a bounded operator from $\fH_-$ to $\fH_+$. Denote by \begin{equation*} A=\begin{pmatrix} A_+& 0 \\ 0 & -A_- \end{pmatrix}\quad \text{and} \quad B= A+V=\begin{pmatrix} A_+ & W \\ W^\ast & -A_-\end{pmatrix} \end{equation*} the block operator matrices with respect to the orthogonal decomposition of the Hilbert space $\fH=\fH_+\oplus \fH_-$. Then \begin{equation}\label{tan2} \|\tan 2\Theta\| \leq \frac{2\|V\|}{d},\qquad \spec(\Theta)\subset [0,\pi/4), \end{equation} where $\Theta$ is the operator angle between the subspaces $\Ran \EE_{A}(\R_+)$ and $\Ran \EE_{B}(\R_+)$ and \begin{equation*} d=\dist (\spec (A_+) , \spec(-A_-)) \end{equation*} (see, e.g., \cite{Kostrykin:Makarov:Motovilov:2}). Estimate \eqref{tan2} can equivalently be expressed as the following inequality for the norm of the difference of the orthogonal projections $P=\EE_{A}(\R_+)$ and $Q=\EE_{B}(\R_+)$: \begin{equation}\label{tan:2:Theta} \|P-Q\|\leq \sin \bigg ( \frac{1}{2}\arctan \frac{2\|V\|}{d} \bigg), \end{equation} which, in particular, implies the estimate \begin{equation}\label{a1} \|P-Q\|<\frac{\sqrt{2}}{2}. \end{equation} Independently of the work of Davis and Kahan, inequality \eqref{a1} has been proven by Adamyan and Langer in \cite{Adamyan:Langer:95}, where the operators $A_\pm$ were allowed to be semibounded. The case $d=0$ has been considered in the work \cite{KMM:2} by Kostrykin, Makarov, and Motovilov. In particular, it was proven that there is a unique orthogonal projection $Q$ from the operator interval $[\EE_B\left ((0, \infty)\right), \EE_B\left ( [0, \infty )\right)]$ such that $$ \|P-Q\|\le\frac{\sqrt{2}}{2}, $$ where $P\in\left [\EE_A\left ((0, \infty)\right), \EE_A\left ( [0, \infty )\right)\right ]$ is the orthogonal projection onto the invariant (not necessarily spectral) subspace $\cH_+\subset \cH$ of the operator $A$. A particular case of this result has been obtained earlier by Adamyan, Langer, and Tretter in \cite{Adamyan:Langer:Tretter:2000a}. Recently, a version of the Tan $2\Theta$ Theorem for off-diagonal perturbations $V$ that are relatively bounded with respect to the diagonal operator $A$ has been proven by Motovilov and Selin in \cite{MS:1}. In the present work we obtain several generalizations of the aforementioned results, assuming that the perturbation is given by an off-diagonal symmetric form.
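Before turning to the form setting, it may be helpful to see the bound \eqref{tan:2:Theta} at work in a finite-dimensional model. The following numerical sketch is our illustration only; the blocks $A_\pm$ and $W$ are chosen arbitrarily, subject to the assumptions above.
\begin{verbatim}
import numpy as np

A_plus  = np.diag([2.0, 3.0])      # spec(A_+) in [2, 3], strictly positive
A_minus = np.diag([1.0, 1.5])      # spec(-A_-) in [-1.5, -1]
W = 0.4 * np.ones((2, 2))          # arbitrary off-diagonal coupling
Z = np.zeros((2, 2))

A = np.block([[A_plus, Z], [Z, -A_minus]])
V = np.block([[Z, W], [W.T, Z]])
B = A + V

def pos_proj(M):
    """Orthogonal projection onto the positive spectral subspace of M."""
    w, U = np.linalg.eigh(M)
    Up = U[:, w > 0]
    return Up @ Up.T

P, Q = pos_proj(A), pos_proj(B)
d = 2.0 - (-1.0)                   # dist(spec(A_+), spec(-A_-)) = 3
lhs = np.linalg.norm(P - Q, 2)
rhs = np.sin(0.5 * np.arctan(2 * np.linalg.norm(V, 2) / d))
print(lhs <= rhs)                  # True: the Tan 2-Theta bound holds
\end{verbatim}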
Given a sesquilinear symmetric form $\fa$ and a self-adjoint involution $J$ such that the form $\fa_J[x,y]:=\fa[x,Jy]$ is positive definite and $$\fa[x,Jy]=\fa[Jx,y],$$ we call a symmetric sesquilinear form $\fv$ off-diagonal with respect to the orthogonal decomposition $\fH=\fH_+ \oplus \fH_-$ with $\fH_\pm =\Ran (I\pm J)$ if $$\fv[Jx,y]=-\fv[x,Jy].$$ Based on a close relationship between the symmetric form $\fa[x,y]+\fv[x,y]$ and the sectorial sesquilinear form $\fa[x,Jy]+\ii \fv[x,Jy]$ (cf.\ \cite{MS:1}, \cite{Veselic}), under the assumption that the off-diagonal form $\fv$ is relatively bounded with respect to the form $\fa_J$, we prove \begin{itemize} \item[(i)] an analog of the First Representation Theorem for block operator matrices defined as not necessarily semibounded quadratic forms, \item[(ii)] a relative version of the Tan $2\Theta$ Theorem. \end{itemize} We also provide several versions of the relative Tan $2\Theta$ Theorem in the case where the form $\fa$ is semibounded. \subsection*{Acknowledgments} \quad The authors thank S.~Schmitz for useful discussions and comments. K.A.M. is indebted to the Institute for Mathematics for its kind hospitality during his two-month stay at the Johannes Gutenberg-Universit\"{a}t Mainz in the Summer of 2009. The work of K.A.M.\ has been supported in part by the Deutsche Forschungsgemeinschaft and by the Inneruniversit\"{a}ren Forschungsf\"{o}rderung of the Johannes Gutenberg-Universit\"{a}t Mainz. L.G.\ has been supported by the exchange program between the University of Zagreb and the Johannes Gutenberg-Universit\"{a}t Mainz and in part by the grant number 037-0372783-2750 of the MZO\v{S}, Croatia. K.V.~has been supported in part by the National Foundation of Science, Higher Education and Technical Development of the Republic of Croatia 2007-2009. \section{The First Representation Theorem for off-diagonal form perturbations}\label{sec:off} To introduce the notation, it is convenient to assume the following hypothesis. \begin{hypothesis}\label{hh1} Let $\fa$ be a symmetric sesquilinear form on $\Dom[\fa]$ in a Hilbert space $\fH$. Assume that $J$ is a self-adjoint involution such that $$ J\Dom[\fa]=\Dom[\fa]. $$ Suppose that $$ \fa[Jx,y]=\fa[x,Jy] \quad\text{for all } \quad x,y\in \Dom [\fa_J]=\Dom[\fa], $$ and that the form $\fa_J$ given by $$ \fa_J[x,y]=\fa[x,Jy], \quad x,y\in \Dom [\fa_J]=\Dom[\fa], $$ is a positive definite closed form. Denote by $m_\pm$ the greatest lower bounds of the form $\fa_J$ restricted to the subspaces $$ \fH_\pm =\Ran (I\pm J). $$ \end{hypothesis} \begin{definition} Under Hypothesis \ref{hh1}, a symmetric sesquilinear form $\fv$ on $\Dom[\fv]\supset\Dom[\fa]$ is said to be off-diagonal with respect to the orthogonal decomposition \begin{equation*} \fH=\fH_+\oplus\fH_- \end{equation*} if \begin{equation*} \fv[Jx,y]=-\fv[x,Jy], \quad x,y\in \Dom[\fa]. \end{equation*} If, in addition, \begin{equation}\label{xyz} v_0:=\sup_{0\ne x\in \Dom[\fa]}\frac{|\fv[x]|}{\fa_J[x]}<\infty, \end{equation} the form $\fv$ is said to be an $\fa$-bounded off-diagonal form. \end{definition} \begin{remark}\label{rem:4} If $\fv$ is an off-diagonal symmetric form and $x=x_+ +x_-$ is the unique decomposition of an element $x\in \Dom[\fa]$ such that $x_\pm\in \fH_\pm\cap \Dom[\fa]$, then \begin{equation}\label{tog} \fv[x]=2\Re \fv[x_+,x_-],\quad x\in \Dom[\fa]. \end{equation} Moreover, if $v_0<\infty$, then \begin{equation}\label{vad} |\fv[x]|\leq 2v_0\sqrt{\fa_J[x_+] \fa_J[x_-]}.
\end{equation} \begin{proof} To prove \eqref{tog}, we use the representation \begin{equation*} \fv[x]=\fv[x_+ +x_-, x_+ +x_-]=\fv[x_+]+\fv[x_-]+\fv[x_+, x_-]+\fv[x_-, x_+], \quad x\in \Dom[\fa]. \end{equation*} Since $\fv$ is an off-diagonal form, one obtains that \begin{equation*} \fv[x_+]=\fv[x_+, x_+]=\fv[Jx_+, Jx_+]=-\fv[x_+, x_+]=-\fv[x_+]=0, \end{equation*} and similarly $\fv[x_-]=0$. Therefore, \begin{equation*} \fv[x]=\fv[x_+,x_-]+\fv[x_-, x_+]=2\Re \fv[x_+,x_-],\quad x\in \Dom[\fa]. \end{equation*} To prove \eqref{vad}, one first observes that $$ \fa_J[x]=\fa_J[x_+]+\fa_J[x_-] $$ and, hence, combining \eqref{tog} and \eqref{xyz}, one gets the estimate \begin{equation*} |2\Re \fv[x_+,x_-]|\le v_0 \fa_J[x] = v_0 (\fa_J[x_+]+\fa_J[x_-]) \quad \text{for all}\quad x_\pm\in \fH_\pm\cap \Dom[\fa]. \end{equation*} Hence, replacing $x_+$ by $t\,x_+$, for any $t\ge0$ (and, therefore, for all $t\in \R$) one gets that \begin{equation*} v_0 \fa_J[x_+]\,t^2-2|\Re \fv[x_+,x_-]|\,t+ v_0 \fa_J[x_-]\ge 0, \end{equation*} which together with \eqref{tog} implies the inequality \eqref{vad}. \end{proof} \end{remark} In this setting we present an analog of the First Representation Theorem in off-diagonal perturbation theory. \begin{theorem}\label{repr} Assume Hypothesis \ref{hh1}. Suppose that $\fv$ is an $\fa$-bounded symmetric form, off-diagonal with respect to the orthogonal decomposition $\fH=\fH_+\oplus \fH_-$. On $\Dom[\fb]=\Dom[\fa]$ introduce the symmetric form \begin{equation*} \fb[x,y]=\fa[x,y]+\fv[x,y], \quad x,y\in \Dom[\fb]. \end{equation*} Then \begin{itemize} \item[(i)] there is a unique self-adjoint operator $B$ in $\fH$ such that $\Dom(B)\subset\Dom[\fb]$ and \begin{equation*} \fb[x,y]= \langle x, By\rangle\quad\text{for all}\quad x\in\Dom[\fb],\quad y\in \Dom ( B). \end{equation*} \item[(ii)] the operator $B$ is boundedly invertible and the open interval $(-m_-, m_+)\ni 0$ belongs to its resolvent set. \end{itemize} \end{theorem} \begin{proof} (i). Given $\mu \in (-m_-, m_+)$, on $\Dom[\fa_\mu]=\Dom[\fa]$ introduce the positive closed form $\fa_\mu$ by \begin{equation*} \fa_\mu[x,y]=\fa[x,Jy]-\mu \langle x, Jy\rangle, \quad x,y\in \Dom[\fa_\mu], \end{equation*} and denote by $\fH_{\fa_\mu}$ the Hilbert space $\Dom[\fa_\mu]$ equipped with the inner product $\langle \cdot, \cdot \rangle_\mu=\fa_\mu[\cdot, \cdot]$. We remark that the norms $\|\cdot\|_\mu=\sqrt{\fa_\mu[\cdot]}$ on $\fH_{\fa_\mu}=\Dom[\fa_\mu]$ are obviously equivalent for different values of $\mu\in(-m_-,m_+)$. Since $\fv$ is $\fa$-bounded, one then concludes that \begin{equation*} v_\mu:=\sup_{0\ne x\in \Dom[\fa]}\frac{|\fv[x]|}{\fa_\mu[x]}<\infty,\quad \text{ for all } \mu\in(-m_-,m_+). \end{equation*} Along with the off-diagonal form $\fv$, introduce the dual form $\fv'$ by \begin{equation*} \fv'[x,y]=\ii\fv[x,Jy], \quad x,y\in \Dom[\fa]. \end{equation*} We claim that $\fv'$ is an $\fa$-bounded off-diagonal symmetric form. It suffices to show that \begin{equation*} v_\mu=v_\mu'<\infty, \quad \mu \in (-m_-, m_+), \end{equation*} where \begin{equation}\label{vaumu:bis} v_\mu':=\sup_{0\ne x\in \Dom[\fa]}\frac{|\fv'[x]|}{\fa_\mu[x]}. \end{equation} Indeed, let $x=x_+ +x_-$ be the unique decomposition of an element $x\in \Dom[\fa]$ such that $x_\pm\in \fH_\pm\cap \Dom[\fa]$. By Remark \ref{rem:4}, \begin{equation*} \fv[x]=\fv[x_+,x_-]+\fv[x_-, x_+]=2\Re \fv[x_+,x_-],\quad x\in \Dom[\fa].
\end{equation*} In a similar way (since the form $\fv'$ is obviously off-diagonal) one gets that \begin{align*} \fv'[x]&=\ii\fv[x_++x_-, J(x_++x_-)]=\ii\fv[x_+]-\ii\fv[x_-]-\ii\fv[x_+, x_-]+\ii\fv[x_-, x_+] \\&=-\ii\fv[x_+,x_-]+\ii\overline{\fv[x_+, x_-]}=2\Im \fv[x_+,x_-], \quad x\in \Dom[\fa]. \end{align*} Clearly, from \eqref{vaumu:bis} it follows that \begin{equation*} v_\mu'= 2\sup_{0\ne x\in \Dom[\fa]}\frac{ |\Im \fv[x_+,x_-]|}{\fa_\mu[x]} =2\sup_{0\ne x\in \Dom[\fa]}\frac{ |\Re \fv[x_+,x_-]|}{\fa_\mu[x]}=v_\mu, \end{equation*} $$\mu \in (-m_-,m_+),$$ where the second equality follows by replacing $x_-$ with $\zeta x_-$ for a suitable unimodular scalar $\zeta$, which rotates the phase of $\fv[x_+,x_-]$ without changing $\fa_\mu[x]$. This completes the proof of the claim. Next, on $\Dom[\ft_\mu]=\Dom[\fa]$ introduce the sesquilinear form $$\ft_\mu := \fa_\mu+\ii\fv', \quad \mu \in (-m_-,m_+).$$ Since the form $\fa_\mu$ is positive definite and the form $\fv'$ is an $\fa_\mu$-bounded symmetric form, the form $\ft_\mu$ is a closed sectorial form with vertex $0$ and semi-angle \begin{equation}\label{tmu} \theta_\mu=\arctan (v_\mu')=\arctan (v_\mu). \end{equation} Let $T_\mu$ be the unique $m$-sectorial operator associated with the form $\ft_\mu$. Introduce the operator $$B_\mu=JT_\mu\quad \text{on}\quad \Dom(B_\mu)=\Dom(T_\mu), \quad \mu\in(-m_-, m_+).$$ One obtains that \begin{equation}\label{bbb} \begin{split} \langle x, B_\mu y\rangle &=\langle x, JT_\mu y\rangle=\langle Jx, T_\mu y\rangle =\fa_\mu[Jx, y]+\ii\fv'[Jx,y] \\ &=\fa[x,y]-\mu\langle Jx,Jy\rangle+\ii^2\fv[Jx,Jy] \\ &=\fa[x,y]-\mu\langle x,y\rangle+\fv[x,y], \end{split} \end{equation} for all $x\in \Dom[\fa]$, $y\in \Dom(B_\mu)=\Dom(T_\mu)$. In particular, $B_\mu$ is a symmetric operator on $\Dom(B_\mu)$, since the forms $\fa$ and $\fv$ are symmetric, and $\Dom(B_\mu)=\Dom(T_\mu)\subset \Dom[\fa]$. Since the real part of the form $\ft_\mu$ is positive definite with a positive lower bound, the operator $T_\mu$ has a bounded inverse. This implies that the operator $B_\mu=JT_\mu$ has a bounded inverse and, therefore, the symmetric operator $B_\mu$ is self-adjoint on $\Dom(B_\mu)$. As an immediate consequence, one concludes (put $\mu=0$) that the self-adjoint operator $B:=B_0$ is associated with the symmetric form $\fb$ and that $\Dom(B)\subset \Dom[\fa]$. To prove uniqueness, assume that $B'$ is a self-adjoint operator associated with the form $\fb$. Then for all $x\in \Dom(B)$ and all $y\in \Dom(B')$ one gets that \begin{equation*} \langle x, B'y\rangle=\fb[x,y]=\overline{\fb[y,x]}=\overline{\langle y, Bx\rangle}=\langle Bx, y\rangle, \end{equation*} which means that $B=(B')^*=B'$. (ii). From \eqref{bbb} one concludes that the self-adjoint operator $B_\mu + \mu I$ is associated with the form $\fb$ and, hence, by uniqueness, \begin{equation*} B_\mu=B-\mu I\quad \text{ on }\quad\Dom(B_\mu)=\Dom(B). \end{equation*} Since $B_\mu$ has a bounded inverse for all $\mu\in (-m_-, m_+)$, so does $B-\mu I$, which means that the interval $(-m_-, m_+)$ belongs to the resolvent set of the operator $B$. \end{proof} \begin{remark}\label{reg} In the particular case $\fv=0$, from Theorem \ref{repr} it follows that there exists a unique self-adjoint operator $A$ associated with the form $\fa$. For a different, more constructive proof of Theorem \ref{repr}, as well as for the history of the subject, we refer to our work \cite{GKMV}. \end{remark} \begin{remark} For part (i) of Theorem \ref{repr} to hold it is not necessary to require that the form $\fa_J$ in Hypothesis \ref{hh1} be positive definite. It is sufficient to assume that $\fa_J$ is a closed form that is semi-bounded from below (see, e.g., \cite{Nenciu}).
\end{remark} \section{The Tan $2 \Theta$ Theorem}\label{sec:tan} The main result of this work provides a sharp upper bound for the angle between the positive spectral subspaces $\Ran \EE_A(\R_+) $ and $\Ran \EE_B(\R_+)$ of the operators $A$ and $B$ respectively. \begin{theorem}\label{thm:esti} Assume Hypothesis \ref{hh1} and suppose that $\fv$ is off-diagonal with respect to the decomposition $\fH=\fH_+\oplus \fH_-$. Let $A$ be a unique self-adjoint operator associated with the form $\fa$ and $B$ the self-adjoint operator associated with the form $\fb=\fa+\fv$ referred to in Theorem \ref{repr}. Then the norm of the difference of the spectral projections $P=\EE_{A}(\R_+)$ and $Q=\EE_B(\R_+)$ satisfies the estimate \begin{equation*} \|P-Q\| \leq \sin\left(\frac{1}{2}\arctan v \right)<\frac{\sqrt{2}}{2}, \end{equation*} where \begin{equation*} v=\inf_{\mu\in(-m_-,m_+)}v_\mu=\inf_{\mu\in(-m_-,m_+)}\sup_{0\ne x\in \Dom[\fa]}\frac{|\fv[x]|}{\fa_{\mu}[x]}, \end{equation*} with \begin{equation*} \fa_\mu[x,y]=\fa[x,Jy]-\mu \langle x, Jy\rangle, \quad x,y\in \Dom[\fa_\mu] = \Dom[\fa]. \end{equation*} \end{theorem} The proof of Theorem \ref{thm:esti} uses the following result borrowed from \cite{Woronowicz}. \begin{proposition}\label{lem:4:2} Let $T$ be an m-sectorial operator of semi-angle $\theta < \pi/2$. Let $T=U|T|$ be its polar decomposition. If $U$ is unitary, then the unitary operator $U$ is sectorial with semi-angle $\theta$. \end{proposition} \begin{remark} We note that for a bounded sectorial operator $T$ with a bounded inverse the statement is quite simple. Due to the equality \begin{equation*} \langle x, T x\rangle = \langle |T|^{-1/2}y, U|T|^{1/2}y\rangle = \langle y, |T|^{-1/2} U|T|^{1/2}y\rangle,\qquad y=|T|^{1/2}x, \end{equation*} the operators $T$ and $|T|^{-1/2} U|T|^{1/2}$ are sectorial with the semi-angle $\theta$. The resolvent sets of the operators $|T|^{-1/2} U|T|^{1/2}$ and $U$ coincide. Therefore, since $U$ is unitary, it follows that $U$ is sectorial with semi-angle $\theta$. \end{remark} \begin{proof}[Proof of Theorem \ref{thm:esti}] Given $\mu\in(-m_-,m_+)$, let $T_{\mu}=U_{\mu}|T_{\mu}|$ be the polar decomposition of the sectorial operator $T_{\mu}$ with vertex $0$ and semi-angle $\theta_\mu$, with \begin{equation}\label{ugl} \theta_\mu=\arctan (v_\mu) \end{equation} (as in the proof of Theorem \ref{repr} (cf.~\eqref{tmu}). Since $B_{\mu}=JT_{\mu}$, one concludes that \begin{equation*} |T_{\mu}|=|B_{\mu}| \quad \text{and}\quad U_{\mu}=J^{-1}\sign(B_{\mu}). \end{equation*} Since $T_{\mu}$ is a sectorial operator with sem-angle $\theta_\mu$, by a result in \cite{Woronowicz} (see Proposition \ref{lem:4:2}), the unitary operator $U_{\mu}$ is sectorial with vertex $0$ and semi-angle $\theta_\mu$ as well. Therefore, applying the spectral theorem for the unitary operator $U_\mu$ from \eqref{ugl} one obtains the estimate \begin{equation*} \|J-\sign( B_{\mu})\|=\|I-J^{-1}\sign(B_{\mu})\|=\|I-U_{\mu}\|\le 2 \sin \left(\frac{1}{2}\arctan v_\mu\right). \end{equation*} Since the open interval $(-m_-,m_+)$ belongs to the resolvent set of the operator $B=B_0$, the involution $\sign( B_\mu)$ does not depend on $\mu\in (-m_-,m_+)$ and hence one concludes that \begin{equation*} \sign( B_{\mu})=\sign( B_0)=\sign(B),\quad \mu\in (-m_-,m_+). 
\end{equation*} Therefore, \begin{equation}\label{muo} \|P-Q\|=\frac12 \|J-\sign( B)\|=\frac12 \|J-\sign( B_\mu)\| \le \sin \left (\frac{1}{2}\arctan v_\mu\right) \end{equation} and, hence, since $\mu\in(-m_-,m_+)$ has been chosen arbitrarily, from \eqref{muo} it follows that \begin{equation*} \|P-Q\|\le \inf_{\mu\in(-m_-,m_+)} \sin \left (\frac{1}{2}\arctan v_\mu\right )\le \sin \left (\frac{1}{2}\arctan v \right ). \end{equation*} The proof is complete. \end{proof} As a consequence, we have the following result that can be considered a geometric variant of the Birman-Schwinger principle for off-diagonal form perturbations. \begin{corollary}\label{cor:pi8} Assume Hypothesis \ref{hh1} and suppose that $\fv$ is off-diagonal. Then the form $\fa_J+\fv$ is positive definite if and only if the $\fa_J$-relative bound \eqref{xyz} of $\fv$ does not exceed one. In this case \begin{equation*} \|P-Q\| \le \sin \left (\frac{\pi}{8} \right), \end{equation*} where $P$ and $Q$ are the spectral projections referred to in Theorem \ref{thm:esti}. \end{corollary} \begin{proof} Since $\fv$ is an $\fa$-bounded form, one concludes that there exists a self-adjoint bounded operator $\cV$ in the Hilbert space $\Dom[\fa]$ such that \begin{equation*} \fv[x,y]=\fa_J[x, \cV y],\quad x,y\in \Dom[\fa]. \end{equation*} Since $\fv$ is off-diagonal, the numerical range of $\cV$ coincides with the interval $[-\|\cV\|, \|\cV\|]$, symmetric about the origin. Therefore, one can find a sequence $\{x_n\}_{n=1}^\infty$ in $\Dom[\fa]$ such that \begin{equation*} \lim_{n\to \infty} \frac{\fv[x_n]}{\fa_J[x_n]}=-\|\cV\|, \end{equation*} which proves that $\|\cV\|\le 1$ if and only if the form $\fa_J+\fv$ is positive definite. If this is the case, applying Theorem \ref{thm:esti}, one obtains the inequality \begin{equation*} \|P-Q\| \leq \sin \left(\frac{1}{2}\arctan \left(\|\cV\|\right )\right )\le \sin \left (\frac{\pi}{8}\right) \end{equation*} which completes the proof. \end{proof} \begin{remark} We remark that, in accordance with the Birman-Schwinger principle, for the form $\fa_J+\fv$ to have negative spectrum it is necessary that the $\fa_J$-relative bound $\|\cV\|$ of the perturbation $\fv$ be greater than one. As Corollary \ref{cor:pi8} shows, in the off-diagonal perturbation theory this condition is also sufficient. \end{remark} \section{Two sharp estimates in the semibounded case}\label{sec:semi} In this section we deal with the case of off-diagonal form perturbations of a semi-bounded operator. \begin{hypothesis}\label{ppp} Assume that $A$ is a self-adjoint operator that is semi-bounded from below and has a bounded inverse. Assume, in addition, that the following conditions hold: \begin{itemize} \item[(i)] \emph{The spectral condition.} An open finite interval $(\alpha, \beta)$ belongs to the resolvent set of the operator $A$. We set \begin{equation*} \Sigma_-=\spec(A)\cap (-\infty, \alpha]\quad \text{and} \quad \Sigma_+=\spec(A)\cap [\beta, \infty). \end{equation*} \item[(ii)] \emph{Boundedness.} The sesquilinear form $\fv$ is symmetric on $\Dom[\fv]\supset \Dom(|A|^{1/2})$ and \begin{equation}\label{nach} v:= \sup_{0\ne x\in \Dom[\fa \,]}\frac{|\fv[x]|}{\||A|^{1/2}x\|^2}<\infty. \end{equation} \item[(iii)] \emph{Off-diagonality.} The sesquilinear form $\fv$ is off-diagonal with respect to the orthogonal decomposition $\fH=\fH_+\oplus \fH_-$, with \begin{equation*} \fH_+=\Ran \EE_A((\beta, \infty))\quad \text{ and } \quad \fH_-=\Ran \EE_A((-\infty, \alpha )).
\end{equation*} That is, \begin{equation*} \fv[Jx,y]=-\fv[x,Jy], \quad x,y\in \Dom[\fa], \end{equation*} where the self-adjoint involution $J$ is given by \begin{equation}\label{J} J=\EE_A\left ((\beta, \infty)\right )-\EE_A\left ((-\infty, \alpha)\right ). \end{equation} \end{itemize} \end{hypothesis} Let $\fa$ be the closed form represented by the operator $A$. A direct application of Theorem \ref{repr} shows that under Hypothesis \ref{ppp} there is a unique self-adjoint boundedly invertible operator $B$ associated with the form $$ \fb=\fa+\fv. $$ Under Hypothesis \ref{ppp} we distinguish two cases (see Fig.~\ref{fig:2} and \ref{fig:3}). \begin{itemize} \item[{\bf Case I}.] Assume that $\alpha <0$ and $\beta >0$. Set \begin{equation*} d_+=\text{dist}(\inf (\Sigma_+), 0) \quad \text{and}\quad d_-=\text{dist}(\inf (\Sigma_-), 0) \end{equation*} and suppose that $d_+>d_-$. \item[{\bf Case II}.] Assume that $\alpha,\beta >0$. Set \begin{equation*} d_+=\dist(\inf (\Sigma_+), 0) \quad \text{and}\quad d_-= \dist(\sup (\Sigma_-), 0). \end{equation*} \end{itemize} As follows from the definition of the quantities $d_\pm$, the sum $d_-+d_+$ coincides with the distance between the lower edges of the spectral components $\Sigma_+$ and $\Sigma_-$ in Case I, while in Case II the difference $d_+-d_-$ is the distance from the lower edge of $\Sigma_+$ to the upper edge of the spectral component $\Sigma_-$. Therefore, $d_+-d_-$ coincides with the length of the spectral gap $(\alpha, \beta)$ of the operator $A$ in the latter case. \begin{figure}[htb0] \begin{pspicture}(12,2) \psline(0,1)(12,1) \psline[linewidth=3pt](8,1)(12,1) \psline[linewidth=3pt](3,1)(4,1) \psline[linestyle=dashed](3,1)(3,2) \psline[linestyle=dashed](5,1)(5,2) \psline[linestyle=dashed](8,1)(8,2) \psline[linewidth=.5pt]{<->}(3,1.5)(5,1.5) \psline[linewidth=.5pt]{<->}(5,1.5)(8,1.5) \rput(3.5,0.5){$\Sigma_-$} \rput(9,0.5){$\Sigma_+$} \rput(5,.5){$0$} \rput(4,1.8){$d_-$} \rput(6.5,1.8){$d_+$} \rput(4.1,.7){$\alpha$} \rput(7.8,.7){$\beta$} \psdot*[dotscale=1](5,1) \end{pspicture} \caption{ \label{fig:2}\small The spectrum of the unperturbed sign-indefinite semibounded invertible operator $A$ in Case I. } \end{figure} \begin{figure}[htb0] \begin{pspicture}(12,3) \psline(0,1)(12,1) \psline[linewidth=3pt](8,1)(12,1) \psline[linewidth=3pt](6,1)(7,1) \psline[linestyle=dashed](7,1)(7,1.5) \psline[linestyle=dashed](5,1)(5,2.5) \psline[linestyle=dashed](8,1)(8,2.5) \psline[linewidth=.5pt]{<->}(5,1.5)(7,1.5) \psline[linewidth=.5pt]{<->}(5,2.2)(8,2.2) \rput(6.5,0.5){$\Sigma_-$} \rput(9,0.5){$\Sigma_+$} \rput(5,.5){$0$} \rput(6,1.8){$d_-$} \rput(6.5,2.5){$d_+$} \rput(7.1,.7){$\alpha$} \rput(7.9,.7){$\beta$} \psdot*[dotscale=1](5,1) \end{pspicture}\caption{ \label{fig:3}\small The spectrum of the unperturbed strictly positive operator $A$ with a gap in its spectrum in Case II.} \end{figure} We remark that the condition $d_+>d_-$ required in Case I holds only if the length of the convex hull of the negative spectrum $\Sigma_-$ of $A$ does not exceed that of the spectral gap $ (\alpha, \beta)=\left (\sup(\Sigma_-), \inf (\Sigma_+)\right )$. Now we are prepared to state a relative version of the Tan $2 \Theta$ Theorem in the case where the unperturbed operator is semi-bounded or even positive. \begin{theorem}\label{thm:esti:prime} In either Case I or Case II, introduce the spectral projections \begin{equation}\label{spp} P=\EE_A((-\infty,\alpha])\quad \text{and }\quad Q=\EE_B((-\infty, \alpha])\end{equation} of the operators $A$ and $B$, respectively.
Then the norm of the difference of $P$ and $Q$ satisfies the estimate \begin{equation}\label{Davis} \|P-Q\|\le \sin \left(\frac{1}{2}\arctan \left [2 \frac{v}{\delta}\right]\right)<\frac{\sqrt{2}}{2}, \end{equation} where \begin{equation}\label{delta} \delta=\frac{1}{\sqrt{d_+d_-}}\begin{cases} d_++d_-& \text{ in Case I},\\ d_+-d_-& \text{ in Case II}, \end{cases} \end{equation} and $v$ stands for the relative bound of the off-diagonal form $\fv$ (with respect to $\fa$) given by \eqref{nach}. \end{theorem} \begin{proof} We start with the remark that the form $\fa-\mu$, where $\fa$ is the form of $A$, satisfies Hypothesis \ref{hh1} with $J$ given by \eqref{J}. Set $$ \fa_\mu=(\fa-\mu)_J, \quad\mu \in (\alpha, \beta), $$ that is, $$ \fa_\mu[x,y]=\fa[x,Jy]-\mu \langle x,Jy\rangle, \quad x,y\in \Dom[\fa]. $$ Notice that $\fa_\mu$ is a strictly positive closed form represented by the operator $JA-\mu J$, which equals $|A|-\mu J$ in Case I and $|A-\mu I|$ in Case II. Since $\fv$ is off-diagonal, from Theorem \ref{thm:esti} it follows that \begin{equation}\label{first} \|\EE_{A-\mu I}(\R_+)-\EE_{B-\mu I}(\R_+)\| \leq \sin\left(\frac{1}{2}\arctan v_\mu\right)\quad\text{ for all }\quad \mu\in( \alpha, \beta), \end{equation} with \begin{equation} v_\mu:= \sup_{0\ne x\in \Dom[\fa \,]}\frac{|\fv[x]|}{\fa_\mu[x]}. \end{equation} Since $\fv$ is off-diagonal, by Remark \ref{rem:4} one gets the estimate $$ |\fv[x]|\le 2 v_0 \sqrt{\fa_0[x_+]\fa_0[x_-]}, \quad x\in \Dom [\fa], $$ where $x=x_++x_-$ is the unique decomposition of the element $x\in \Dom [\fa]$ with $$x_\pm\in \fH_\pm\cap \Dom [\fa].$$ Thus, in these notations, taking into account that $$ v_0=v, $$ where $v$ is given by \eqref{nach}, one gets the bound \begin{equation}\label{osnb} v_\mu\le 2 v\sup_{0\ne x\in \Dom[\fa \,]}\frac{ \sqrt{\fa_0[x_+]\fa_0[x_-]} }{\fa_\mu[x]}. \end{equation} Since $\fa_\mu$ is represented by $|A|-\mu J$ in Case I and by $|A-\mu I|$ in Case II, one observes that \begin{equation}\label{nado} \fa_\mu[x]= \begin{cases} \fa_0[x_+]-\mu\|x_+\|^2+\fa_0[x_-]+\mu\|x_-\|^2, & \text{ in Case I,}\\ \fa_0[x_+]-\mu\|x_+\|^2-\fa_0[x_-]+\mu\|x_-\|^2, & \text{ in Case II.} \end{cases} \end{equation} Introducing the elements $y_\pm\in \fH_\pm$, $$y_\pm:= \begin{cases}(|A|\mp \mu I)^{1/2} x_\pm, & \text{ in Case I},\\ (\pm(A-\mu I))^{1/2} x_\pm, & \text{ in Case II}, \end{cases} $$ and taking into account \eqref{nado}, one obtains the representation $$ \frac{ \sqrt{\fa_0[x_+]\fa_0[x_-]} }{\fa_\mu[x]}= \frac{\||A|^{1/2}(|A|- \mu I)^{-1/2}y_+\|\, \||A|^{1/2}(-A+ \mu I)^{-1/2}y_-\|} {\|y_+\|^2+\|y_-\|^2}, $$ valid in both Cases I and II. Using the elementary inequality $$ \|y_+\|\, \|y_-\|\le \frac12\left (\|y_+\|^2+\|y_-\|^2\right ), $$ one arrives at the following bound \begin{equation}\label{eins} \frac{ \sqrt{\fa_0[x_+]\fa_0[x_-]} }{\fa_\mu[x]}\le\frac12 \||A|^{1/2}(|A|- \mu I)^{-1/2}|_{\fH_+}\|\cdot\||A|^{1/2}(-A+ \mu I)^{-1/2}|_{\fH_-}\|. \end{equation} It is easy to see that \begin{equation}\label{zwei} \||A|^{1/2} (|A|-\mu I)^{-1/2}|_{\fH_+}\| \leq \frac{\sqrt{d_+}}{\sqrt{d_+ -\mu}}, \quad \mu\in (\alpha, \beta),\quad \text{ in Cases I and II}, \end{equation} while \begin{equation}\label{drei} \||A|^{1/2} (-A+\mu I)^{-1/2}|_{\fH_-}\| \leq \begin{cases} \frac{\sqrt{d_-}}{\sqrt{d_-+\mu}},& \mu\in (0, \beta),\quad \text{ in Case I},\\ \frac{\sqrt{d_-}}{\sqrt{\mu -d_-}},& \mu\in (\alpha, \beta),\quad \text{ in Case II}.
\end{cases} \end{equation} Choosing $\mu=\frac{d_+-d_-}{2}>0$ in Case I (recall that $d_+>d_-$ by the hypothesis) and $\mu=\frac{d_++d_-}{2}$ in Case II, and combining \eqref{eins}, \eqref{zwei}, \eqref{drei}, one gets the estimates $$ \frac{ \sqrt{\fa_0[x_+]\fa_0[x_-]} }{\fa_{\frac{d_+-d_-}{2}}[x]}\le \frac{\sqrt{d_+d_-}}{d_++d_-}\quad\text{in Case I} $$ and $$ \frac{ \sqrt{\fa_0[x_+]\fa_0[x_-]} }{\fa_{\frac{d_++d_-}{2}}[x]}\le \frac{\sqrt{d_+d_-}}{d_+-d_-}\quad \text{in Case II}. $$ Hence, from \eqref{osnb} it follows that \begin{equation*} v_{\frac{d_+-d_-}{2}}\le 2v \frac{\sqrt{d_+d_-}}{d_++d_-}\quad\text{in Case I} \end{equation*} and \begin{equation*} v_{\frac{d_++d_-}{2}}\le 2v \frac{\sqrt{d_+d_-}}{d_+-d_-}\quad \text{in Case II}. \end{equation*} Applying \eqref{first}, one gets the norm estimates \begin{equation}\label{first1} \|\EE_{A-\frac{d_+-d_-}{2} I}(\R_+)-\EE_{B-\frac{d_+-d_-}{2} I}(\R_+)\| \leq \sin\left(\frac{1}{2}\arctan\left[2 \frac{\sqrt{d_+d_-}}{d_++d_-} v\right]\right) \end{equation} in Case I and \begin{equation}\label{first2} \|\EE_{A-\frac{d_++d_-}{2} I}(\R_+)-\EE_{B-\frac{d_++d_-}{2} I}(\R_+)\| \leq \sin\left(\frac{1}{2}\arctan\left[2 \frac{\sqrt{d_+d_-}}{d_+-d_-} v\right]\right) \end{equation} in Case II. It remains to observe that $\|P-Q\|$, where the spectral projections $P$ and $Q$ are given by \eqref{spp}, coincides with the left-hand side of \eqref{first1} and \eqref{first2} in Case I and Case II, respectively. The proof is complete. \end{proof} \begin{remark} We remark that the quantity $\delta$ given by \eqref{delta} coincides with the \emph{relative distance} (with respect to the origin) between the lower edges of the spectral components $\Sigma_+$ and $\Sigma_-$ in Case I, and it has the meaning of the \emph{relative length} (with respect to the origin) of the spectral gap $(d_-, d_+)$ in Case II. For further properties of the relative distance and various relative perturbation bounds we refer to the paper \cite{Li} and the references quoted therein. We also remark that in Case II, i.e., in the case of a positive operator $A$, the bound \eqref{Davis} directly improves a result obtained in \cite{Luka}, \emph{the relative $\sin\Theta$ Theorem}, which in the present notation is of the form \begin{equation*} \|P-Q\|\le \frac{v}{\delta}. \end{equation*} \end{remark} We conclude our exposition by considering an example of a $2\times 2$ numerical matrix that shows that the main results obtained above are sharp. \begin{example}\label{exam} \emph{Let $\fH$ be the two-dimensional Hilbert space $\fH=\mathbb{C}^2$, $\alpha<\beta$ and $w\in \mathbb{C}$. } \emph{We set } \begin{equation*} A=\begin{pmatrix} \beta &0 \\ 0& \alpha \end{pmatrix} , \quad V=\begin{pmatrix} 0& w\\ w^*&0\end{pmatrix}\quad \text{ \emph{and} }\quad J=\begin{pmatrix} 1&0\\0&-1 \end{pmatrix}. \end{equation*} \emph{Let $\fv$ be the symmetric form represented by (the operator) $V$.} \emph{Clearly, the form $\fv$ satisfies Hypothesis \ref{ppp} with the relative bound $v$ given by \begin{equation*} v=\frac{|w|} {\sqrt{|\alpha\beta|}}, \end{equation*} provided that $\alpha, \beta\ne0$. Since $VJ=-JV$, the form $\fv$ is off-diagonal with respect to the orthogonal decomposition $\fH=\fH_+\oplus\fH_-$.} \end{example} In order to illustrate our results, denote by $B$ the self-adjoint matrix associated with the form $\fa+\fv$, that is, \begin{equation*} B=A+V=\begin{pmatrix} \beta & w \\ w^* &\alpha\end{pmatrix}.
\end{equation*} Denote by $P$ the orthogonal projection associated with the eigenvalue $\alpha$ of the matrix $A$, and by $Q$ the one associated with the lower eigenvalue of the matrix $B$. It is well known (and easy to see) that the classical Davis-Kahan Tan $2\Theta$ theorem \eqref{tan:2:Theta} is exact in the case of $2\times2$ numerical matrices. In particular, the norm of the difference of $P$ and $Q$ can be computed explicitly: \begin{equation}\label{sharp} \|P-Q\|=\sin \left(\frac12 \arctan \left[\frac{2|w|}{\beta-\alpha}\right]\right). \end{equation} Since, in the case in question, \begin{equation}\label{susu2} v_\mu=\sup_{0\ne x\in \Dom[\fa \,]}\frac{|\fv[x]|}{\fa_\mu[x]}=\frac{|w|} {\sqrt{(\beta-\mu)(\mu-\alpha)}},\quad \mu \in (\alpha, \beta), \end{equation} from \eqref{susu2} it follows that \begin{equation*} \inf_{\mu\in(\alpha, \beta)}v_\mu=\frac{2|w|}{\beta-\alpha} \end{equation*} (with the infimum attained at the point $\mu =\frac{\alpha+\beta}{2}$). Therefore, the result of the relative $\tan 2 \Theta$ Theorem \ref{thm:esti} is sharp. It is easy to see that if $\alpha < 0<\beta$ (Case I), then the equality \eqref{sharp} can also be rewritten in the form \begin{equation}\label{zz} \|P-Q\|= \sin \left(\frac12 \arctan \left[2\frac{\sqrt{d_+d_-}}{d_++d_-}v\right]\right), \end{equation} where $d_+=\beta$, $d_-=-\alpha$ and $v=\frac{|w|}{\sqrt{|\alpha|\beta}}$. If $0<\alpha <\beta$ (Case II), the equality \eqref{sharp} can be rewritten as \begin{equation}\label{zzz} \|P-Q\|= \sin \left(\frac12 \arctan \left[2\frac{\sqrt{d_+d_-}}{d_+-d_-}v\right]\right), \end{equation} with $d_+=\beta$, $d_-=\alpha$, and $v=\frac{|w|}{\sqrt{\alpha\beta}}$. The representations \eqref{zz} and \eqref{zzz} show that the estimate \eqref{Davis} becomes an equality in the case of $2\times 2$ numerical matrices and, therefore, the results of Theorem \ref{thm:esti:prime} are sharp.
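The explicit formula \eqref{sharp} is also easy to confirm numerically; the following sketch (our illustration only, with arbitrary sample values of $\alpha$, $\beta$ and $w$) compares the two sides of \eqref{sharp}.
\begin{verbatim}
import numpy as np

alpha, beta, w = -1.0, 2.0, 0.7          # arbitrary sample values
A = np.diag([beta, alpha])
B = np.array([[beta, w], [w, alpha]])

def low_proj(M):
    """Rank-one projection onto the eigenvector of the lower eigenvalue."""
    vals, vecs = np.linalg.eigh(M)       # eigenvalues in ascending order
    u = vecs[:, [0]]
    return u @ u.T

lhs = np.linalg.norm(low_proj(A) - low_proj(B), 2)
rhs = np.sin(0.5 * np.arctan(2 * abs(w) / (beta - alpha)))
print(np.isclose(lhs, rhs))              # True: equality in (sharp)
\end{verbatim}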
\section{Introduction} Let $G$ be a connected simple cubic graph; $\mrm{Aut}~G$ denotes its automorphism group. Let $n$ be half the number of vertices of $G$. We define the {\em arithmetic genus} of a (possibly disconnected) graph as $e-v+1$, where $e$ is the number of edges and $v$ the number of vertices of $G$. For a connected simple cubic graph, $g=n+1$. The definition of arithmetic genus is motivated by the following: to a projective nodal curve with rational components one may associate a so-called {\em dual graph}; the arithmetic genus of the curve is the arithmetic genus of this graph. We abbreviate arithmetic genus to ``genus'' everywhere in this article, although this is at variance with standard graph theory terminology. We trust that this will not actually be confusing. A bound on the order of $\mrm{Aut}~G$ was obtained in \cite{wormald:79}, where it is shown that $|\mrm{Aut}~G|$ divides $3n\cdot 2^n$. However, it can easily be checked by consulting a list of cubic graphs\footnote{For example, Beresford's Gallery of Cubic Graphs, which can be viewed at {\tt http://www.mathpuzzle.com/BeresfordCubic.html}. This gallery is complete for graphs of at most twelve vertices.} that this bound is only rarely attained (in fact, it is only attained for graphs with four or six vertices). Thus a natural problem is to find a sharp bound for the order of $\mrm{Aut}~G$. We solve this problem by the following: \begin{thm} Assume $g\geq 9$; set $l(g)=\min\{k| g=\sum\limits_{i=1}^k a_i\cdot 2^{n_i}, a_i\in \{1,3\}\}$, and set $o(g)=g-l(g)$. \begin{itemize} \item if $g=9\cdot 2^m+s$ ($s=0,1,2$) ($m\geq 0$) except $g=10,11,19,20,38$, then $|\mrm{Aut}~G|\leq 3\cdot 2^{o(g)}$ \item if $g=3\cdot 2^m+s$ ($s=0,1,2$) ($m\geq 2$), or $g=9(2^m+2^p)$ (with $|m-p|\geq 5$) or if $g=10,11,19,20,38$, then $|\mrm{Aut}~G|\leq \frac{3}{2}\cdot 2^{o(g)}$ \item if $g=5\cdot a\cdot 2^m+1$ (where $a=1$ or $3$, $m\geq 2$), then $|\mrm{Aut}~G|\leq \frac{5}{4}\cdot 2^{o(g)}$ \item otherwise, $|\mrm{Aut}~G|\leq 2^{o(g)}$ \end{itemize} Moreover, these bounds are sharp; an explicit construction of graphs attaining the bounds in each case will be given in a subsequent section (see Definition \ref{candidates}). \end{thm} The graphs with maximal automorphism group for $g\leq 8$ will be listed in a table below. This work was motivated by our earlier work \cite{msc} on maximal order automorphism groups of stable curves. Aaron Bertram asked us if we could bound the automorphism groups of stable curves with all rational components. This is equivalent to the problem of finding the maximal order automorphism groups of cubic multigraphs (with a slightly modified notion of graph automorphism). Such a result may indeed be obtained along the lines of this article and is pursued in \cite{msc2}. The basic idea is as follows: once the genus is large enough (larger than eight), the graphs with the most automorphisms should be as close to trees as possible. Of course a tree cannot be trivalent, and all trees have genus zero. Subject to these restrictions, we need to attach ``appendages'' of positive genus to trees in an optimal way. One sees that this is easiest when the appendages have the smallest possible genus. Restricting to simple graphs forces the appendages to have genus at least three, which in turn forces us to consider graphs slightly more general than trees for the ``cores'' of our graphs. If we consider cubic multigraphs, then there are appendages of genus two (a triangle with one edge doubled) and genus one (a loop).
The answers to Bertram's question are graphs with loops as appendages. We will not pursue these questions on non-simple graphs in this article. Our appendages are shown in Figure \ref{pinchfig}, and have genus three and four. The ``pinched tetrahedra'' were used previously, for example, in an article of Wormald \cite{wormald:79}. A graph of genus 16 with $8^5=2^{o(16)}$ automorphisms may be constructed by attaching four copies of the ``pinched $K_{3,3}$'' around the ends of a binary tree with four leaves (with the root vertex removed so the graph is cubic). To reach the bound given for genus 18, we factor 18 as $3\cdot 6$. Our goal is to arrange six pinched tetrahedra around a core as symmetrically as possible. This is achieved by attaching them in three pairs to binary trees with two leaves, and then arranging these binary trees around a star with four vertices. The main idea is to construct a candidate graph in each genus whose automorphism group has order equal to the bounds in the Main Theorem, thereby giving a lower bound for the upper bound we seek. For a graph $G$ assumed to be optimal, we then remove the orbit of a well-chosen edge and attempt to proceed by induction. However, removing this orbit in general causes problems: the components of the remaining graph are not cubic; making them cubic may lead to a non-simple graph; some components may be cycles, which cannot be made cubic in any useful way. When one of these problems occurs, we will show that it constrains the automorphism group of the graph so that its order is smaller than that of the candidate graph's automorphism group. Graphs are assumed to be connected, unless otherwise stated. We will actually have to work extensively with disconnected graphs, but this will be clearly stated. It is a pleasure to thank Marston Conder for his assistance in pointing out the results of Goldschmidt's article, allowing us to significantly shorten the elimination of edge-transitive graphs from consideration. Professor Conder also made some helpful suggestions on an early version of the manuscript. \section{Technicalities} \begin{defn} For a natural number $g$, define the functions \begin{eqnarray*} l(g)&=&\min\{k:g=\sum_{i=1}^k a_i\cdot 2^{n_i}, a_i\in\{1,3\}\} \\ o(g)&=&g-l(g). \end{eqnarray*} \end{defn} The function $l$ may be computed as follows: expand $g$ in binary and, starting from the left, count the number of pairs $10$ or $11$, adding one to the total if after this pairing a single $1$ remains in the last digit. For example, $l(15)=2$ and $l(21)=3$. (A short computational sketch of this rule appears just before Definition \ref{candidates}.) We will make extensive use of various inequalities involving the function $o$; we collect them here for convenience: \begin{prop}\label{daily.inequalities} \ \begin{itemize} \item $\frac{3}{2}k< 2^{o(k+1)}$ for all $k$. \item $k< 2^{2+o(\lceil \frac{k}{6}\rceil+1)}$ for all $k\geq 1$. \item $k<2^{o(\lceil \frac{2k}{3}\rceil +1)}$ for all $k$. \item $3k< 2^{o(2k-2)}$ for $k\geq 4$. \item $k\leq 2^{o(k-1)}$ for $k\geq 4$, with strict inequality for $k\geq 5$. \item $k\leq 2^{\lfloor \frac{k+1}{2}\rfloor}$, with strict inequality for $k\neq 2, 4$. \item $o(\lceil \frac{k}{2}\rceil +1)-\lfloor \frac{k}{6}\rfloor\geq o(\lceil \frac{k}{8}\rceil+1)$. \item $k\leq 2^{o(\lceil \frac{k}{2}\rceil+1)}$, with strict inequality for $k\neq 2, 4, 8$. \end{itemize} \end{prop} \begin{proof} Straightforward.
\end{proof} We also list some properties of the function $l$ which will be useful in what follows: \begin{lem}\label{estimate.l.function} We have the following (in)equalities: \begin{itemize} \item $l(a)=1$ if and only if $a=2^m$ or $a=3\cdot 2^m$ for some $m\geq 0$. \item $l(a)\leq \lceil \frac{\log_2(a)}{2}\rceil$. \item $l(a\cdot b)\leq 2l(a)\cdot l(b)$. \item $l(2a)=l(a)$. \item $o(2a)=o(a)+a$. \item $l(3a)\leq 2l(a)$. \item $l(a+1)\leq l(a)+1$. \item $o(a)=1$ if and only if $a=2$; $o(a)\geq 2$ for $a\geq 3$. \item $2^{l(a)}\leq 2\sqrt{a}$. \item $l(a)=2$ and $l(3a)=4$ if and only if $a=3\cdot(2^m+2^p)$ with $m,p\geq 0$ and $|m-p|\geq 5$. \end{itemize} \end{lem} \begin{proof} Only the last two parts deserve some explanation: for $u$ such that $2^u\leq a <2^{u+1}$, the binary decomposition of $a$ will have $u+1$ digits; writing $a$ as a sum as in the definition of $l(a)$ effectively forms groups of at least two digits in this binary form, plus at most an extra one at the end; there are thus at most $\frac{u+2}{2}$ such groups, so $l(a)\leq \frac{u+2}{2}$; then $2^{l(a)}\leq 2\sqrt{2^u}\leq 2\sqrt{a}$. It is easy to see that the inequality is strict as soon as $a>1$. For the last part, note that $l(a)=2$ means $a=b\cdot 2^m+c\cdot 2^p$ with $b,c\in\{1,3\}$. It is immediate to check that if at least one of $b,c$ is not $3$, then $l(3a)\leq 3$, and moreover, even if both $b=c=3$ one must have $|m-p|\geq 5$ for $l(3a)=4$ to happen. \end{proof} \begin{lem}\label{main.inequality} The inequality $$sl(h)-l(sh)\geq \lfloor \frac{s+1}{2}\rfloor$$ is: \begin{itemize} \item strict for any $h$ if $s=4,6$ or $s\geq 8$ \item strict for $l(h)\geq 2$ and any $s\geq 2$, $s\neq 3$ \item strict for $l(h)\geq 3$ and $s=3$ \item equality for $l(h)=1$ and $s=2,5,7$, or $l(h)=2$ and $s=3$ \item false for $s=1$ or $l(h)=1$ and $s=3$ \end{itemize} \end{lem} \begin{proof} We begin by noting that $l(sh)\leq 2l(s)l(h)$, so $sl(h)-l(sh)\geq l(h)(s-2l(s))=l(h)(2o(s)-s)$. Since $l(h)\geq 1$, we would like to see from what value of $s$ we have $2o(s)-s\geq \lfloor \frac{s+1}{2}\rfloor$, or equivalently $2o(s)\geq \lfloor \frac{3s+1}{2}\rfloor$. If $2^u\leq s<2^{u+1}$ then $l(s)\leq \lceil \frac{u+1}{2}\rceil=\lfloor \frac{u}{2}\rfloor +1$. Then $2l(s)\leq 2\lfloor \frac{u}{2}\rfloor+2\leq u+2$, so $2o(s)=2s-2l(s)\geq 2s-u-2$. The inequality we would like to prove becomes $2s-u-2\geq \lfloor \frac{3s+1}{2}\rfloor $ or $s-\lfloor \frac{s+1}{2}\rfloor\geq u+2$. This in turn is implied by $\frac{s-1}{2}\geq u+2$ or $s\geq 2u+5$. Since $s\geq 2^u$ and $2^u>2u+5$ for $u\geq 4$, we see that the initial inequality is strict for $s\geq 16$.
Now one may construct a table of values for both sides of the inequality for values of $s$ up to $15$; we use the inequalities in Lemma \ref{estimate.l.function} above: \begin{equation}\label{long.table} \begin{array}{cccl} s & sl(h)-l(sh) & \lfloor \frac{s+1}{2}\rfloor & \mrm{comments}\\ \hline 1 & 0 & 1 & \mrm{always\ false}\\ 2 & l(h) & 1 & \mrm{not\ strict\ for\ }l(h)=1\\ 3 & \geq l(h) & 2 & \mrm{strict\ for\ }l(h)\geq 3\\ 4 & 3l(h) & 2 & \mrm{always\ strict}\\ 5 & \geq 3l(h) & 3 & \mrm{not\ strict\ for\ }l(h)=1\\ 6 & \geq 4l(h) & 3 & \mrm{always\ strict}\\ 7 & \geq 4l(h) & 4 & \mrm{not\ strict\ for\ }l(h)=1\\ 8 & 7l(h) & 4 & \mrm{always\ strict}\\ 9 & \geq 7l(h) & 5 & \mrm{always\ strict}\\ 10 & \geq 8l(h) & 5 & \mrm{always\ strict}\\ 11 & \geq 8l(h) & 6 & \mrm{always\ strict}\\ 12 & \geq 10l(h) & 6 & \mrm{always\ strict}\\ 13 & \geq 10l(h) & 7 & \mrm{always\ strict}\\ 14 & \geq 11l(h) & 7 & \mrm{always\ strict}\\ 15 & \geq 11l(h) & 8 & \mrm{always\ strict} \end{array} \end{equation} The lemma now follows immediately. \end{proof} In what follows, we will denote by $A(s,h)$ the quantity $sl(h)-l(sh)-\lfloor \frac{s-1}{2}\rfloor$, and by $B(s,h)$ the quantity $sl(h)-l(sh)$; the lemma may be interpreted as giving ranges of $s$ and $h$ for which $A(s,h)\geq 1$. \section{Eliminating edge-transitive graphs} For an edge $e$ of a graph $G$, we will denote by $O(e)$ its orbit under the automorphism group of $G$. We use the word ``edge'' here to denote the graph with two vertices joined by an edge, so that $O(e)$ is a graph. Define the function $M(G)$ as the number of edges in a minimal orbit of an edge. We refer to the star on four vertices simply as a ``star'', since we consider no stars with more vertices. \begin{lem}\label{structure.minimal.orbit} Let $e$ be an edge of $G$ such that $O(e)$ has minimal order among all orbits of edges of $G$. Then only the following possibilities occur: \begin{itemize} \item $G=O(e)$; \item $O(e)$ is a disjoint union of stars; \item $O(e)$ is a disjoint union of edges; \item $O(e)$ is a disjoint union of cycles; two such cycles are at distance at least two from each other. \end{itemize} \end{lem} \begin{proof} It is easy to see that if two stars in $O(e)$ have a common edge, then $G=O(e)$. Similarly, if two stars in $O(e)$ have a vertex in common, then either $G=O(e)$ or the third edge at that vertex will have an orbit of order smaller than that of $O(e)$ (which would be a contradiction to the choice of $e$). Thus, if there is a star in $O(e)$, one of the first two possibilities occurs for $G$. If no three edges in $O(e)$ share a common vertex, then either all edges in $O(e)$ are disjoint, or there are two edges $e_1$ (which may be assumed to be $e$, as $O(e)$ is acted upon transitively by $\mrm{Aut}~G$) and $e_2$ in $O(e)$ with a common vertex $v$. Denote by $f$ the third edge of $G$ at $v$; $f$ is then not in $O(e)$. Denote by $w$ the other end of $e$. If $v$ and $w$ are not in the same orbit of $\mrm{Aut}~G$, then we see that $|O(e)|=2|O(v)|>|O(v)|\geq |O(f)|$, so we reach a contradiction to the choice of $e$. If, however, $w\in O(v)$, then the existence of a cycle made of edges in $O(e)$ is immediate. Moreover, since $f\notin O(e)$, these cycles are disjoint. Note that $|O(f)|\leq |O(e)|=|O(v)|$, with equality if and only if the ends of $f$ are not in the same orbit; in particular two cycles in $O(e)$ cannot be at distance one from each other (the edge between them, necessarily in the orbit of $f$, would have both endpoints in the same orbit).
\end{proof} \begin{note}\label{e-f} If the fourth situation above occurs, we will actually choose the edge $f$ and work with it in the arguments that follow; this is possible since $O(f)$ is also minimal, and may only be either a disjoint union of stars or a disjoint union of edges. $f$ (or more precisely, its orbit) in this case will be called {\em well-chosen}. \end{note} \begin{thm}\label{edge.transitive.bound} An edge-transitive graph $G$ has at most $384(g-1)$ automorphisms. \end{thm} \begin{proof} Tutte's papers \cite{tutte:47}\cite{tutte:59} on symmetric graphs give a bound of $48(g-1)$ for such graphs, and Goldschmidt's results on semisymmetric graphs \cite{gold} imply the bound in the statement of the theorem. \end{proof} We will use a couple of other functions frequently. Define \[ \mu(g)=\max \frac{|\mrm{Aut}~G|}{2^{o(g)}}, \] the maximum taken over all simple cubic graphs of genus $g$. For an edge $e$ of a graph, define $\mrm{Aut}_e' G$ to be the group of automorphisms {\em preserving} (not necessarily fixing!) the edge $e$ -- that is, preserving the unordered pair of endpoints of $e$. Similarly, define \[ \mu_1(g)=\max\frac{|\mrm{Aut}_e' G|}{2^{o(g)}}, \] the maximum here taken over all simple cubic graphs of genus $g$ and all edges of these graphs. We are interested in the values of these functions for small $g$ (when the optimal graphs are not our candidates). For a fixed graph $G$, we define $\mu_1(G)$ similarly by taking the maximum over all edges of $G$. Finally, set \[ \pi(G)=\max|\mrm{Aut}_e' G|, \] the maximum taken over all edges in $G$. The following table may be compiled by inspection of lists of cubic graphs on a small number of vertices. \[\label{mu.table} \begin{array}{lllll} g & l(g) & o(g) & \mu(g) & \mu_1(g)\\ \hline 3 & 1 & 2 & 6 & 1 \\ 4 & 1 & 3 & 9 & 1\\ 5 & 2 & 3 & 6 & 1\\ 6 & 1 & 5 & \frac{15}{4} & 1 \\ 7 & 2 & 5 & 2 & 1\\ 8 & 1 & 7 & \frac{21}{8} & 1 \end{array} \] \section{The candidate graphs} We now describe the candidates $C_g$ for the cubic simple graphs with the most automorphisms when $g\geq 9$. The definitions make sense for smaller genus, but do not give the optimal graphs. We need some non-standard terminology. If $G$ is an edge-transitive graph, choose an edge $e$. Replace $e$ by two edges with one endpoint in common and the other endpoints the former endpoints of $e$. We call this {\em pinching} $G$. This notion of pinching motivates the study of the function $\mu_1$. If $G$ is not edge-transitive, we must specify an edge when pinching. A tree has a unique edge or vertex through which all geodesics of maximal length pass; call this the {\em root}. If we attach a tree to another graph at its root, and this root is an edge, we pinch the edge and the new vertex is the point of attachment. If this attaching leads to a vertex of higher valence, we tacitly introduce an edge to correct the problem. In most cases, the meaning of ``attach'' is not confusing, since we make the simplest attachment possible to keep the graph cubic. If there is a possibility of confusion, we will be more explicit.
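Since all of the bounds above and below are expressed through $o(g)$, we record a short computational sketch of the left-to-right pairing rule for $l$ from the Technicalities section; the code and function names are our own illustration, and it may be used to check the table above and the values quoted in Definition \ref{candidates}.
\begin{verbatim}
def l(g):
    # Scan the binary digits of g from the left; each leading '1'
    # starts a summand a*2^n with a in {1,3}, consuming '11' or '10'
    # (a lone trailing '1' forms a summand by itself).
    bits, count, i = bin(g)[2:], 0, 0
    while i < len(bits):
        if bits[i] == '1':
            count += 1
            i += 2           # consume '11' or '10'
        else:
            i += 1           # zeros between summands
    return count

def o(g):
    return g - l(g)

assert l(15) == 2 and l(21) == 3 and o(57) == 54
\end{verbatim}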
\begin{figure}[ht] \begin{center} $\begin{array}{c@{\hspace{1.5in}}c} \begin{picture}(30,30) \put(5,15){\circle*{3}} \put(5,15){\line(1,-1){10}} \put(5,15){\line(1,1){10}} \put(5,15){\line(1,0){20}} \put(15,5){\circle*{3}} \put(15,5){\line(1,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(0,1){10}} \put(15,25){\circle*{3}} \put(15,25){\line(1,-1){10}} \put(25,15){\circle*{3}} \end{picture} & \begin{picture}(50,50) \put(5,35){\circle*{3}} \put(5,35){\line(1,1){10}} \put(5,35){\line(1,-1){10}} \put(5,35){\line(2,-3){20}} \put(15,45){\circle*{3}} \put(15,25){\circle*{3}} \put(25,5){\circle*{3}} \put(25,5){\line(2,3){20}} \put(15,45){\line(1,-1){20}} \put(15,45){\line(1,0){20}} \put(15,25){\line(1,1){20}} \put(15,25){\line(1,0){20}} \put(35,45){\circle*{3}} \put(35,25){\circle*{3}} \put(35,45){\line(1,-1){10}} \put(45,35){\circle*{3}} \put(35,25){\line(1,1){10}} \end{picture} \\ \mbox{A pinched tetrahedron} & \mbox{A pinched $K_{3,3}$} \end{array}$ \end{center} \caption{Pinching illustrated.} \label{pinchfig} \end{figure} \begin{defn}\label{candidates} \ \begin{itemize} \item Define the graphs $A_m$ as follows: attach a pinched tetrahedron to each leaf of a binary tree with $2^m$ leaves. $A_m$ has genus $g=3\cdot 2^m$ ($m\geq 1$) and $2^{g-1}=2^{o(g)}$ automorphisms. For $g=3\cdot 2^m$ ($m\geq 1$), define $C_g=A_m^\mrm{stab}$ (see the next section for the definition of stabilization: in this case it ensures that the graph is cubic by shrinking the central edge pair to an edge). We note that $\pi(C_g)=2^{o(g)}$ in this case. \item Define the graphs $B_m$ for $m\geq 2$ as follows: attach a pinched $K_{3,3}$ to each leaf of a binary tree with $2^{m-2}$ leaves. $B_m$ has genus $g=2^m$ and $2^{g-1}=2^{o(g)}$ automorphisms. For $g=2^m$ ($m\geq 3$), define $C_g=B_m^\mrm{stab}$. Note also that $\pi(C_g)=2^{o(g)}$ in this case. \item If $g=3\cdot 2^m$ ($m\geq 2$), $C_g$ is defined by linking three copies of $B_m$ at their roots, by three edges, to a common root vertex. This is easily seen to yield a simple cubic graph with $\frac{3}{4}\cdot 2^g=\frac{3}{2}\cdot 2^{o(g)}$ automorphisms. If $g=3\cdot 2^m+1$, define $C_g$ by expanding the root of the previous tree into a triangle (with the $B_m$ attached at its vertices); this simple cubic graph has $\frac{3}{8}\cdot 2^g=\frac{3}{2}\cdot 2^{o(g)}$ automorphisms. Note that for this configuration, since $M(G) \geq 3$, we get $\pi(C_g)=\frac{1}{2} 2^{o(g)}$. \item If $g=9\cdot 2^m$, $C_g$ is defined by linking three copies of $A_m$ at their roots, by three edges, to a common root. This yields a simple cubic graph with $\frac{3}{4}\cdot 2^g=3\cdot 2^{o(g)}$ automorphisms. For $g=9\cdot 2^m+1$ we proceed as above by inserting a triangle at the root of the previous tree; again $|\mrm{Aut}~C_g|=\frac{3}{8}\cdot 2^g=3\cdot 2^{o(g)} (m\geq 2)$ or $|\mrm{Aut}~C_g|=\frac{3}{2}\cdot 2^{o(g)} (m=0,1)$. We note that $\pi(C_g)=2^{o(g)}$ for $m\geq 2$ and $\pi(C_g)=\frac{1}{2} 2^{o(g)}$ for $m=0,1$. \item If $g=3\cdot 2^m+2$ ($m\geq 2$) or $g=9\cdot 2^m+2$ ($m\geq 0$), we obtain $C_g$ by attaching copies of $B_m$, respectively, $A_m$ to the valence two vertices of a $K_{2,3}$. In both cases the genus two $K_{2,3}$ at the core yields extra symmetry for a total of $\frac{3}{8}\cdot 2^g$ automorphisms. If $g=3\cdot 2^m+2$, this means $\frac{3}{2}\cdot 2^{o(g)}$ automorphisms, while if $g=9\cdot 2^m+2$ one gets $3\cdot 2^{o(g)}$ automorphisms for $m\geq 3$ and $\frac{3}{2}\cdot 2^{o(g)}$ automorphisms for $m=0,1,2$. 
As above, we note that $\pi(C_g)=2^{o(g)}$ for $m\geq 2$ and $\pi(C_g)=\frac{1}{2} 2^{o(g)}$ for $m=0,1,2$. \item If $g=9(2^m+2^p)+s$ ($s=0,1,2$) with $|m-p|\geq 5$, $C_g$ is defined by attaching $A_m$ to $A_p$ at their roots, and arranging three copies of this configuration around a root which is a star, a triangle, or a $K_{2,3}$ depending on the value of $s$. This graph has $\frac{3}{2}\cdot 2^{o(g)}$ automorphisms. $\pi(C_g)=\frac{1}{2}\cdot 2^{o(g)}$ in this case. \item If $g=2^m+1$ ($m\geq 4$, as $m=3$ is covered above), we attach four copies of $B_{m-2}$ to the vertices of a square to obtain $C_g$; each of these quasi-trees will have genus $2^{m-2}$; the order of the automorphism group of this graph is then $8\cdot (2^{2^{m-2}-1})^4= 2^{g-2}=2^{o(g)}$. In this case $\pi(C_g)=\frac{1}{4}\cdot 2^{o(g)}$. \item If $g=5\cdot 2^m+1$ ($m\geq 2$) or $g=5\cdot 3\cdot 2^m+1$ ($m\geq 0$), then $C_g$ is a cycle of length five with copies of $B_m$, respectively $A_m$, at its vertices. In this case one gets $\frac{5}{4}\cdot 2^{o(g)}$ automorphisms. \item $C_7$ is a pinched tetrahedron joined to a pinched $K_{3,3}$ by an edge. \item In all other cases, in the binary decomposition of $g$ we have, counting from the left, at least two pairs $11$ and/or $10$, plus a possible 1 left over. We look at the binary decomposition of $g$ and, from left to right, look for groups $11$ and $10$. We get a decomposition of $g$ into a sum of powers of two with coefficients one or three; for each part of $g$ of the form $2^m$ (with $m>1$) we take a $B_m$ of the corresponding genus, and for each part of $g$ of the form $3\cdot 2^m$ we take an $A_m$. If a 1 is left after this pairing, we replace the root of the last binary tree with a triangle attached to the two branches, with a free edge attached at its third vertex. If a 10 is left, attach to the root of the last binary tree an edge connected to a pair of triangles with a common side. In the end, link each of these graphs to a distinct vertex of a path of length $l(g)-1$ using an edge. It is easy to see that the graph thus obtained is cubic, simple, and of genus $g$; moreover, it is easy to compute that the order of the automorphism group of this graph is precisely $2^{o(g)}$. \end{itemize} \end{defn} The final point in the definition will be called the {\em general case}, and the others {\em exceptional}. \begin{exa} It is worth illustrating the general case with examples. Let $g=57$, so the binary expansion of $g$ is $111001$. From left to right, there are two pairs -- $11$ and $10$ -- and then a 1 is left over. We write $57=3\cdot 2^4+1\cdot 2^3+1\cdot 2^0$; $l(57)=3$, so $o(57)=54$. We are to take an $A_4$ and attach it to a $B_3$, inserting a triangle in the middle. A simpler way to describe the graph in this case is as follows: to two of the vertices of a triangle, attach an $A_3$, and attach a $B_3$ to the third. $C_{56}$ is $C_{57}$ with the triangle collapsed to a point. There is no change in the automorphism group; $o(56)=o(57)=54$. For $g=58$, we have instead a pair of triangles sharing an edge, with an $A_4$ attached to the free vertex of one, and the $B_3$ attached at the other free vertex. $o(58)=55$, and there is an extra involution of $C_{58}$ coming from the configuration of two triangles. \end{exa} \begin{rmk}\label{comparison} \ \begin{itemize} \item We note that the candidates above yield no more automorphisms than $3\cdot 2^{o(g)}$, and in most genera the bound is $2^{o(g)}$.
\item We compare below the orders of the automorphism groups of the candidates constructed above with the theoretical bound of $48(g-1)$ obtained by Tutte for symmetric graphs (there are no semisymmetric graphs of genus smaller than 28). For $9\leq g \leq 12$, methods similar to those of Tutte give a bound of $24(g-1)$ for such graphs, which is used in the table. \begin{equation}\label{low.genus.table} \begin{array}{lllll} g & o(g) & |\mrm{Aut}~C_g| & \mrm{edge-transitive~bound} & \mrm{optimal} \\ \hline 3 & 2 & N/A & 96 & \mrm{tetrahedron~} (24)\\ 4 & 3 & N/A & 144 & K_{3,3}~(72)\\ 5 & 3 & N/A & 192 & \mrm{cube~} (48)\\ 6 & 5 & 32 & 240 & \mrm{Petersen~} (120)\\ 7 & 5 & 32 & 288 & \mrm{no-name~} (64) \\ 8 & 7 & 128 & 336 & \mrm{Heawood~} (336)\\ 9 & 7 & {\bf 384} & 192 & \\ 10 & 8 & {\bf 384} & 216 & \\ 11 & 9 & {\bf 768} & 240 & \\ 12 & 11 & {\bf 3072} & 264 & \\ 13 & 11 & {\bf 3072} & 576 & \\ 14 & 12 & {\bf 6144} & 624 & \\ 15 & 13 & {\bf 8192} & 672 & \\ 16 & 15 & {\bf 32768} & 720 & \end{array} \end{equation} \end{itemize} \end{rmk} It is easy to see that for $g\geq 16$ we always have $2^{o(g)}> 384(g-1)$. The conclusion so far: the graphs $C_g$ have more automorphisms than any edge-transitive graph as soon as $g\geq 9$. Thus, we will be concerned in what follows with the two cases in which the minimal orbit of an edge is a disjoint union of stars or edges inside $G$. As a consequence of (\ref{estimate.l.function}), we have the following: \begin{prop}\label{growth.candidates} $|\mrm{Aut}~C_{g+1}|\geq |\mrm{Aut}~C_g|$ except when $g=9\cdot 2^m+2$ ($m\geq 1$) or $g=9(2^m+2^p)+2$ ($|m-p|\geq 5$). $|\mrm{Aut}~C_{g+2}|>|\mrm{Aut}~C_{g}|$ for any $g\geq 9$. \end{prop} \begin{proof} Lemma \ref{estimate.l.function} shows that $o(g+1)\geq o(g)$. \begin{itemize} \item If $C_g$ has $2^{o(g)}$ automorphisms, then $C_{g+1}$ has at least $2^{o(g+1)}\geq 2^{o(g)}$. \item If $C_g$ has $3\cdot 2^{o(g)}$ automorphisms, then $g=9\cdot 2^m+u$, where $u=0$ or $u=2$; in the second case $m\geq 3$ also. But then $C_{g+1}$, for $g+1=9\cdot 2^m+1$, has the same number of automorphisms as $C_g$, while for $g+1=9\cdot 2^m+3$, $C_{g+1}$ has $2^{o(g+1)}=2^{o(g)+1}$ automorphisms. Thus for $g=9\cdot 2^m+2$, $|\mrm{Aut}~C_{g+1}|<|\mrm{Aut}~C_g|$. Similar behavior occurs when $g=9(2^m+2^p)+2$ ($m$ and $p$ as in the hypotheses) with $p$ not too small. \item If $C_g$ has $\frac{3}{2} \cdot 2^{o(g)}$ automorphisms, then $o(g+1)\geq o(g)+1$ would show that $|\mrm{Aut}~C_{g+1}| > |\mrm{Aut}~C_g|$. We have: \begin{itemize} \item If $g=3\cdot 2^m$, then $o(g+1)=o(g)$, but also $|\mrm{Aut}~C_{g+1}|=\frac{3}{2}\cdot 2^{o(g+1)}$. \item If $g=3\cdot 2^m+u$ ($u=1,2$), then $o(g+1)=o(g)+1$. \item If $g=9\cdot 2^m+1$ ($m\geq 2$), then $o(g+1)=o(g)+1$. \item If $g=10,11,19$ (so that $g+1=11,12,20$), then again $o(g+1)\geq o(g)+1$. \item If $g=20,38$, then as before we see that $|\mrm{Aut}~C_{g+1}|<|\mrm{Aut}~C_g|$. \end{itemize} \end{itemize} The previous calculations then show that $|\mrm{Aut}~C_{g+1}|\geq|\mrm{Aut}~C_g|$ except for the noted exceptions. Since $o(g+2)\geq o(g)+1$, the second part follows immediately except in the case $g=9\cdot 2^m+2$; but a direct computation shows that $o(g+2)=o(g)+2$ for such $g$, so $|\mrm{Aut}~C_{g+2}|\geq 4\cdot 2^{o(g)}>|\mrm{Aut}~C_g|$, and we are done. \end{proof} \section{Some reductions} \begin{defn} We will call a simple cubic graph {\em optimal} if its automorphism group has maximal order among all simple cubic graphs of the same genus.
We call such a graph {\em strictly optimal} if it is optimal and the minimal orbit of edges in it has minimal order among all optimal graphs of that genus. \end{defn} In this section we investigate the structure of a strictly optimal simple cubic graph of genus $g\geq 9$. The results of the previous sections show that $G$ cannot be edge-transitive. Consequently, we will pick a minimal orbit of an edge and try to understand its structure, and the structure it brings to the whole graph $G$. A first step in this direction was made in (\ref{structure.minimal.orbit}). We will write $G'=G\setminus O(e)$ and denote by $g'$ the genus of $G'$. Due to the structure of $O(e)$, $G'$ has valence either two or three at each of its vertices. Consequently, $g'\geq 1$, and $g'=1$ if and only if $G'$ is a disjoint union of cycles. \begin{rmk}\label{disc} We will use a number of times a ``local surgery'' process, replacing subgraphs of the original graph $G$ with other subgraphs; the surgery is to be done throughout the orbit of the replaced subgraph. These graphs will have valence three at each of their vertices except at those that are in the same well-chosen orbit. We will reattach the replacements at the same points, to preserve regularity and avoid multiple edges. Each time we will keep track of the genus lost, and of the order of decrease/increase in the number of automorphisms. Since we are targeting only graphs that are optimal (maximal order of the automorphism group), if by chance the orbit of the newly introduced subgraph is larger than that of the original subgraph, then we must have strictly increased the automorphism group of the graph, so the estimates we use to show that the original graph was not optimal still hold. Thus we will assume tacitly that we are in the worst case scenario, where the new subgraph has orbit ``the same'' as the original, and argue usually by the number of elements in a minimal orbit of edges to reach a contradiction. More precisely, one can simply mark the vertices where the original graph was disconnected; the subgroup of automorphisms of the graph (after surgery) required to preserve the marking (at most permuting marked vertices among themselves) is the one we are really estimating. Most times this extra marking is not needed; in the few cases where it is, we will make it clear. \end{rmk} \begin{defn} For a graph $G$ having valence at least two at each of its vertices and genus at least three, we will denote by $G^\mrm{stab}$ the {\em stabilization} of $G$ (the terminology is motivated by algebraic geometry). This is the graph obtained by replacing each maximal path with interior vertices all of valence two by a simple edge with the same endpoints as the path. It is clear that $G^\mrm{stab}$ will have valence at least three at each of its vertices; however, it might have either loops or multiple edges. \end{defn} The possibility of loops and/or multiple edges inside $G^\mrm{stab}$ prohibits a direct induction; sharp bounds on such graphs may be obtained using the methods of this article; they are greater than the bound for simple graphs in every genus. \begin{thma} \label{main.theorem} \ \begin{itemize} \item If $G$ is a (strictly optimal) simple cubic graph of genus $g\geq 9$, then $\mu(g)=\frac{|\mrm{Aut}~G|}{2^{o(g)}}\leq 3$. Moreover, $\mu(g)\leq 1$ except when $g=a\cdot 2^m+b$, with $(a,b)\in \{(3,0),(3,1),(3,2),(9,0),(9,1),(9,2),(5,1),(15,1)\}$.
\item If $G$ is a (strictly optimal) simple cubic graph of genus $g\geq 9$ with $|\mrm{Aut}~G|>2^{o(g)}$, then $M(G)\geq 3$. \item If $G$ is a simple cubic graph of genus $g\geq 9$, then $\pi(G)\leq 2^{o(g)}$ (or $\mu_1(g)\leq 1$). \end{itemize} \end{thma} Note that the third assertion of the theorem follows easily from the first two. Also note that Theorem A immediately implies the Main Theorem of the article. We will call Theorem A restricted to graphs of genus $g$ Theorem A$_g$. The idea of the proof is to analyze the graph $G'$ left after removing a well-chosen minimal orbit $O(e)$ from $G$. $G'$ may be disconnected, $O(e)$ can be a disjoint union of stars or isolated edges, and $G'$ (or its components) might have loops or multiple edges after stabilization. Overall, $2^{o(g)}$ acts as a filter in each genus: if a graph does not have at least as many automorphisms, then it is not optimal, as (\ref{candidates}) shows. We will use estimates to determine precisely when (for what $g$, and for what structure of $O(e)$) a graph has more than $2^{o(g)}$ automorphisms. Moreover, we will determine and use in the inductive process the order of the minimal orbit of edges (or at least some useful estimate). In what follows, $O(e)$ will {\em always} refer to a minimal orbit of disjoint stars or edges. We call a vertex of $G'$ which has valence three a {\em stable} vertex. Such vertices might not exist; more precisely, they are absent exactly in the components of $G'$ which are cycles. However, in this instance $G$ is easily seen not to be optimal; this will be shown inductively in Lemma \ref{all.cycles}. In regard to the process of stabilization of the components of $G'$, the following lemmas will be useful: \begin{lem}\label{unstable.path.length} There can be at most two endpoints of edges in $O(e)$ on any unstable path with stable endpoints in $G'$. \end{lem} \begin{proof} First note that if at least three such vertices exist on such a path, they cannot possibly be in the same orbit (there is in this case a ``middle'' edge distinguished from the others). Since the endpoints of stars are in the same orbit, this means that we need to discuss only the possibility of isolated edges in $O(e)$ having three or more endpoints on a path with stable endpoints in $G'$. For such paths of length five or more (four or more contact points), it is clear that one of the edges in this path (the most central) will have an orbit of order less than that of $O(e)$, which is a contradiction. For a path of length four, the only possibility is that the middle contact point (vertex) is not in the orbit of the other two contact points. But then its orbit has order half that of the orbit of the other contact points, which is impossible, since both are endpoints of edges in $O(e)$. \end{proof} Moreover: \begin{lem}\label{problems.stabilization} Assume Theorem A$_h$ for all $9\leq h<g$. Let $G$ be a strictly optimal simple cubic graph of genus $g$, and $O(e)$ a minimal orbit of edges in $G$. Then no two edges in $O(e)$ may have endpoints on a path with stable endpoints in $G'=G\setminus O(e)$. \end{lem} \begin{proof} When two edges of $O(e)$ have contact points on a path $p$ with stable endpoints in $G'$, a naive (but effective) idea to try is to detach the two edges from the path, join their ``free'' ends to a common point, and join that common point to the middle of the original path (with the contact points removed) by a new edge $f$. This is illustrated in the first column of Figure \ref{two.endpoints.plan}.
The procedure should be carried out throughout $G$, in the orbit of the path $p$. In this way, the graph obtained has the same genus and is easily seen to have at least as many automorphisms as the original one, and in fact $O(f)$ will have order less than that of $O(e)$ (this is easily seen to be true regardless of the structure of $O(e)$), so $G$ could not have been strictly optimal. There are only two possible cases when this procedure leads to double edges: \begin{enumerate} \item {\bf Problem 1}: (does not happen when $O(e)$ is a disjoint union of stars): Two edges in $O(e)$ end up simultaneously with both ends on paths in the same orbit (see the middle column of Figure \ref{two.endpoints.plan}); assume the path has distinct stable endpoints (for the other case, see {\bf Problem 2} below). However, in this instance replacing both edges by a single one yields a graph $\bar{G}$ which is simple, cubic, has no fewer automorphisms than $G$ (we do this on the whole orbit of the path at once) and has genus $g-\frac{k}{2}$ (where $k=|O(e)|$). Moreover, the orbit of the replacement edge has fewer members, contradicting the strict optimality of $G$. More precisely, (\ref{growth.candidates}) shows that, except in case $g=9\cdot 2^m+2$, $C_{g+1}$ has at least as many automorphisms as $C_g$. Then $|\mrm{Aut}~G|\leq |\mrm{Aut}(C_{g-\frac{k}{2}})|\leq |\mrm{Aut}(C_g)|$; in case of equality throughout, the size of the minimal orbit helps establish the contradiction with the assumed strict optimality of $G$. This works except when $k=2$ and $g=9\cdot 2^m+3$, when the last inequality in the sequence above fails. However, in that case a direct argument can show that $G$ was not optimal: assuming $M(G)\geq 3$ for an optimal $G$ with $g=9\cdot 2^m+2$, and assuming that $|\mrm{Aut}~H|\leq 2^{o(g)}$ for every non-optimal graph $H$ in the same genus, we see that $k=2$ and/or the orbit of $p$ having length two forces $G$ to be non-optimal, so $|\mrm{Aut}~G|\leq 2^{o(g-1)}<2^{o(g)}=|\mrm{Aut}~C_g|$. \item {\bf Problem 2}: The procedure yields a double edge when the path $p$ is a loop, starting and ending at the same stable point. Denoting by $f$ the third edge around this stable point, we see that the orbit of $f$ has order half that of $O(e)$ (contradiction!) unless the above-mentioned {\bf Problem 1} also occurs. But in this case the picture is as in the third column of Figure \ref{two.endpoints.plan}, and the whole configuration may be replaced (see the same figure). Since $f$ was not in the orbit of $e$, this stabilizes the graph locally, and in fact globally, as is easy to see. Moreover, the automorphism group of the new graph (of the same genus as the original one) increases at least twofold, which is a contradiction to the optimality of $G$. Thus this problem does not occur in the optimal graphs.
\end{enumerate} \end{proof} \begin{figure}[ht] \begin{center} $\begin{array}{c@{\hspace{1in}}c@{\hspace{1in}}c} \begin{picture}(60,50) \put(5,5){\circle*{3}} \put(5,5){\line(1,1){10}} \put(15,15){\circle*{3}} \put(5,25){\circle*{3}} \put(5,25){\line(1,-1){10}} \put(15,15){\line(1,0){30}} \put(25,15){\circle*{3}} \put(35,15){\circle*{3}} \put(45,15){\circle*{3}} \put(45,15){\line(1,1){10}} \put(45,15){\line(1,-1){10}} \put(55,25){\circle*{3}} \put(55,5){\circle*{3}} \put(25,15){\line(0,1){15}} \put(35,15){\line(0,1){15}} \put(26,19){\makebox{\scriptsize $e$}} \put(36,19){\makebox{\scriptsize $e$}} \put(15,25){\framebox(30,15){}} \end{picture} & \begin{picture}(60,70) \put(5,5){\circle*{3}} \put(5,5){\line(1,1){10}} \put(15,15){\circle*{3}} \put(5,25){\circle*{3}} \put(5,25){\line(1,-1){10}} \put(15,15){\line(1,0){30}} \put(25,15){\circle*{3}} \put(35,15){\circle*{3}} \put(45,15){\circle*{3}} \put(45,15){\line(1,1){10}} \put(45,15){\line(1,-1){10}} \put(55,25){\circle*{3}} \put(55,5){\circle*{3}} \put(25,15){\line(0,1){30}} \put(35,15){\line(0,1){30}} \put(26,29){\makebox{\scriptsize $e$}} \put(36,29){\makebox{\scriptsize $e$}} \put(5,35){\circle*{3}} \put(5,35){\line(1,1){10}} \put(15,45){\circle*{3}} \put(5,55){\circle*{3}} \put(5,55){\line(1,-1){10}} \put(15,45){\line(1,0){30}} \put(25,45){\circle*{3}} \put(35,45){\circle*{3}} \put(45,45){\circle*{3}} \put(45,45){\line(1,1){10}} \put(45,45){\line(1,-1){10}} \put(55,55){\circle*{3}} \put(55,35){\circle*{3}} \end{picture} & \begin{picture}(30,60) \put(15,5){\circle*{3}} \put(15,5){\line(0,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(-1,1){10}} \put(15,15){\line(1,1){10}} \put(5,25){\circle*{3}} \put(5,35){\circle*{3}} \put(5,25){\line(1,0){20}} \put(5,25){\line(0,1){10}} \put(25,25){\circle*{3}} \put(25,25){\line(0,1){10}} \put(25,35){\circle*{3}} \put(5,35){\line(1,0){20}} \put(5,35){\line(1,1){10}} \put(15,45){\circle*{3}} \put(15,45){\line(1,-1){10}} \put(15,45){\line(0,1){10}} \put(15,55){\circle*{3}} \put(16,9){\makebox{\scriptsize $f$}} \put(16,49){\makebox{\scriptsize $f$}} \put(6,29){\makebox{\scriptsize $e$}} \put(26,29){\makebox{\scriptsize $e$}} \end{picture} \\ \dnarr & \dnarr & \dnarr \\ \begin{picture}(50,60) \put(5,5){\circle*{3}} \put(5,5){\line(1,1){10}} \put(15,15){\circle*{3}} \put(5,25){\circle*{3}} \put(5,25){\line(1,-1){10}} \put(15,15){\line(1,0){20}} \put(25,15){\circle*{3}} \put(35,15){\circle*{3}} \put(25,15){\line(0,1){10}} \put(25,25){\circle*{3}} \put(25,25){\line(-1,1){15}} \put(25,25){\line(1,1){15}} \put(5,35){\framebox(40,15){}} \put(35,15){\line(1,1){10}} \put(35,15){\line(1,-1){10}} \put(45,5){\circle*{3}} \put(45,25){\circle*{3}} \end{picture} & \begin{picture}(50,70) \put(5,5){\circle*{3}} \put(5,5){\line(1,1){10}} \put(15,15){\circle*{3}} \put(5,25){\circle*{3}} \put(5,25){\line(1,-1){10}} \put(15,15){\line(1,0){20}} \put(25,15){\circle*{3}} \put(35,15){\circle*{3}} \put(35,15){\line(1,1){10}} \put(35,15){\line(1,-1){10}} \put(45,25){\circle*{3}} \put(45,5){\circle*{3}} \put(25,15){\line(0,1){30}} \put(26,27){\makebox{\scriptsize $e$}} \put(5,35){\circle*{3}} \put(5,35){\line(1,1){10}} \put(15,45){\circle*{3}} \put(5,55){\circle*{3}} \put(5,55){\line(1,-1){10}} \put(15,45){\line(1,0){20}} \put(25,45){\circle*{3}} \put(35,45){\circle*{3}} \put(35,45){\line(1,1){10}} \put(35,45){\line(1,-1){10}} \put(45,55){\circle*{3}} \put(45,35){\circle*{3}} \end{picture} & \begin{picture}(30,60) \put(15,5){\circle*{3}} \put(15,5){\line(0,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(-1,1){10}} 
\put(15,15){\line(1,1){10}} \put(5,25){\circle*{3}} \put(5,35){\circle*{3}} \put(5,25){\line(2,1){20}} \put(5,25){\line(0,1){10}} \put(25,25){\circle*{3}} \put(25,25){\line(0,1){10}} \put(25,35){\circle*{3}} \put(5,35){\line(2,-1){20}} \put(5,35){\line(1,1){10}} \put(15,45){\circle*{3}} \put(15,45){\line(1,-1){10}} \put(15,45){\line(0,1){10}} \put(15,55){\circle*{3}} \end{picture} \\ \mbox{Usual case} & \mbox{Problem 1} & \mbox{Problem 2} \end{array}$ \end{center} \caption{Stabilizations leading to a double edge. The problems are in the first row, and their solutions in the second.} \label{two.endpoints.plan} \end{figure} Note that the previous lemma implies that any unstable path in $G'$ has length two. Given this, we note, for future reference, what types of situations would lead to double (or triple) edges when stabilizing (the components of) $G'$. There are four classes, three labelled with roman numerals. A subscript on a roman numeral indicates the length of a stable path between the stable endpoints of an unstable path; if the subscript is a plus sign, the vertices are either connected by a stable path of length greater than two, or not connected by a stable path. The figures are drawn with the edges in $O(e)$ labelled ``$e$''. \begin{figure}[ht] \begin{center} $\begin{array}{c@{\hspace{0.5in}}c@{\hspace{0.5in}}c@{\hspace{0.5in}}c} \begin{picture}(50,30) \put(5,15){\circle*{3}} \put(5,15){\line(1,0){10}} \put(15,15){\circle*{3}} \put(15,15){\line(1,1){10}} \put(15,15){\line(1,-1){10}} \put(25,5){\circle*{3}} \put(25,25){\circle*{3}} \put(25,5){\line(0,1){20}} \put(25,5){\line(1,1){10}} \put(25,25){\line(1,-1){10}} \put(35,15){\circle*{3}} \put(35,15){\line(1,0){10}} \put(45,15){\circle*{3}} \put(9,16){\makebox{\scriptsize $e$}} \put(39,16){\makebox{\scriptsize $e$}} \end{picture} & \begin{picture}(80,30) \put(5,15){\circle*{3}} \put(5,15){\line(1,0){10}} \put(15,15){\circle*{3}} \put(15,15){\line(1,1){10}} \put(15,15){\line(1,-1){10}} \put(25,5){\circle*{3}} \put(25,25){\circle*{3}} \put(25,25){\line(1,-1){10}} \put(25,25){\line(4,-1){40}} \put(25,5){\line(1,1){10}} \put(25,5){\line(4,1){40}} \put(35,15){\circle*{3}} \put(35,15){\line(1,0){10}} \put(45,15){\circle*{3}} \put(65,15){\circle*{3}} \put(65,15){\line(1,0){10}} \put(75,15){\circle*{3}} \put(9,16){\makebox{\scriptsize $e$}} \put(39,16){\makebox{\scriptsize $e$}} \put(69,16){\makebox{\scriptsize $f$}} \end{picture} & \begin{picture}(50,30) \put(5,15){\circle*{3}} \put(5,15){\line(1,0){10}} \put(15,15){\circle*{3}} \put(15,15){\line(1,1){10}} \put(15,15){\line(1,-1){10}} \put(25,5){\circle*{3}} \put(25,25){\circle*{3}} \put(25,5){\line(0,-1){5}} \put(25,25){\line(0,1){5}} \put(25,5){\line(1,1){10}} \put(25,25){\line(1,-1){10}} \put(35,15){\circle*{3}} \put(35,15){\line(1,0){10}} \put(45,15){\circle*{3}} \put(9,16){\makebox{\scriptsize $e$}} \put(39,16){\makebox{\scriptsize $e$}} \end{picture} & \begin{picture}(30,30) \put(15,5){\circle*{3}} \put(15,5){\line(-1,1){10}} \put(15,5){\line(0,1){20}} \put(15,5){\line(1,1){10}} \put(15,15){\circle*{3}} \put(15,25){\circle*{3}} \put(15,25){\line(-1,-1){10}} \put(15,25){\line(1,-1){10}} \put(5,15){\circle*{3}} \put(25,15){\circle*{3}} \end{picture} \\ \mbox{I$_1$} & \mbox{I$_2$} & \mbox{I$_+$} & \mbox{$K_{2,3}$} \\ \begin{picture}(30,50) \put(15,5){\circle*{3}} \put(15,5){\line(0,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(-1,2){10}} \put(15,15){\line(1,2){10}} \put(15,25){\circle*{3}} \put(15,25){\line(-1,1){10}} \put(15,25){\line(1,1){10}} \put(5,35){\circle*{3}} 
\put(25,35){\circle*{3}} \put(15,25){\line(0,1){20}} \put(15,45){\circle*{3}} \put(5,35){\line(1,1){10}} \put(25,35){\line(-1,1){10}} \put(16,33){\makebox{\scriptsize $e$}} \put(16,9){\makebox{\scriptsize $f$}} \end{picture} & \begin{picture}(50,30) \put(5,15){\circle*{3}} \put(5,15){\line(1,0){10}} \put(15,15){\circle*{3}} \put(15,15){\line(1,1){10}} \put(15,15){\line(1,-1){10}} \put(25,5){\circle*{3}} \put(25,25){\circle*{3}} \put(25,5){\line(0,1){20}} \put(25,5){\line(1,1){10}} \put(25,25){\line(1,-1){10}} \put(35,15){\circle*{3}} \put(35,15){\line(1,0){10}} \put(45,15){\circle*{3}} \put(26,14){\makebox{\scriptsize $e$}} \end{picture} & \begin{picture}(50,30) \put(5,15){\circle*{3}} \put(5,15){\line(1,0){10}} \put(15,15){\circle*{3}} \put(15,15){\line(1,1){10}} \put(15,15){\line(1,-1){10}} \put(25,5){\circle*{3}} \put(25,25){\circle*{3}} \put(25,5){\line(0,1){20}} \put(25,5){\line(1,1){10}} \put(25,25){\line(1,-1){10}} \put(35,15){\circle*{3}} \put(35,15){\line(1,0){10}} \put(45,15){\circle*{3}} \put(9,16){\makebox{\scriptsize $e$}} \end{picture} & \begin{picture}(50,30) \put(5,5){\circle*{3}} \put(5,5){\line(1,0){10}} \put(5,25){\circle*{3}} \put(5,25){\line(1,0){10}} \put(15,5){\circle*{3}} \put(15,25){\circle*{3}} \put(15,5){\line(0,1){20}} \put(15,5){\line(1,1){10}} \put(15,25){\line(1,-1){10}} \put(25,15){\circle*{3}} \put(25,15){\line(1,0){10}} \put(29,16){\makebox{\scriptsize $e$}} \put(35,15){\circle*{3}} \end{picture} \\ \mbox{II$_2$} & \mbox{II$_+$} & \mbox{III$_2$} & \mbox{III$_+$} \end{array}$ \end{center} \caption{The subgraphs that lead to multiple edges and loops.} \label{nasty.subgraphs} \end{figure} We will discuss when these structures can occur in a strictly optimal graph $G$. All the constructions are assumed to be done throughout the orbit of the unstable path simultaneously. We denote by $\bar{G}$ the graph obtained as a result of the surgery. The following definition is convenient. \begin{defn} Let $H$ be a graph which is cubic except for two vertices of valence two. A {\em pseudocycle} is a cubic graph obtained by replacing the vertices in a cycle with copies of $H$. \end{defn} \begin{lem} An optimal graph does not have a subgraph of type I$_1$. \end{lem} \begin{proof} I$_1$ may only occur when the whole $G$ is a pseudocycle (otherwise the middle edge would have a shorter orbit). Direct computation shows that these graphs are not optimal for $g\geq 9$. \end{proof} \begin{lem}\label{situation.V} A strictly optimal graph has no subgraphs of type I$_2$. \end{lem} \begin{proof} {\bf Case 1:} A strictly optimal $G$ with $O(e)$ a disjoint union of stars cannot have an I$_2$: it is clear that the edge labelled $f$ in Figure \ref{nasty.subgraphs} will have an orbit with fewer elements than $O(e)$. {\bf Case 2:} When $O(e)$ is a disjoint union of $k\geq 2$ edges, the only possible way that an I$_2$ could occur in $G$ is if members of $O(e)$ alternate with I$_2$ to form a pseudocycle (otherwise again the edge labelled $f$ would have a smaller orbit). There are two subcases: \begin{enumerate}[a] \item The pseudocycles have exactly two I$_2$. We will create a new graph whose minimal orbit is smaller, contradicting strict optimality of $G$. Each pseudocycle is connected to the rest of $G$ by the edges labelled $f$ in the figure. If these two edges actually coincide, the graph $G$ is one pseudocycle, and direct computation excludes it from optimality. So we may assume that each pseudocycle has two distinct edges connecting it to the rest of the graph.
A pseudocycle has local genus five and local automorphism group of order eight (sixteen, if the two $f$s may be flipped). Remove the pseudocycle and replace it with a triangle connected by two vertices to the two $f$s and whose third vertex is connected to a graph $A_1$. This does not change the genus or number of automorphisms of the graph, but the edge connecting the triangle to the $A_1$ now has a smaller orbit than the previous $e$. \item If the pseudocycle has at least three I$_2$, we replace it with a cycle whose length is equal to the number of I$_2$. In this way the new graph, cubic and simple, has lost exactly a factor of $2^k$ in automorphisms (those permuting the stable endpoints of the unstable paths), but has also lost $2k$ in genus. By induction, if $\bar{g}\geq 9$ we have $|\mrm{Aut}~\bar{G}|\leq 3\cdot 2^{o(g-2k)}$ so $|\mrm{Aut}~G|\leq 3\cdot 2^{k+o(g-2k)}\leq 3\cdot 2^{o(g)-o(k)}$. Since $k\geq 3$, the last quantity is clearly less than $2^{o(g)}$, so $G$ could not have been optimal. If, however, $\bar{g}\leq 8$, we must allow $\mu(\bar{G})$ up to nine, so we can only derive the contradiction to the optimality of $G$ when $k\geq 6$; if $\mu(\bar{G})\leq 6$, then the contradiction happens as soon as $k\geq 4$. So we are left with a few cases to consider: \begin{itemize} \item $\mu(\bar{G})=9$ and $k=4,5$. Then $\bar{g}=4$, so $g=12$ or $14$; $|\mrm{Aut}~\bar{G}|\leq 72$. In the first case, $|\mrm{Aut}~G|\leq 72\cdot 2^4<6\cdot 2^9=|\mrm{Aut}~C_{12}|$, so $G$ was not optimal; in the second case $|\mrm{Aut}~G|\leq 72\cdot 2^5<2^{12}\leq|\mrm{Aut}~C_{14}|$, so again $G$ was not optimal. \item $\mu(\bar{G})\leq 6$ and $k=3$. Then $g\leq 14$, and the table (\ref{low.genus.table}) shows that $|\mrm{Aut}~G|<|\mrm{Aut}~C_g|$, so $G$ is not optimal. \end{itemize} \end{enumerate} \end{proof} \begin{lem} Assume Theorem $A_h$ for $9\leq h<g$. Then an optimal graph of genus $g$ does not have a subgraph of type I$_+$. \end{lem} \begin{proof} {\bf Case 1:} Suppose $O(e)$ is a disjoint union of $k$ stars, so the components of $G'$ are all isomorphic. If the components of $G'$ are pairs of squares connected by two edges, diagonally opposite each other, we may replace these components as in Figure \ref{iplusnotsquare} to gain automorphisms without changing the genus, contradicting optimality. Otherwise, one may collapse each I$_+$ as in Figure \ref{iplusnotsquare}. The resulting graph is cubic and simple, and has at least as many automorphisms as $G$; however, its genus is $g-\frac{3k}{2}$, and then (\ref{growth.candidates}) and (\ref{mu.table}) show that $G$ could not have been optimal. {\bf Case 2:} The same surgery as above may be done when $O(e)$ is a disjoint union of $k$ isolated edges (under the same restriction as above). \begin{itemize} \item If the two unstable vertices of an I$_+$ (an {\em eye}, for short) are not in the same orbit, then the existence of a pseudocycle formed of edges in $O(e)$ and the I$_+$ to which they are incident is immediate; in fact the whole orbit $O(e)$ will be partitioned into edges arranged in such isomorphic pseudocycles. If a pseudocycle contains at least three I$_+$, do the same surgery as above; the decrease in genus overall is precisely $k\geq 2$, so again $G$ could not have been optimal. If a pseudocycle contains only two I$_+$, then the surgery is modified to unite by an edge the two vertices to which the eyes were contracted. This time the decrease in genus is at least three, so again $G$ could not have been optimal.
\item If the two unstable vertices of an eye are in the same orbit, but the other ends of the two incident edges are not in their orbit, then the surgery above can be done at the orbit of the unstable path without leading to double edges; the decrease in genus is $\frac{k}{2}$ (it is easy to see that $k$ must be even), and the only case requiring consideration is when $k=2$ (when the difference in genus is only one) and $\mu(\bar{G})>1$; but then $M(\bar{G})\geq 3$ by induction, so we reach a contradiction. \item If the ends of the edges in $O(e)$ are in the same orbit, we see again the existence of pseudocycles in $O(e)$ and we continue the argument as above; $G$ could not be optimal. \end{itemize} In the case of ``cylinders'' (as in Case 1, the left side of the figure) we may replace as in the case when $O(e)$ was a disjoint union of stars, and see that $G$ was not optimal. This completes the proof. \end{proof} \begin{figure}[ht] \begin{center} $\begin{array}{c@{\hspace{0.25in}}c@{\hspace{0.25in}}c@{\hspace{0.5in}}c@{\hspace{0.25in}}c@{\hspace{0.25in}}c} \begin{picture}(30,100) \put(15,5){\line(0,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(-1,1){10}} \put(15,15){\line(1,1){10}} \put(5,25){\circle*{3}} \put(25,25){\circle*{3}} \put(5,25){\line(1,1){10}} \put(25,25){\line(-1,1){10}} \put(15,35){\circle*{3}} \put(15,35){\line(0,1){10}} \put(5,25){\line(0,1){50}} \put(25,25){\line(0,1){50}} \put(5,75){\circle*{3}} \put(25,75){\circle*{3}} \put(5,75){\line(1,1){10}} \put(5,75){\line(1,-1){10}} \put(25,75){\line(-1,1){10}} \put(25,75){\line(-1,-1){10}} \put(15,65){\circle*{3}} \put(15,65){\line(0,-1){10}} \put(15,85){\circle*{3}} \put(15,85){\line(0,1){10}} \end{picture} & \begin{picture}(15,5)(0,-45) \put(0,3){\vector(1,0){15}} \end{picture} & \begin{picture}(30,80)(0,-10) \put(5,5){\line(1,1){10}} \put(25,5){\line(-1,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(0,1){10}} \put(15,25){\circle*{3}} \put(15,25){\line(-1,1){10}} \put(15,25){\line(1,1){10}} \put(5,35){\circle*{3}} \put(25,35){\circle*{3}} \put(5,35){\line(0,1){10}} \put(5,35){\line(2,1){20}} \put(25,35){\line(0,1){10}} \put(25,35){\line(-2,1){20}} \put(5,45){\circle*{3}} \put(25,45){\circle*{3}} \put(5,45){\line(1,1){10}} \put(25,45){\line(-1,1){10}} \put(15,55){\circle*{3}} \put(15,55){\line(0,1){10}} \put(15,65){\circle*{3}} \put(15,65){\line(-1,1){10}} \put(15,65){\line(1,1){10}} \end{picture} & \begin{picture}(50,50)(0,-25) \put(5,25){\line(1,0){10}} \put(15,25){\circle*{3}} \put(15,25){\line(1,-1){10}} \put(15,25){\line(1,1){10}} \put(25,35){\circle*{3}} \put(25,15){\circle*{3}} \put(25,35){\line(0,1){10}} \put(25,15){\line(0,-1){10}} \put(25,35){\line(1,-1){10}} \put(25,15){\line(1,1){10}} \put(35,25){\circle*{3}} \put(35,25){\line(1,0){10}} \put(9,26){\makebox{\scriptsize $e$}} \put(39,26){\makebox{\scriptsize $e$}} \put(26,9){\makebox{\scriptsize $h$}} \put(26,39){\makebox{\scriptsize $f$}} \end{picture} & \begin{picture}(15,5)(0,-45) \put(0,3){\vector(1,0){15}} \end{picture} & \begin{picture}(40,30)(0,-35) \put(5,5){\line(1,1){10}} \put(15,15){\circle*{3}} \put(5,25){\line(1,-1){10}} \put(15,15){\line(1,0){10}} \put(25,15){\circle*{3}} \put(25,15){\line(1,1){10}} \put(25,15){\line(1,-1){10}} \put(11,7){\makebox{\scriptsize $h$}} \put(11,21){\makebox{\scriptsize $f$}} \put(33,9){\makebox{\scriptsize $e$}} \put(33,19){\makebox{\scriptsize $e$}} \end{picture} \end{array}$ \end{center} \caption{Surgeries for graphs of type I$_+$.} \label{iplusnotsquare} \end{figure} \begin{lem} An optimal graph contains at most one
subgraph of type III$_+$. Furthermore, if a subgraph of type III$_2$ occurs in an optimal graph, it is unique, and so may be considered as a subgraph II$_+$. \end{lem} \begin{proof} The argument for subgraphs of type III$_+$ follows the line of those above. In this case, the surgery is to collapse the triangle in each III$_+$ to a point. The resulting graph has the same automorphisms, and lower genus, which contradicts optimality as above if there are multiple III$_+$ in the graph (if there is only one, the genus does not decrease enough to apply (\ref{growth.candidates})). Note that these subgraphs do appear in certain of the $C_g$. The middle edge of the graph III$_2$ moves as much as the edge labelled $e$. If the orders of their orbits are equal, we may shift attention to the middle edge and think of the III$_2$ as a II$_+$. In this case, the result follows from the next lemma. The only way that the orbit of $e$ could be smaller than the orbit of the middle edge is if a configuration as in Figure \ref{breaking.iii2} occurs, and the same figure gives a surgical solution to this problem. \end{proof} \begin{figure}[ht] \begin{center} $\begin{array}{c@{\hspace{1in}}c@{\hspace{1in}}c} \begin{picture}(80,30)(0,-15) \put(5,15){\line(1,0){10}} \put(15,15){\circle*{3}} \put(15,15){\line(1,1){10}} \put(15,15){\line(1,-1){10}} \put(25,5){\circle*{3}} \put(25,25){\circle*{3}} \put(25,5){\line(0,1){20}} \put(25,5){\line(1,1){10}} \put(25,25){\line(1,-1){10}} \put(35,15){\circle*{3}} \put(35,15){\line(1,0){10}} \put(45,15){\circle*{3}} \put(45,15){\line(1,1){10}} \put(45,15){\line(1,-1){10}} \put(55,5){\circle*{3}} \put(55,25){\circle*{3}} \put(55,5){\line(0,1){20}} \put(55,5){\line(1,1){10}} \put(55,25){\line(1,-1){10}} \put(65,15){\circle*{3}} \put(65,15){\line(1,0){10}} \end{picture} & \begin{picture}(15,5)(0,-25) \put(0,3){\vector(1,0){15}} \end{picture} & \begin{picture}(40,60) \put(10,5){\line(1,0){20}} \put(20,5){\circle*{3}} \put(20,5){\line(0,1){10}} \put(20,15){\circle*{3}} \put(20,15){\line(-1,2){15}} \put(20,15){\line(1,2){15}} \put(5,45){\circle*{3}} \put(5,45){\line(1,1){10}} \put(5,45){\line(1,-1){10}} \put(15,55){\circle*{3}} \put(15,35){\circle*{3}} \put(15,55){\line(1,0){10}} \put(15,35){\line(1,0){10}} \put(25,55){\circle*{3}} \put(25,35){\circle*{3}} \put(15,35){\line(1,2){10}} \put(15,55){\line(1,-2){10}} \put(25,35){\line(1,1){10}} \put(25,55){\line(1,-1){10}} \put(35,45){\circle*{3}} \end{picture} \end{array}$ \end{center} \caption{Surgery for graphs of type III$_2$.} \label{breaking.iii2} \end{figure} \begin{lem} An optimal graph may have at most one II$_+$. \end{lem} \begin{proof} First of all, replacing each II$_+$ by a single edge cannot produce a double edge or a loop (producing a loop would mean that we had a II$_2$). This is because either two such II$_+$ share the vertices at distance one from their stable ends, or there is a shortcut (edge) between those vertices (at distance one from their stable ends); we note that, if three such II$_+$ share the vertices at distance one from their stable endpoints, this configuration is the whole graph, of genus eight, and excluded from consideration. The surgeries in these two cases are depicted in Figure \ref{no.two.theta}. In the first case, the order of the orbit of a minimal edge is decreased without changing genus or automorphisms, contradicting strict optimality. In the second case, the surgery produces a graph with a larger automorphism group.
Then, if $k=|O(e)|\geq 2$, the II$_+$ could not have adjacent vertices: more than three II$_+$ could only be adjacent if they form a cycle (the whole of $G$!) due to the requirement that their middle edges should be in the same orbit; then $|\mrm{Aut}~G|\leq 2k\cdot 2^k<2^{o(2k+1)}$ for $k\geq 5$ and $|\mrm{Aut}~G|<|\mrm{Aut}~C_9|$ for $k=4$. And if only two II$_+$ were adjacent, the same surgery as in the previous lemma (see Figure \ref{breaking.iii2}) can be done. Thus if more than two II$_+$ exist in a strictly optimal $G$, they are not adjacent, and the vertices at distance one from their stable endpoints are not neighbors; we may then replace the II$_+$ by single edges; the automorphism group decreases in order by a factor of $2^k$, the resulting graph is simple and cubic, and of genus $g-2k$. Then the same discussion as in the case of the I$_2$ shows that $G$ could not have been optimal. \end{proof} \begin{figure}[ht] \begin{center} $\begin{array}{c@{\hspace{0.25in}}c@{\hspace{0.25in}}c@{\hspace{1in}}c@{\hspace{0.25in}}c@{\hspace{0.25in}}c} \begin{picture}(70,70)(0,-5) \put(5,35){\line(1,0){10}} \put(15,35){\circle*{3}} \put(15,35){\line(1,2){10}} \put(15,35){\line(1,-2){10}} \put(25,55){\circle*{3}} \put(25,15){\circle*{3}} \put(25,55){\line(1,1){10}} \put(25,55){\line(1,-1){10}} \put(25,15){\line(1,1){10}} \put(25,15){\line(1,-1){10}} \put(35,65){\circle*{3}} \put(35,45){\circle*{3}} \put(35,5){\circle*{3}} \put(35,25){\circle*{3}} \put(35,65){\line(1,-1){10}} \put(35,65){\line(0,-1){20}} \put(35,45){\line(1,1){10}} \put(35,25){\line(1,-1){10}} \put(35,25){\line(0,-1){20}} \put(35,5){\line(1,1){10}} \put(45,55){\circle*{3}} \put(45,15){\circle*{3}} \put(45,55){\line(1,-2){10}} \put(45,15){\line(1,2){10}} \put(55,35){\circle*{3}} \put(55,35){\line(1,0){10}} \end{picture} & \begin{picture}(15,5)(0,-35) \put(0,3){\vector(1,0){15}} \end{picture} & \begin{picture}(30,80) \put(5,5){\line(1,0){20}} \put(15,5){\circle*{3}} \put(15,5){\line(0,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(1,1){10}} \put(15,15){\line(-1,1){10}} \put(5,25){\circle*{3}} \put(5,25){\line(1,0){20}} \put(25,25){\circle*{3}} \put(5,25){\line(1,1){10}} \put(25,25){\line(-1,1){10}} \put(15,35){\circle*{3}} \put(15,35){\line(0,1){10}} \put(15,45){\circle*{3}} \put(15,45){\line(-1,2){10}} \put(15,45){\line(1,2){10}} \put(5,65){\circle*{3}} \put(25,65){\circle*{3}} \put(5,65){\line(1,1){10}} \put(5,65){\line(1,-1){10}} \put(15,75){\line(0,-1){20}} \put(15,75){\circle*{3}} \put(15,75){\line(1,-1){10}} \put(15,55){\circle*{3}} \put(15,55){\line(1,1){10}} \end{picture} & \begin{picture}(50,40)(0,-20) \put(5,5){\line(1,0){40}} \put(15,5){\circle*{3}} \put(15,5){\line(0,1){20}} \put(35,5){\circle*{3}} \put(35,5){\line(0,1){20}} \put(15,25){\circle*{3}} \put(35,25){\circle*{3}} \put(15,25){\line(1,1){10}} \put(15,25){\line(1,-1){10}} \put(25,35){\circle*{3}} \put(25,15){\circle*{3}} \put(25,35){\line(1,-1){10}} \put(25,15){\line(1,1){10}} \put(25,15){\line(0,1){20}} \end{picture} & \begin{picture}(15,5)(0,-35) \put(0,3){\vector(1,0){15}} \end{picture} & \begin{picture}(30,50)(0,-15) \put(5,5){\line(1,0){20}} \put(15,5){\circle*{3}} \put(15,5){\line(0,1){10}} \put(15,15){\circle*{3}} \put(15,15){\line(-1,2){10}} \put(15,15){\line(1,2){10}} \put(5,35){\circle*{3}} \put(25,35){\circle*{3}} \put(5,35){\line(1,1){10}} \put(5,35){\line(1,-1){10}} \put(15,45){\line(0,-1){20}} \put(15,45){\circle*{3}} \put(15,45){\line(1,-1){10}} \put(15,25){\circle*{3}} \put(15,25){\line(1,1){10}} \end{picture} \end{array}$ \end{center}
\caption{Surgeries for multiple II$_+$.} \label{no.two.theta} \end{figure} To eliminate most subgraphs of type II$_2$ from consideration, we refine our notion of well-chosen. Note that our candidate graphs contain many copies of II$_2$, but with $e$ not minimal. We will avoid the multiple edges which occur as a result of stabilizing II$_2$ by choosing the edge labelled $f$ in the defining figure as the minimal edge. Arguments as above show that there is no problem in doing so. Henceforth, a well-chosen edge will also be subject to this restriction. \begin{lem}\label{triple.edges} For any genus $g\geq 9$, a strictly optimal $G$ and a well-chosen $O(e)$, $G'$ contains at most one component which is a $K_{2,3}$. \end{lem} \begin{proof} Let $k$ be the number of $K_{2,3}$'s in a resulting $G'$ (note that these must be connected components). The extra symmetry brought by these inside $G$ is that even if their valence two vertices are fixed, there is still a swapping of their stable vertices possible. We proceed to eliminate this and discuss the result: replace therefore each $K_{2,3}$ by a vertex, to which the edges incident to the original $K_{2,3}$ will be linked. This results in a decrease of the order of the automorphism group by a factor of exactly $2^k$, from the elimination of the swapping mentioned above. However, at the same time the genus has dropped by $2k$. The graph $\bar{G}$ thus obtained is easily seen to be simple and cubic, except if two edges incident to one of the collapsed $K_{2,3}$'s have a common endpoint, or are incident to another collapsed $K_{2,3}$. In the first case $O(e)$ must have been a disjoint union of stars, and then actually only one of them, so $g=4$; in the second case, all components of $G'$ must have been isomorphic; the transitivity of the action of $\mrm{Aut}~G$ on $O(e)$ then actually forces the two components linked by two edges in $O(e)$ to be linked by three edges in $O(e)$, and this is all of $G$. Then $g=6$. Both cases are outside our considerations; therefore $\bar{G}$ is simple and cubic. Let $\bar{g}$ be the genus of $\bar{G}$. By induction $\mu(\bar{g})\leq 9$, with equality only when $\bar{g}=4$; moreover $4\neq \bar{g}\leq 8$ implies $\mu(\bar{g})\leq 6$ and $\bar{g}\geq 9$ implies $\mu(\bar{g})\leq 3$. We are then studying the inequality $\mu(\bar{g})\cdot 2^{o(g-2k)+k}\leq 2^{o(g)}$, or equivalently $\mu(\bar{g})\leq 2^{k-l(g)+l(g-2k)}$; this is implied by $\mu(\bar{g})\leq 2^{o(k)}$. \begin{itemize} \item $k\geq 6$ makes the last inequality strict even for $\mu(\bar{g})=9$, while $k\geq 4$ makes the last inequality strict for $\mu(\bar{g})\leq 6$. \item If $\bar{g}=4$, $|\mrm{Aut}~\bar{G}|\leq 72$; we need to study the cases $k\leq 5$. If $k=1,2$, then $g\leq 8$ -- too small. If $k=5$, $g=14$ and $|\mrm{Aut}~G|\leq 72\cdot 2^5<|\mrm{Aut}~C_{14}|$, so $G$ was not optimal. If $k=4$, $g=12$ and $|\mrm{Aut}~G|\leq 72\cdot 2^4< |\mrm{Aut}~C_{12}|$, so again $G$ was not optimal. If $k=3$, $g=10$; here $72\cdot 2^3>|\mrm{Aut}~C_{10}|$, so we need to make use of the marking mentioned in (\ref{disc}): the three contracted $K_{2,3}$'s must have been in the same orbit. Referring to a table of cubic graphs of low genus, we see that either $|\mrm{Aut}~\bar{G}|=12$, in which case $|\mrm{Aut}~G|<|\mrm{Aut}~C_{10}|$, or $\bar{G}=K_{3,3}$, so the three vertices representing the contracted $K_{2,3}$'s (which must not be neighbors, by the discussion on the simplicity of $\bar{G}$) fill one of the two sets of the partition.
Then the marking of this partition cuts in half the number of automorphisms of $\bar{G}$, so $|\mrm{Aut}~G|\leq 36\cdot 2^3<|\mrm{Aut}~C_{10}|$, and again $G$ could not have been optimal. \item If $4\neq \bar{g}\leq 8$, we need to worry about $k\leq 3$. \begin{itemize} \item $k=1$ is only possible when $\bar{g}=7,8$. In the first situation, $|\mrm{Aut}~G|\leq 64\cdot 2 < |\mrm{Aut}~C_9|$, so $G$ was not optimal. In the second situation, the marking of the unique vertex introduced by contracting the $K_{2,3}$ is easily seen to cut at least in half the order of $\mrm{Aut}~\bar{G}$; then $|\mrm{Aut}~G|\leq 168\cdot 2 < |\mrm{Aut}~C_{10}|$, so again $G$ was not optimal. \item $k=2$ is possible for $5\leq \bar{g}\leq 8$. In all cases except when $\bar{G}$ is the Petersen graph ($\bar{g}=6$), one easily reaches the conclusion (using (\ref{low.genus.table})) that $G$ was not optimal. Using the marking (two marked points) in the case of the Petersen graph severely cuts the number of available automorphisms: $|\mrm{Aut}~G|\leq 12\cdot 2^2< |\mrm{Aut}~C_{10}|$. \item $k=3$ and $4\neq \bar{g}\leq 8$ is again easily shown using (\ref{low.genus.table}) to lead directly (without discussing markings) to the conclusion that $G$ was not optimal. \end{itemize} \item $\bar{g}\geq 9$; then by induction $\mu(\bar{g})\leq 3$, and only the cases $k=1$ and $k=2$ do not lead to the immediate conclusion that $G$ was not optimal (otherwise $o(k)\geq 2$). When $k=2$, the graph $G'$ has at most three components, and after removing one of the two $K_{2,3}$'s and its incident edges the graph is still connected, simple and cubic (since there is at most one double edge that could occur in its stabilization, by the previous reductions); the surgery drops the genus by four, while overall $|\mrm{Aut}~G|$ drops fourfold (twice from the interchanging of the two $K_{2,3}$'s, and twice from the swapping of the two stable points of the removed $K_{2,3}$). Since $o(g)\geq o(g-4)+3$, this alone is not quite enough; but reducing the final $K_{2,3}$ as well drops the genus by another two, while losing only a factor of two. Overall, we get a drop of six in genus, and a drop of eight in the order of the automorphism group. If $\bar{g}-2\geq 9$, then $o(g)\geq o(g-6)+5$, so $2^{o(g)}> \mu(\bar{G'})\cdot 2^{o(g-6)}$ and $G$ was not optimal; otherwise, we argue as above to reach the same conclusion. \end{itemize} \end{proof} To summarize the reductions so far: \begin{prop}\label{reduction} In a strictly optimal $G$ of genus $g\geq 9$, a minimal $O(e)$ may be chosen in such a way that $G'$ may be stabilized to a simple graph, with the exception of the following situations: \begin{enumerate} \item $k=|O(e)|=1$ and $e$ is in the middle of a II$_+$; it is clear that the edges leaving from the stable ends of the II$_+$ can be at most swapped by $\mrm{Aut}~G$, but cannot move anywhere else inside $G$. Moreover, in this case $G'$ is clearly connected, but stabilizing it would produce a double edge; furthermore, the vertices at distance one from the endpoints of the II$_+$ are not connected by an edge. \item (for $g\geq 10$) $k=|O(e)|=1$ and this edge is incident to precisely one II$_2$; $G'$ is disconnected, but we know precisely an isomorphism class of components of $G'$ (it is easy to see that not all connected components of $G'$ could be isomorphic; one would get two components and the genus would be seven, too low for our considerations).
\item $G'$ contains cycles; since these cycles have length at least three, $|O(e)|\geq 3$, so by the discussion above, the other components of $G'$ must stabilize properly (or be cycles themselves). \item $G'$ may contain a unique $K_{2,3}$ as a component. \end{enumerate} \end{prop} \ \noindent As an immediate consequence, we may choose $O(e)$ minimal in such a way that one of the following is true: \begin{itemize} \item The components of $G'$ of genus greater than two stabilize to simple cubic graphs. \item $G'$ has components that are cycles, but the components that are not stabilize to simple cubic graphs. \item $G'$ is connected, but stabilizing it leads to a (unique) double edge (this is when a unique II$_+$ occurs). \item $G'$ is disconnected, and one component is a II$_2$ which stabilizes to a graph with a double edge. \item $G'$ contains a unique $K_{2,3}$ as a component. \end{itemize} \begin{rmk}(Enforcing strictness)\label{enforcing.strictness} In certain situations, when the geometry allows, we will show that some graphs cannot be strictly optimal by the following constructions. \begin{enumerate} \item $G'$ is made up of three components that stabilize to simple cubic graphs and $O(e)$ is a disjoint union of two stars, each incident exactly once to each component. The two points of contact of each component with the stars must be in the same orbit; for optimality of $G$ it is necessary that fixing one of the points will fix the other (i.e. the two pinched edges where the incidence occurs must always move together under the action of $\mrm{Aut}~G$). Then we detach the stars, link their ends incident to each component together to form a $K_{2,3}$, and link each of these new vertices with an edge to any one of the previous incidence points; we then stabilize (removing the pinch points at the other points of detachment). Then the automorphism group of the new graph is at least as large as that of the initial one, but clearly the minimal orbit has decreased in order; thus $G$ was not strictly optimal. \item $G'$ is made up of two components that stabilize to simple cubic graphs, and $O(e)$ is a disjoint union of two or four edges. Regardless of whether the two components of $G'$ are isomorphic, the incidence points of the edges in $O(e)$ with each component are in the same orbit, and their set is preserved by $\mrm{Aut}~G$; moreover, the optimality of $G$ will dictate that fixing one incidence point will necessarily fix the others on its component; this implies that once an edge in $O(e)$ is fixed, the others will be fixed as well. \begin{enumerate} \item If $k=|O(e)|=2$, then we detach the ends from one component, join them, and link the resulting vertex with an edge to either of the initial incidence points, while stabilizing the other. \item If $k=4$ we detach all the edges in $O(e)$, link the unstable vertices of a $K_{3,3}$ with an edge removed to the endpoints of one of the edges in $O(e)$, and stabilize the remaining endpoints. \end{enumerate} In both situations the new graph has the same genus and at least as many automorphisms, but the minimal orbit has strictly smaller order; thus again $G$ was not strictly optimal. \item $G'$ is made up of three components (all stabilizing to simple cubic graphs), two of which are isomorphic, each linked by two edges to the third one.
Then as above we may detach the edges from the two isomorphic components, join their free ends, and link the resulting vertices to one of the initial incidence vertices (stabilizing the other). As above, this is easily seen to contradict the strict optimality of $G$. \end{enumerate} \end{rmk} \section{Proof of the Main Theorem} We will repeatedly use an {\em exhausting subgraphs} argument. This entails choosing a connected component (star or edge) in $O(e)$, fixing its orientation (when the endpoints are in the same orbit) and then gradually enlarging the subgraph obtained at a certain stage by choosing one of its tails and adding whole components either of $O(e)$ or of $G'$ reached by that tail. When a component of $G'$ is added, we will include in the new subgraph only the edges of $O(e)$ incident to it, and of these, in case $O(e)$ is a union of stars, only those that do not lead to stars whose center is already a vertex of the previous subgraph (in order to avoid unnecessarily cutting down the number of tails). At each step we look at the relative gain in the automorphism group. If a star is included at that step, then one of its edges is already fixed by the initial subgraph, so there can be at most a twofold increase in the order of the automorphism group at such a stage; moreover, such an increase occurs only when none of the vertices of the star was part of the subgraph at the beginning of the stage. If, however, a component is included at a certain step, then one of its vertices (which has valence two in $G'$) is already fixed, and that limits its symmetry; in other words, the automorphisms of the new subgraph fixing the previous one are precisely those fixing the incidence point. Once all these automorphisms are taken into account, all the edges incident to that component do not have extra freedom (they move where their incidence point moves), so they may be added without further increase in the order of the automorphism group of the subgraph. Unless otherwise noted, we will always expand the subgraphs by including whole components of $G'$ if the possibility exists (i.e. when not all tails of the subgraph obtained so far are centers of stars in $O(e)$). During the course of the proofs, it will sometimes be convenient to disallow certain automorphisms of a graph. In particular, sometimes we will collapse a cycle to a vertex, but we only want to remember the automorphisms of the resulting structure which come from the cycle. We will call the resulting vertex a {\em vertex with dihedral symmetry} to indicate that we do not allow the more general automorphisms in the contracted graph. At this time, we introduce the following theorem, which will also be proved and used inductively in the course of this section: \begin{thmb} Suppose $g\geq 9$. If $|\mrm{Aut}~C_g|>2^{o(g)}$ or $g$ is a power of two, there is a unique strictly optimal graph of genus $g$, unless $g=10$. Moreover, there is a unique graph $G$ for which $\pi(G)=2^{o(g)}$ in the cases $g=2^m, 3\cdot 2^m$ and $3(2^m+2^p)$. \end{thmb} \begin{rmk} The case $g=10$ is a real exception. A graph different from $C_{10}$ with the same number of automorphisms is depicted in Figure \ref{genus10}. In the cases where $|\mrm{Aut}~C_g|=2^{o(g)}$, non-uniqueness is the norm: a simple example occurs for $g=340$ ($101010100$ in binary). The candidate graph has four ``tails'' which can be arranged in three non-isomorphic ways around the edges of a binary tree with four ends.
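The arithmetic of these binary groupings is easy to mechanize. The following is a minimal sketch (an illustration only, not used in the proofs; it assumes that $l(g)$ counts the parts of the left-to-right grouping into $11$'s, $10$'s, and a possible leftover $1$ used in the general case of (\ref{candidates}), and that $o(g)=g-l(g)$, in agreement with the tables and with the example of genus $57$): it recovers the four parts of $g=340$ mentioned above, the $o$ column of the tables, and the estimate $2^{o(g)}>384(g-1)$ for $g\geq 16$ used to eliminate edge-transitive graphs.
\begin{verbatim}
def parts(g):
    # left-to-right grouping of the binary expansion into groups 11 and 10,
    # with a possible single 1 left over at the end
    bits, out, i = bin(g)[2:], [], 0
    n = len(bits)
    while i < n:
        if bits[i] == '0':
            i += 1
        elif i + 1 < n and bits[i + 1] == '1':
            out.append(3 * 2 ** (n - i - 2))  # a group 11
            i += 2
        elif i + 1 < n:
            out.append(2 ** (n - i - 1))      # a group 10
            i += 2
        else:
            out.append(1)                     # a single 1 left over
            i += 1
    return out

def l(g): return len(parts(g))
def o(g): return g - l(g)

print(parts(57), o(57), o(56), o(58))  # [48, 8, 1] 54 54 55
print(parts(340))                      # [256, 64, 16, 4]: four "tails"
print([o(g) for g in range(3, 9)])     # [2, 3, 3, 5, 5, 7], as in the table
assert all(2 ** o(g) > 384 * (g - 1) for g in range(16, 5000))
\end{verbatim}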
\end{rmk} \begin{figure}[ht] \begin{center} \begin{picture}(80,70) \put(5,15){\circle*{3}} \put(5,15){\line(1,-1){10}} \put(5,15){\line(1,1){10}} \put(15,5){\circle*{3}} \put(15,25){\circle*{3}} \put(15,5){\line(1,0){10}} \put(15,25){\line(1,0){10}} \put(15,5){\line(1,2){10}} \put(15,25){\line(1,-2){10}} \put(25,5){\circle*{3}} \put(25,25){\circle*{3}} \put(25,5){\line(1,1){10}} \put(25,25){\line(1,-1){10}} \put(35,15){\circle*{3}} \put(35,15){\line(1,0){10}} \put(45,15){\circle*{3}} \put(45,15){\line(1,-1){10}} \put(45,15){\line(1,1){10}} \put(55,5){\circle*{3}} \put(55,25){\circle*{3}} \put(55,5){\line(1,0){10}} \put(55,25){\line(1,0){10}} \put(55,5){\line(1,2){10}} \put(55,25){\line(1,-2){10}} \put(65,5){\circle*{3}} \put(65,25){\circle*{3}} \put(65,5){\line(1,1){10}} \put(65,25){\line(1,-1){10}} \put(75,15){\circle*{3}} \put(25,55){\circle*{3}} \put(25,55){\line(1,-1){10}} \put(25,55){\line(1,1){10}} \put(35,45){\circle*{3}} \put(35,65){\circle*{3}} \put(35,45){\line(1,0){10}} \put(35,65){\line(1,0){10}} \put(35,45){\line(1,2){10}} \put(35,65){\line(1,-2){10}} \put(45,45){\circle*{3}} \put(45,65){\circle*{3}} \put(45,45){\line(1,1){10}} \put(45,65){\line(1,-1){10}} \put(55,55){\circle*{3}} \put(5,15){\line(1,2){20}} \put(75,15){\line(-1,2){20}} \end{picture} \end{center} \caption{An optimal non-$C_{10}$} \label{genus10} \end{figure} We use subscripts on B in the same way as they are used on A. Throughout this section, we assume that Theorems A$_h$ and B$_h$ are true for all $h$ less than the $g\geq 9$ under consideration. As it relies on estimates based on the arithmetic of $g$, the proof of the Main Theorem is somewhat tedious. Here is the outline, with details filled in by the lemmas that occupy the rest of this section. \begin{proof}[Proof of Theorems A and B] As usual, the proof is divided into two cases: when $O(e)$ is a disjoint union of stars, and when $O(e)$ is a disjoint union of edges. {\bf Case 1:} Suppose $O(e)$ is a disjoint union of stars. Then Lemma \ref{stars.and.genus.more.than.3} will show that if $O(e)$ consists of more than one star, $G$ is not strictly optimal. Then by the reductions of the previous section, we may remove the star, disconnecting the graph into subgraphs of lower genus. Lemma \ref{stars.and.genus.more.than.3} will show furthermore that the genus of these subgraphs is quite restricted. The order of the automorphism group of $G$ in this case will be six (for the star) times the product of the orders of the automorphism groups of the components, so the stabilizations of the components must be optimal. We may then proceed by induction: the components are of smaller genus and we know the optimal pinched graphs in these genera, hence we get a bound for the automorphism group. The second part of Theorem A about the pinched graphs follows similarly. Theorem B will also follow by applying it inductively to the components when necessary. Thus, essentially, Case 1 is covered by the Lemma and induction hypotheses. {\bf Case 2:} If $O(e)$ is a disjoint union of edges, we are in a much more restricted situation. First of all, suppose that removing $O(e)$ and stabilizing results in a non-simple $G'$. We have classified the possible behaviors in the previous section: if $G'$ remains connected, it has a unique subgraph II$_+$. Replacing this entire subgraph by an edge, we reduce the genus, apply induction to the resulting graph (which no longer leads to an unstable $G'$), and close this case.
If $G'$ is disconnected, we either have the case of a unique II$_2$ (touching the minimal orbit; clearly there can be many II$_2$ in an optimal graph), in which case we may remove $e$ and concentrate on the component which stabilizes well, again proving the theorems by induction, or the case of a unique $K_{2,3}$ component in $G'$ (before stabilization). Again, we use induction on the components of $G'$ that are not $K_{2,3}$ and obtain the result. Therefore, we may assume that stabilizing $G'$ does not introduce loops or multiple edges. This finally breaks into two subcases: either $G'$ contains cycles or it does not. Lemma \ref{all.cycles} shows that $G'$ is not a disjoint union of cycles, and Lemma \ref{some.cycles} then shows that a cycle in $G'$ must be unique. After this is established, we see that $G$ is a collection of isomorphic subgraphs, all stabilizing well, arranged around a cycle. Again we may apply induction to the subgraphs to obtain the theorems. If $G'$ does not contain a cycle, then the orbit of a minimal edge must be small, and the estimates finish the work. The details are recorded in \ref{isolated.edges.and.genus.more.than.3}. The proofs of the lemmas will show that if $g=2^m$ there is only one optimal graph $G$ for which $\pi(G)=1$. If $g=3\cdot 2^m$, then $A_m$ is optimal, but $\pi(A_m)=\frac12$. This part of Theorem B will be shown by proving that the only other graphs with at least $2^{o(g)}$ automorphisms in these genera are those linking a $B_{m+1}$ and a $B_m$ by an edge; these graphs are not optimal (they have exactly $2^{o(g)}$ automorphisms), but they satisfy $\pi=1$. A similar observation holds in the case $g=3(2^m+2^p)$. This proves the last part of Theorem B, which is essential in the induction. \end{proof} \subsection{$G'$ has components that are cycles} \begin{lem}\label{all.cycles} If $G'$ is a disjoint union of cycles, then $G$ cannot be optimal. \end{lem} \begin{proof} {\bf Case 1}: If $G'$ is a single cycle, and $k=|O(e)|$, then the length of the cycle is $2k$ with $4k$ automorphisms; $g=k+1$ and $2^{k+1}>4k$ for $k\geq 5$, i.e. for $g\geq 6$, which is certainly the case; such a $G$ could not be optimal. {\bf Case 2:} If $G'$ is disconnected and $O(e)$ is a disjoint union of $k$ stars, then all the connected components of $G'$ are isomorphic (since all the vertices of the stars are in the same orbit). Then $G'$ having a connected component which is a cycle means that $G'$ is a disjoint union of $s$ cycles of the same length, say $t$; note that $t\geq 3$, otherwise $G$ could not have been simple. We note that the transitivity of the action of $\mrm{Aut}~G$ on the orbit $O(e)$, coupled with the disconnectedness of $G'$, prohibits the stars from having more than one contact point with any component of $G'$. The genus of $G$ is $g=2k+1$, and we have the ``contact formula'' $st=3k$. In estimating $|\mrm{Aut}~G|$ we may simply replace the cycles in $G'$ by (contract them to) vertices of valence $t$ with dihedral symmetry. We get a graph with $k$ vertices of valence three and $s$ vertices of valence $t$. We use growing trees: choose a star ($k$ choices), fix its edges (at most six choices), then expand this tree and subsequent trees by reaching, from a tail, to adjacent vertices not included in the tree so far.
Due to the at most dihedral symmetry around each vertex of the contracted graph, at each step in the process the size of the automorphism group increases at most two-fold; this occurs precisely when a tail of order three in $G$ has only one neighbor in the tree obtained so far (say this happens $a$ times), or when a tail of order $t$ in $G$ has at most two neighbors (one, if $t=3$) in the tree (say this happens $b$ times). Thus $|\mrm{Aut}~G|\leq 6k\cdot 2^{a+b}$ and (denoting by $n$ the number of vertices of the contracted graph) $n=s+k\geq 4+2a+(t-2)b$ (if $t\geq 4$) respectively $n\geq 4+2a+2b$ (if $t=3$). In the first instance we get $a\leq \frac{s+k}{2}-2-\frac{t-2}{2}b$, so $|\mrm{Aut}~G|\leq \frac{3}{2}k\cdot 2^{\frac{s+k}{2}-b\frac{t-4}{2}}$ (and note that $s\leq \frac{3k}{4}$); in the second instance we get directly $a+b\leq \frac{s+k}{2}-2$ (but $s=k$) so $|\mrm{Aut}~G|\leq \frac{3}{2}k\cdot 2^k$. Both times we compare to $2^{o(2k+1)}=2^{2k+1-l(2k+1)}\geq2^{k+o(k)}$ (since $l(2k+1)\leq l(k)+1$; see (\ref{estimate.l.function})). Now both inequalities are implied by $\frac{3}{2}k\leq 2^k$ which is strict by (\ref{daily.inequalities}), since $k\geq o(k)+1\geq o(k+1)$. {\bf Case 3}: If $G'$ is disconnected and $O(e)$ is a disjoint union of isolated edges, then there are at most two isomorphism classes of connected components (according to whether both endpoints of $e$ are in the same orbit or not). We will deal directly with the general case, in which there are two isomorphism classes of cycles in $G'$; the simpler case may be dealt with by taking $s_1=s_2$ below; all the estimates still work then. Thus there are $s_1$ cycles $H_1$ incident by $t$ edges to each of $n_1$ neighbors (these have length $n_1t$), and $s_2$ cycles $H_2$ incident by $t$ edges to each of $n_2$ neighbors (these have length $n_2t$). Then $k=s_in_it$ and $g=k+1$. We first need to show that any two cycles may be linked by at most one edge in $O(e)$, i.e. $t=1$. If two cycles had $t\geq 3$ common edges in $O(e)$ incident to both, then clearly fixing one cycle will fix the whole graph. Then $|\mrm{Aut}~G|\leq 2s_1n_1=2k<2^{o(k+1)}$ for $k\geq 5$ using (\ref{daily.inequalities}). But $k\leq 4$ and $t\geq 3$ is only possible when $G$ is made up of two isomorphic cycles sharing an edge between their vertices (one from each component); in that case the genus is too small (five or six) for our considerations. Thus such a $G$ could not be optimal. If two cycles had precisely two edges in common, then $|\mrm{Aut}~G|\leq 2k\cdot 2^{a+b}$, where $a$ is the number of times we get an involution of a cycle of type $H_1$ by including it when expanding the subgraph at a tail incident to it; only one neighbor of this $H_1$ must have been included in the expanding subgraph previously, so $n_1-1$ new cycles of type $H_2$ will be incident to the newly increased subgraph afterwards; and similarly for $b$. Thus $s_2\geq n_1+a(n_1-1)$ and $s_1\geq 1+b(n_2-1)$. Note that if $n_1=1$ we get $s_2=1$; then $t=2$ would force the existence of double edges in $G$. Thus $\min (n_1,n_2)\geq 2$; as a consequence, since $t=2$, $s_i\leq \frac{k}{2}$. If $n_1\geq 3$, then $a\leq \frac{s_2-1}{n_1-1}-1\leq \frac{s_2-1}{2}-1< \frac{k}{4}-1$; similarly $b\leq \frac{s_1-1}{n_2-1}<s_1=\frac{k}{n_1}\leq \frac{k}{3}$. Then overall $|\mrm{Aut}~G|\leq 2k\cdot 2^{\frac{7k}{12}-1}<k\cdot 2^{\frac{k}{2}}$; now, since $k=2s_1n_1\geq 12$, this is easily seen to be strictly less than $2^{o(k+1)}$ by (\ref{daily.inequalities}) so again $G$ is not optimal.
If $n_1=n_2=2$, then $G$ is formed of cycles of length four, each with two neighbors with which it is linked by two edges; it is immediate that the edges between two adjacent cycles should be linked at their opposite vertices for maximum symmetry gain. These cycles are actually isomorphic, and there are precisely $\frac{k}{2}$ of them in $G$. However, in this case an involution in one cycle will force an involution in a neighboring cycle; overall $|\mrm{Aut}~G|\leq k\cdot 2^{\frac{k}{2}}<2^{o(k+1)}$ as soon as $k\geq 10$, so $G$ is not optimal. We note that for $g=9$, i.e. $k=8$, $|\mrm{Aut}~G|\leq 8\cdot 2^4=2^{o(g)}<|\mrm{Aut}~C_9|$ so $G$ cannot be optimal. We also note that $g=2^m$ is not possible in the above configuration. Thus we have reduced to only the possibility $t=1$. We can estimate as above by first collapsing all cycles to vertices, then by expanding trees. So we get a graph $\bar{G}$ with $s_1$ vertices of order $t_1$ and $s_2$ vertices of order $t_2$, with dihedral symmetry around each vertex, and, most importantly, with $|\mrm{Aut}~\bar{G}|=|\mrm{Aut}~G|$. Note that $k=|O(e)|=s_1t_1=s_2t_2$, $t_i\geq 3$ and $g=k+1$. Without loss of generality, we may assume $t_1\geq t_2$. Construct the exhausting trees by first choosing an edge in $O(e)$ ($k$ choices), then fixing its orientation (at most two choices); afterwards, each tail which has no more than two neighbors in the trees constructed so far may bring at most an extra involution among the edges ending at it (since two of those are necessarily fixed); in case $t_i=3$, there should be only one neighbor of that tail among the vertices touched by the tree so far in order for that involution to exist. Say this situation occurs $a$ times for the cycles of length $t_1$ and $b$ times for the cycles of length $t_2$. Overall $|\mrm{Aut}~G|\leq 2k\cdot 2^{a+b}$. Then we have the following estimates: \begin{enumerate} \item $s_1+s_2\geq 2+a(t_1-2)+b(t_2-2)$ when $t_i\geq 4$; thus $b\leq \frac{s_1+s_2-2}{t_2-2}-a\frac{t_1-2}{t_2-2}$, so $|\mrm{Aut}(\bar{G})|\leq 2k\cdot 2^{\frac{s_1+s_2-2}{t_2-2}}$. \item $s_1+s_2\geq 2+a(t_1-2)+2b$ when $t_1>t_2=3$; thus $b\leq \frac{s_1+s_2-2}{2}-a\frac{t_1-2}{2}$, so $|\mrm{Aut}(\bar{G})|\leq k\cdot 2^\frac{s_1+s_2}{2}$. \item $s_1+s_2\geq 2+2a+2b$ when $t_1=t_2=3$ (so $s_1=s_2$), so $|\mrm{Aut}(\bar{G})|\leq k\cdot 2^{s_1}$. \end{enumerate} In all instances we compare with $2^{o(k+1)}$. In the first subcase, $s_1=\frac{k}{t_1}\leq \frac{k}{t_2}=s_2$, so the inequality to prove is implied by $2k\cdot 2^\frac{2(k-t_2)}{t_2(t_2-2)}\leq 2^{o(k+1)}=2^{k+1-l(k+1)}$; this in turn is implied, using (\ref{estimate.l.function}) and the fact that $\frac{k-t_2}{t_2(t_2-2)}\leq \frac{k-4}{8}$, by $4k\sqrt{k+1}\cdot 2^\frac{k-4}{4}\leq 2^{k+1}$. This last one is equivalent to $k\sqrt{k+1}\leq 2^\frac{3k}{4}$, which is easily seen to be strict for $k\geq 5$; however, $g\geq 8$ implies $k\geq 7$ so in this first subcase we always get a strict inequality. In the second subcase, $s_1=\frac{k}{t_1}\leq \frac{k}{3}=s_2$; as above, we reduce to $k\sqrt{k+1}\leq 2^\frac{2k}{3}$, which is easily seen to hold strictly for $k\geq 6$; this again suffices, given that $k\geq 8$. The third subcase is reduced to the same inequality as the second, so we are done. \end{proof} \begin{lem}\label{some.cycles} If $G$ is a strictly optimal graph, $G'$ cannot contain more than one component which is a cycle. Moreover, Theorem B holds for those $g$ for which $C_g$ contains an isolated cycle.
\end{lem} \begin{proof} Due to (\ref{all.cycles}), we are left to analyze the case where some components of $G'$ are cycles while the others are not. First, (\ref{reduction}) allows us to assume that the $s_2$ components $H_i$ of $G'$ which are not cycles may be stabilized without problems (no double edges or loops occur); in particular, the arithmetic genus, denoted by $h$, of these components is at least three. Then the induction hypothesis shows that $\pi(H_i)\leq 2^{o(h)}$ for $h\geq 3$. $G'$ also has $s_1$ components which are cycles, each with $n_1$ neighbors (components of $G'$ at distance one); we denote by $t$ the {\em incidence degree} of two components in different isomorphism classes, i.e. the number of edges in $O(e)$ joining two such components; we also denote by $n_2$ the number of neighbors of a component $H_i$. We have: the length of the cycles is $n_1t$, $k=s_1n_1t=s_2n_2t$ and $g=s_2(h-1)+1+k$. Note that $n_1t\geq 3$ (to avoid double edges in $G$) and, since we want to show that $s_1=1$ (note that $s_1\geq 2$ forces $k=s_1n_1t\geq 6$), it is enough to show that $k\geq 6$ leads to contradictions. We will also discuss what happens when $s_1=1$. Estimate $|\mrm{Aut}~G|$ by first contracting the cycles to vertices with dihedral symmetry at the edges around them, then using again an exhausting subgraphs argument; we start by choosing a cycle ($s_1$ choices), then fixing its orientation ($2n_1t$ choices). We get $|\mrm{Aut}~G|\leq 2k\cdot 2^{a}\cdot 2^{s_2o(h)}$, and would like to compare this to $2^{o(g)}$. Because, as mentioned, we expand the subgraphs preferably at their tails incident to components $H_i$ (when such tails exist at a given stage), we see that the only possibility of gaining extra symmetry when forced to incorporate a cycle is when that cycle had at most two incident edges (and precisely one if its length $n_1t=3$). Including such a cycle will immediately yield $n_1t-2$ (respectively $2$) tails incident to components $H_i$; since $t$ was the incidence degree, we have: if $t\geq 2$, only one component $H_i$ was incident to this cycle, so $n_1-1$ new components will be reached by tails after the cycle is included in the newly increased subgraph; if $t=1$ but $n_1\geq 4$, at least $n_1-2$ new components will be reached; and finally if $t=1$ but $n_1=3$, exactly $2=n_1-1$ new components will be reached. This happens each of the $a$ times. Thus we have: \begin{itemize} \item $s_2\geq n_1+a(n_1-1)$ if either $t\geq 2$ or $t=1,n_1=3$; \item $s_2\geq n_1+a(n_1-2)$ if $t=1$ and $n_1\geq 4$. \end{itemize} Quick manipulations lead us to the inequality $$2k\leq 2^{s_2h-s_2+k+1-l(s_2h-s_2+k+1)-a-s_2h+s_2l(h)}.$$ Using (\ref{estimate.l.function}) we get $l(s_2h-s_2+k+1)\leq l(s_2)l(h)+l(k+1-s_2)$ so the above inequality is implied by $2k\leq 2^{B(s_2,h)-a+o(k+1-s_2)}$. If $s_2=1$, then $n_1t\geq 3$ (the cycles must have length at least three, otherwise $G$ would have double edges); moreover, the cycles are connected to the ``core'' component $H$ of $G'$ by edges starting from all of their vertices. Thus (dispensing with the above bound on $|\mrm{Aut}~G|$) we have in fact $|\mrm{Aut}~G|=|\mrm{Aut}~H|$. But $g=h+k=h+s_1n_1t\geq h+3$, so $o(g)\geq o(h)+o(3)=o(h)+2$; now either $h\geq 9$, in which case the induction hypothesis says that $|\mrm{Aut}~H|\leq 3\cdot 2^{o(h)}<4\cdot 2^{o(h)}\leq 2^{o(g)}$ (so $G$ could not have been optimal), or $h\leq 8$, in which case the table (\ref{low.genus.table}) and the estimates (\ref{growth.candidates}) show again that $G$ could not have been optimal.
Thus this case cannot occur for an optimal graph $G$. Hence from now on $s_2\geq 2$, which implies $n_1\geq 2$. Suppose $n_2\geq 2$ (or, equivalently, $s_1\geq 2$). If $n_1\geq 4$ (and any $t$), then $s_2\geq 4$ (so by (\ref{main.inequality}) $B(s_2,h)\geq \lfloor \frac{s_2+1}{2}\rfloor$) and $s_2\leq \frac{k}{n_2t}\leq \frac{k}{2}$; also $a\leq \lfloor \frac{s_2-1}{2}\rfloor-1$; then $B(s_2,h)-a\geq 2$. The inequality is then implied by $2k\leq 2^{2+o(\lceil \frac{k}{2}\rceil +1)}$, or equivalently $\frac{k}{2}\leq 2^{o(\lceil \frac{k}{2}\rceil +1)}$. Now (\ref{daily.inequalities}) shows that this is strict, so $G$ could not have been optimal. If $n_1=3$ (and any $t$), then $s_2\geq 3$, $k=3s_1\geq 6$ and $a\leq \lfloor \frac{s_2-1}{2}\rfloor-1$; at the same time $s_2=\frac{k}{n_1t}\leq \frac{k}{3}$. (\ref{main.inequality}) (more precisely, (\ref{long.table})) shows that $B(s_2,h)-\lfloor \frac{s_2+1}{2}\rfloor\geq -1$ (with equality if and only if $s_2=3$ and $l(h)=1$) so $B(s_2,h)-a\geq 1$; the inequality is then implied by $2k\leq 2^{1+o(\lceil \frac{2k}{3}\rceil +1)}$ which is strict by (\ref{daily.inequalities}) so again $G$ is not optimal. Thus we must have both $s_1=n_2=1$ and $s_2=n_1\geq 2$ in an optimal $G$; the lemma is proved. Then $a=0$ and $k=s_2t$; we must have $k\geq 3$ since that is the length of the cycle. If $t\geq 2$, $2\leq s_2\leq \lfloor \frac{k}{2}\rfloor$ and $B(s_2,h)\geq l(h)\geq 1$ (using (\ref{long.table}) in (\ref{main.inequality})). The inequality is implied by $k\leq 2^{o(\lceil \frac{k}{2}\rceil+1)}$, which by (\ref{daily.inequalities}) is strict except for $k=2,4,8$. But $k=s_2t\geq 4$ so only $k=4,8$ need consideration. $k=4$ may occur only when $s_2=t=2$ (and $l(h)=1$) and then (\ref{enforcing.strictness}) shows that $G$ was not optimal. $k=8$ may occur for either $s_2=2,t=4$ or $s_2=4,t=2$; however the inequality $2k\leq 2^{B(s_2,h)+o(k+1-s_2)}$ is easily seen to be strict in these cases, so again $G$ was not optimal. Then we must have $t=1$ so $k=s_2\geq 3$; the inequality becomes $2k\leq 2^{B(k,h)}$. Then (\ref{main.inequality}) and (\ref{daily.inequalities}) show that the inequality is strict for all $k\geq 6$. If $k=5$, the inequality becomes $10\leq 2^{B(5,h)}$ which is strict for $l(h)\geq 2$ by (\ref{long.table}), and false for $l(h)=1$. If $k=4$, the inequality becomes $8\leq 2^{B(4,h)}$ which is again strict for $l(h)\geq 2$ by (\ref{long.table}), with equality for $l(h)=1$. If $k=3$, the inequality becomes $6\leq 2^{B(3,h)}$ which is strict for $l(h)\geq 3$ by (\ref{long.table}), and fails for $l(h)=1$ or when $l(h)=2$ and $l(3h)=4$. Even if $g$ were $9$, $2^n$, or $3\cdot 2^n$, a small calculation shows that the graphs with a cycle do not give an optimal graph. The cases which remain are exactly the $g$ for which $C_g$ contains an isolated cycle, so Theorem B holds in these cases by induction. \end{proof} From now on, we may assume that $G'$ contains no cycles. We first address the case that the minimal orbit is a disjoint union of stars. \begin{lem}\label{stars.and.genus.more.than.3} Let $G$ be a simple cubic graph of genus $g\geq 9$, with a minimal orbit $O(e)$ a disjoint union of $k$ stars, with all the components of $G'$ of genus $h\geq 3$ (by induction on Theorem A, $\pi(G_i)\leq 2^{o(h)}$). Then $G$ is not strictly optimal as soon as $k\geq 2$; moreover, for $k=1$, $G$ is strictly optimal only if either $l(h)=1$ or $h=3\cdot (2^m+2^p)$ with $|m-p|\geq 5$. Therefore, by induction, Theorem A holds in these cases.
\end{lem} \begin{proof} The reduction (\ref{reduction}) shows that in case $O(e)$ is a disjoint union of stars, the components of $G'$ must stabilize without problems; thus we may use the induction hypothesis in this case. If $G'$ is connected, then $|\mrm{Aut}~G|\leq |\mrm{Aut}~G^{'\mrm{stab}}|\leq |\mrm{Aut}~C_{g'}|$ where $g'=g-2k$. Using (\ref{growth.candidates}) we see that $G$ could not have been optimal for any $k\geq 1$. Then let $s\geq 3$ be the number of components of $G'$ (if $s\leq 2$ then $G'$ is connected, as a star cannot be incident to a given component twice without actually having all tails in that component; thus $G'$ disconnected implies that each star is incident to three distinct components of $G'$). Let $t$ be the number of edges in $O(e)$ incident to a given component. Then $3k=st$, and $g=sh-s+2k+1$. We also note that $t=1$ implies $k=1$ (the subgraph made up of a star and the three components to which it is incident would be a connected component of $G$, thus the whole $G$). Using the exhausting subgraphs argument we get $|\mrm{Aut}~G|\leq 6k\cdot 2^{s\cdot o(h)}\cdot 2^a$, where $a$ is the number of times we might have gained a twofold increase in the automorphism group of the subgraph by including a (new) star at a tail incident to it. Due to the way we construct these exhausting subgraphs, each inclusion of a star counted among the $a$ ones will make the new subgraph incident to two other components to which the previous subgraph was not incident. Thus we see that $s\geq 3+2a$ (since at the beginning we already had a star incident to three components). Thus we would like to show that $6k\cdot 2^{s\cdot o(h)+\integer{\frac{s-3}{2}}}\leq 2^{o(g)}$, or equivalently (using (\ref{estimate.l.function})) $$3k\leq 2^{o(2k-s+1)+sl(h)-l(sh)-\integer{\frac{s-1}{2}}}=2^{A(s,h)+o(2k-s+1)}.$$ If $t\geq 3$ then $s\leq k$ so the inequality is implied by $3k\leq 2^{A(s,h)+o(k+1)}$; since $s\geq 3$, by (\ref{main.inequality}) $A(s,h)\geq 1$ and using (\ref{daily.inequalities}) this is easily seen to be strict for all $k\geq 1$, so such a $G$ cannot be optimal. If $t=2$, then setting $k=2u$ we get $s=3u$; we need to study when $6u\leq 2^{o(u+1)+A(3u,h)}$. For $u\geq 2$, $A(3u,h)\geq 2$ by (\ref{main.inequality}) and then again (\ref{daily.inequalities}) shows that the inequality is strict; such $G$ cannot be optimal. If $u=1$, there are three connected components in $G'$, isomorphic and linked by two stars; it is apparent that already $M(G)\geq 3$. The inequality becomes $6\leq 2^{o(2)+3l(h)-l(3h)-1}=2^{3l(h)-l(3h)}$, and note that $2^{3l(h)-l(3h)}\geq 2^{l(h)}$. Then clearly for $l(h)\geq 3$ the inequality is strict, as it is for $l(h)=2$ with $l(3h)\leq 3$. Thus either $l(h)=1$ or $l(h)=2$ and $l(3h)=4$; however, even in these cases (\ref{enforcing.strictness}) shows that $G$ could not have been strictly optimal. We are left with considering the case $t=1$. Then $k=1$ as remarked before, so the inequality becomes $6\leq 2^{3l(h)-l(3h)}$. From (\ref{long.table}), we see that the inequality is strict as soon as $l(h)\geq 3$. Moreover, for $l(h)=2$ and $l(3h)\leq 3$ we get again a strict inequality. Thus only $l(h)=1$ or $l(h)=2,l(3h)=4$ are left. In all cases, $M(G)=3$, and overall $|\mrm{Aut}~G|\leq 3\cdot 2^{o(g)}$ as claimed; moreover, equality may occur only when $h=3\cdot 2^m$. We only need to show that any graph with more than $2^{o(g)}$ automorphisms is forced to have $M(G)\geq 3$, and it is clear that the above reductions prove just that, so we are done.
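(As a quick independent check of the last step: since $l(2h)=l(h)$, we have $l(3h)\leq l(2h)+l(h)=2l(h)$, so $3l(h)-l(3h)\geq l(h)$; hence $2^{3l(h)-l(3h)}\geq 2^{l(h)}\geq 8>6$ whenever $l(h)\geq 3$.)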
\end{proof} Finally, the case of $O(e)$ a disjoint union of edges must be analyzed. \begin{lem}\label{isolated.edges.and.genus.more.than.3} Let $G$ be a simple cubic graph with a minimal orbit $O(e)$ a disjoint union of $k$ edges, with all components of $G'$ of genus at least three and stabilizing to simple cubic graphs. Then $G$ is not strictly optimal as soon as $k\geq 5$. Moreover, Theorems A and B hold in these cases. \end{lem} \begin{proof} {\bf Case 1: $G'$ is connected} and (according to the hypothesis) it stabilizes without double edges or loops. If $g=9$ then $g'=g-k\leq 8$; it is clear that the inequality $|\mrm{Aut}~G|\leq|\mrm{Aut}~G'|$ and the table (\ref{low.genus.table}) show that $G$ could not be optimal for any $k$. If $g=8$ then $g'\leq 7$; $2^{o(g)}=128$ and it is clear that $|\mrm{Aut}~G|\leq|\mrm{Aut}~G'|$ and the table (\ref{low.genus.table}) show that $G$ could not be optimal for any $k$. If $g=2^m\geq 16$, then for $k\geq 2$ we get $|\mrm{Aut}~G|\leq|\mrm{Aut}~G'|\leq |\mrm{Aut}~C_{g'}|$ and (\ref{growth.candidates}) shows that $G$ could not be optimal for such $k$; and if $k=1$, then $l(g')\geq 2$ so by induction the Main Theorem shows that $|\mrm{Aut}~C_{g'}|\leq 3\cdot2^{g'-2}=3\cdot 2^{g-3}<2^{o(g)}$; thus again $G$ could not have been optimal. Otherwise, $|\mrm{Aut}~G|\leq|\mrm{Aut}~G'|\leq |\mrm{Aut}(G^{'\mrm{stab}})|\leq |\mrm{Aut}(C_{g'})|$. Now $g'=g-k$ so (\ref{growth.candidates}) shows that $G$ is not optimal as soon as $k\geq 2$. If $k=1$, (\ref{growth.candidates}) gives $|\mrm{Aut}~G|\leq |\mrm{Aut}~G^{'\mrm{stab}}|\leq |\mrm{Aut}~C_{g'}|$; we would like to compare the last term with $2^{o(g)}\geq 2^{o(g')}$. If $|\mrm{Aut}~G'|\leq 2^{o(g')}$, we get $|\mrm{Aut}~G|\leq 2^{o(g)}$ which would prove Theorem A in this case. If, however, $|\mrm{Aut}~G^{'\mrm{stab}}|>2^{o(g')}$, then $M(G^{'\mrm{stab}})\geq 3$ by induction. Now $k=1$ shows that the edges of $G^{'\mrm{stab}}$ which are pinched by $e$ must be in an orbit by themselves under the action of $\mrm{Aut}~G'$. Thus $\mrm{Aut}~G'$ is the stabilizer of the two unstable paths (edges pinched by $e$). If $M(G')=4$ we then get at least a twofold reduction in $|\mrm{Aut}~G^{'\mrm{stab}}|$ vs. $|\mrm{Aut}~G'|$, while if $4\neq M(G') \geq 3$ we get at least a threefold reduction in the same comparison. The induction shows that $M(G')=4$ happens only for $g'=g-1=5\cdot 2^m+1$ (and then $|\mrm{Aut}~G|<2^{o(g)}$) and otherwise $|\mrm{Aut}~G'|\leq 2^{o(g')}$ as desired, with equality if and only if $g'=g-1=9\cdot 2^m+u$ ($u=0,2$); if $u=2$, $o(g)>o(g')$ so again $G$ could not have been optimal, while if $u=0$ the induction shows that the star disconnecting $G^{'\mrm{stab}}$ is the minimal orbit; inserting $e$ actually produces a sixfold loss of symmetry, so again $G$ could not have been optimal since in any case we get $|\mrm{Aut}~G|< 2^{o(g)}$. Note that the reasoning applies when $g=9$ or $g=2^m$ with $m\geq 4$ to yield that such a $G$ could not have been optimal (strict or not). {\bf Case 2: $G'$ is disconnected and its components are split into two isomorphism classes}, one of $s_1$ subgraphs isomorphic to $H_1$ and one of $s_2$ subgraphs isomorphic to $H_2$. Say each $H_1$ is at distance one from $n_1$ components isomorphic to $H_2$; define $n_2$ similarly. Define also $t$ to be the number of edges linking two neighboring components $H_1$ and $H_2$. Note that $g=s_1(h_1-1)+s_2(h_2-1)+k+1$, $k=s_1n_1t=s_2n_2t$; note also that $s_1=1$ if and only if $n_2=1$, and similarly $s_2=1$ if and only if $n_1=1$.
We may assume, without loss of generality, that $s_1\leq s_2$. Start again with an exhausting subgraphs argument; the initial step is choosing an edge and its orientation (if all components of $G'$ are isomorphic). We get $|\mrm{Aut}~G|\leq k\cdot 2^{s_1o(h_1)+s_2o(h_2)}$ and we would like to study the inequality $k\cdot 2^{s_1o(h_1)+s_2o(h_2)}\leq 2^{o(g)}$. This reduces to $$k\leq 2^{s_1l(h_1)+s_2l(h_2)+k+1-s_1-s_2-l(s_1h_1+s_2h_2+k+1-s_1-s_2)}.$$ This is implied, via (\ref{estimate.l.function}), by the inequality \begin{equation*} \tag{*} k\leq 2^{B(s_1,h_1)+B(s_2,h_2)+o(k+1-s_1-s_2)}. \end{equation*} Assume first $t\geq 2$ and any $g\geq 9$. If $n_1,n_2\geq 2$, then $s_i\geq 4$ so by (\ref{main.inequality}) and (\ref{long.table}) we see that $B(s_i,h_i)\geq 2$; moreover, $s_i= \frac{k}{n_it}\leq \frac{k}{4}$ so the inequality is implied by $\frac{k}{16}\leq 2^{o(\lceil \frac{k}{2}\rceil+1)}$ which is strict for all $k\geq 1$ in light of (\ref{daily.inequalities}). If $n_1\geq 2$ and $n_2=1$, then actually $s_1=1$, $s_2=n_1\geq 2$ and $k=s_2t\geq 4$. Then $B(s_2,h_2)\geq 1$ and $s_2=\frac{k}{t}\leq \frac{k}{2}$ so the inequality is implied by $\frac{k}{2}\leq 2^{o(\lceil \frac{k}{2}\rceil+1)}$ which is strict for all $k\geq 1$ by (\ref{daily.inequalities}). If finally $n_1=n_2=1$ then $s_1=s_2=1$, $k=t$ and the inequality becomes $k\leq 2^{o(k-1)}$. This is strict for $k\geq 5$ by (\ref{daily.inequalities}), so we only need to consider the cases when $G$ is made up of two (non-isomorphic) components linked by two, three or four edges in the same orbit of $\mrm{Aut}~G$. If $g=9$, then if $k=4$ the two components must have genus three each, so overall $|\mrm{Aut}~G|\leq 4\cdot 2^4$ (since $\mu_1(3)=1$); if $k=3$ then one component has genus three and the other genus four, so overall $|\mrm{Aut}~G|\leq 3\cdot 2^5$ (since $\mu_1(3)=\mu_1(4)=1$); and if $k=2$ we may have either two components of genus four each, or a component of genus three and one of genus five, in either case getting $|\mrm{Aut}~G|\leq 2\cdot 2^5$; then (\ref{low.genus.table}) shows that $G$ could not have been optimal. If $g=8$, then only $k\leq 3$ is possible, otherwise one of the components will have genus less than two. If $k=3$, the components are tetrahedra, so $|\mrm{Aut}~G|\leq 3\cdot 2^4$; if $k=2$ then one component is a tetrahedron and the other one has genus four, so $|\mrm{Aut}~G|\leq 2\cdot 2^5$; in both cases we get less than $2^7=2^{o(g)}$ so such a $G$ cannot be optimal. If $g=2^m\geq 16$, then $|\mrm{Aut}~G|\leq k\cdot 2^{o(h_1)+o(h_2)}\leq k\cdot 2^{h_1+h_2-2}=k\cdot 2^{g-k-1}<2^{g-1}=2^{o(g)}$, so $G$ cannot be optimal. Similar estimates show that such a $G$ is not optimal if $g=3\cdot 2^m$ or $g=3(2^m+2^p)$ (with the usual restriction on $m$ and $p$). Otherwise, for $g>9$ which is not a power of two, (\ref{enforcing.strictness}) reduces the search for strictly optimal $G$'s to the case $|O(e)|=3$. We note also that, since $g=h_1+h_2+2$ and $l(g)\leq l(h_1)+l(h_2)+1$, $|\mrm{Aut}~G|\leq 3\cdot 2^{o(h_1)+o(h_2)}=3\cdot 2^{o(g)-(l(h_1)+l(h_2)+2-l(g))}\leq \frac{3}{2}2^{o(g)}$; we also have $M(G)=3$. Fixing an edge in $O(e)$ must result in fixing all the edges in $O(e)$; regarding the components, fixing one of the three incidence points must fix all three (growing the subgraphs by choosing an edge, then including a component will automatically fix all the edges, and therefore the other component's incidence points).
Then, on the one hand, this implies $M(H_i^\mrm{stab})\geq 3$; on the other, it says $\pi(H_i)\leq \frac{1}{3}|\mrm{Aut}~H_i|$. By induction we know that only for $h_i=9\cdot 2^{n_i}+u_i$ ($u_i=0,1,2$) may we get the coefficient $\mu(H_i)=3$; otherwise $\mu(H_i)\leq \frac{3}{2}$, so $\pi(H_i)\leq \frac{1}{2}2^{o(h_i)}$ (for at least one $i$) so it is then easy to see that $|\mrm{Aut}~G|\leq \frac{3}{4}2^{o(g)}$ so $G$ would not be optimal. However, a quick computation shows that even if $h_i=9\cdot 2^{n_i}+u_i$ one still gets $l(h_1)+l(h_2)\geq l(g)$ so $|\mrm{Aut}~G|<2^{o(g)}$ and $G$ could not be optimal in this case. Thus we must have $t=1$ for a strictly optimal $G$. If $n_1,n_2\geq 3$, then $s_i\geq 3$ so again $B(s_i,h_i)\geq 2$, $s_i\leq \frac{k}{3}$ and the inequality is implied by $\frac{k}{16}\leq 2^{o(\lceil \frac{k}{3}\rceil+1)}$, which is strict for all $k\geq 1$ by (\ref{daily.inequalities}). If $n_1\geq 3$ and $n_2=2$ (similarly for $2=n_1<n_2$), then $s_2\geq 3$, $s_1\geq 2$ so $B(s_1,h_1)\geq 1$, $B(s_2,h_2)\geq 2$, while $s_2\leq \frac{k}{2}$ and $s_1\leq \frac{k}{3}$. Then the inequality is implied by $\frac{k}{8}\leq 2^{o(\lceil \frac{k}{6}\rceil)+1}$, again strict for all $k\geq 1$ by (\ref{daily.inequalities}). If $n_1=n_2=2$, then $G$ is a pseudocycle formed by components linked to each of their two neighbors by an edge, with the alternate components isomorphic; then $s_1=s_2$ (note that $g$ is odd so $g=2^m$, $3\cdot 2^m$, and $3(2^m+2^p)$ with $m, p>0$ are impossible, and also that $g=9$ would force all components to have genus two, which is again ruled out). If $s_1\geq 3$ the cycle has length $2s_1\geq 6$ and cannot be strictly optimal: grouping adjacent components two by two (linked by the common edge), pinching this edge and finally linking the new $s_1$ components to the vertices of a cycle of length $s_1$ produces a graph with the same number of automorphisms, but with fewer edges in the minimal orbit. If $s_1=2$ the inequality becomes $4\leq 2^{l(h_1)+l(h_2)}$, so as soon as $\max(l(h_1),l(h_2))\geq 2$ we get a strict inequality. Thus for an optimal $G$ one must have $l(h_i)=1$. Then $g=2h_1+2h_2+1$ and $|\mrm{Aut}~G|\leq 4\cdot 2^{2o(h_1)+2o(h_2)}=2^{o(g)-(2l(h_1)+2l(h_2)+1-l(2h_1+2h_2+1))}$; this is strictly less than $2^{o(g)}$ by (\ref{estimate.l.function}) so $G$ could not have been optimal. If $n_1=1$ and $n_2\geq 3$, then $s_2=1$ and $s_1=n_2=k\geq 3$; the inequality (*) becomes $k\leq 2^{B(k,h_1)}$. \begin{itemize} \item If $l(h_1)\geq 2$, (\ref{long.table}) in the proof of (\ref{main.inequality}) and (\ref{daily.inequalities}) show that $2^{B(k,h_1)}\geq 2^{\lfloor \frac{k+1}{2}\rfloor}\geq k$, with at least one of these inequalities strict (since $k\geq 3$); thus $G$ could not have been optimal. \item If $l(h_1)=1$ a similar argument shows that only $k=3$ should be considered in all cases. If $g=9$ then $k=3$ is not possible (a component would have genus less than two). If $g=2^m$, $3\cdot 2^m$, or $3(2^m+2^p)$ and $k=3$, $3h_1+h_2=g$ so $|\mrm{Aut}~G|\leq 3\cdot 2^{3o(h_1)+o(h_2)}= 3\cdot 2^{3h_1+h_2-(3l(h_1)+l(h_2))} \leq 3\cdot 2^{o(g)-(3l(h_1)+l(h_2)-l(g))} < 2^{o(g)}$ ($l(g)\leq 2$) so $G$ cannot be optimal. Otherwise, one first remarks that $M(H_i)\geq 3$, so $\mu_1(H_i)\leq \frac{1}{2}$ unless $h_i=9\cdot 2^{n_i}+u_i$ ($u_i=0,1,2$); but that is impossible for $h_1$ since $l(h_1)=1$.
Then one has $|\mrm{Aut}~G|\leq 3\cdot \frac{1}{2}\cdot 2^{3o(h_1)+o(h_2)}\leq \frac{3}{2}\cdot 2^{o(g)-(3l(h_1)+l(h_2)+1-l(3h_1+h_2+1))}\leq \frac{3}{4}\cdot2^{o(g)}$ (due to (\ref{estimate.l.function})) so $G$ could not have been optimal. \end{itemize} If $n_1=1$ and $n_2=2$, then $s_1=2$, $s_2=1$ and $k=2$; then: \begin{itemize} \item If $g=9$ then $2h_1+h_2-3+1+2=9$ so only $h_1=h_2=3$ is possible, i.e. all components are tetrahedra (after stabilization). Then $|\mrm{Aut}~G|\leq 2\cdot 2^6<|\mrm{Aut}~C_9|$ so $G$ is not optimal. \item If $g=2^m$ or $3\cdot 2^m$ then $2h_1+h_2=g$ and $|\mrm{Aut}~G|\leq 2\cdot 2^{2o(h_1)+o(h_2)}\leq 2^{o(g)-(2l(h_1)+l(h_2)-l(g))}< 2^{o(g)}$ so again $G$ is not optimal. \item If $g=3(2^m+2^p)$ with $m\geq p+5$, then the previous estimate shows that $G$ could only be optimal if $l(h_1)=l(h_2)=1$. Inspecting the possibilities, one shows that $h_1=3\cdot 2^{m-1}$ and $h_2=3\cdot 2^p$. Now the induction shows that $H_2$, of genus $3\cdot 2^p$, must be an A$_p$ in order to reach equality above (otherwise $\pi(H_2)<1$). But then one may compute $|\mrm{Aut}~G|<2^{o(g)}$ so $G$ is not optimal. \item In all other cases, $G$ is made up of a core linked to each of two isomorphic components by an edge. Again pairing the isomorphic components, linking the ends of the edges incident to the core, and reattaching this to one of the original pinching points preserves (or increases) the number of automorphisms, but yields a smaller minimal orbit. \end{itemize} We are then left with $n_1=n_2=1$, so $s_1=s_2=k=1$, i.e. two (non-isomorphic) components linked by an edge, then $|\mrm{Aut}~G|\leq 2^{o(h_1)+o(h_2)}= 2^{o(g)-(l(h_1)+l(h_2)-l(g))}$. \begin{itemize} \item If $g=9$, one can check that this configuration cannot be optimal. \item If $g=2^m$ or $g=3\cdot 2^m$, then $l(h_1)+l(h_2)-l(g)\geq 1$, so $G$ cannot be optimal. \item If $g=3(2^m+2^p)$ with $m\geq p+5$, then $2^{o(g)}$ automorphisms are obtained only if $l(h_1)=l(h_2)=1$. This forces $h_1=3\cdot 2^m$ and $h_2=3\cdot 2^p$ (up to swapping $h_1$ and $h_2$). By induction, $G$ may only be $A_m$ and $A_p$ linked by an edge (some checking rules out the case that $p\leq 1$). \item In all other cases, $l(h_1+h_2)=l(h_1)+l(h_2)$ is forced by the optimality of $G$. We note that this prohibits $h_1=h_2$; in particular, an optimal graph $G$ cannot be built out of two non-isomorphic components of the same genus, linked by an edge. Thus, if $g=3(2^m+2^p)$, the bound $2^{o(g)}$ may only be obtained by linking an $A_m$ and an $A_p$ at their roots. \end{itemize} {\bf Case 3: $G'$ is disconnected and all its components are isomorphic}. Let $s$ be the number of connected components of $G'$, all of genus $h$, $t$ the incidence degree between two neighbors, and $m$ the number of neighbors of a given component; then $g=sh-s+k+1$ and $2k=smt$; moreover, $s=2$ if and only if $m=1$. We bound, as before (but with the extra possibility of flipping the edges in $O(e)$), $|\mrm{Aut}~G|\leq 2k\cdot 2^{so(h)}$. The inequality $2k\cdot 2^{so(h)}\leq 2^{o(sh-s+k+1)}$ is equivalent to $2k\leq 2^{o(k+1-s)+sl(h)-l(sh)}=2^{o(k+1-s)+B(s,h)}$. If $m\geq 3$ (so $s\geq 3$) and $t\geq 2$, then $B(s,h)\geq 1$ by (\ref{long.table}), and $s=\frac{2k}{mt}\leq \frac{k}{3}$; then the inequality is implied by $k\leq 2^{o(\lceil \frac{2k}{3}\rceil+1)}$ which is strict for all $k$ by (\ref{daily.inequalities}).
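(For instance, the smallest admissible value in this subcase is $k=9$, coming from $2k=smt\geq 3\cdot 3\cdot 2$; there $2^{o(\lceil 2k/3\rceil+1)}=2^{o(7)}=2^4=16>9$, as claimed.)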
If $m=2$ and $t\geq 2$, then $k=st\geq 6$, $B(s,h)\geq 1$, $s\leq \frac{k}{2}$ and the inequality is implied by $k\leq 2^{o(\lceil \frac{k}{2}\rceil +1)}$ which is strict for all $k$ except $k=2,4,8$ by (\ref{daily.inequalities}). $k=2,4$ are not possible here, while $k=8$ forces $s=4,t=2$. Then $g=4h+5$, which is impossible for $g=9$ or $g=2^n$; in other genera, $B(4,h)\geq 3$ so the inequality is easily seen to be strict anyway; no such $G$ may be optimal. If $t=1$ and $m\geq 4$, then $s\geq 4$ so $B(s,h)\geq 3$ and $s\leq \frac{2k}{m}\leq \frac{k}{2}$ so the inequality is implied by $\frac{k}{4}\leq 2^{o(\lceil \frac{k}{2}\rceil+1)}$ which is strict for all $k$ by (\ref{daily.inequalities}). If $t=1$ and $m=3$, then $s\geq 4$ so $B(s,h)\geq 3$ and $s\leq \frac{2k}{3}$; we readily get a strict inequality by (\ref{daily.inequalities}). If $t=1$ and $m=2$ then $G$ is a cycle of components of genus $h$ (each component is incident to precisely two others by an edge); $s=k\geq 3$ (otherwise $t=2$, in fact), $g=sh+1$. \begin{itemize} \item This is easily seen to be impossible for $g\leq 9$ ($h\leq 2$ is forced). \item If $g=2^m$, $3\cdot 2^m$, or $g=3(2^m+2^p)$, $|\mrm{Aut}~G|\leq 2k\cdot 2^{ko(h)}\leq 2k\cdot 2^{o(g)-(kl(h)+1-l(kh+1))} \leq 2^{o(g)}\cdot\frac{2k}{2^{B(k,h)}}$. If $k\geq 8$ or $k=6$, the last fraction is strictly less than one (because at least one of the inequalities $2^{B(k,h)}\geq 2^{\lceil\frac{k+1}{2}\rceil+1}\geq 2k$ is strict), so $G$ is not optimal. For $k=7$, one sees directly that $2^{B(k,h)}>14$ by (\ref{long.table}). For $k=4,5$, the inequality $2^{B(k,h)}>2k$ is easily seen to be true except for $l(h)=1$. However, in that case, the induction shows that the components have to be graphs of type $A_m$ or $B_p$. In this case, $|\mrm{Aut}~G|\leq k\cdot 2^{ko(h)}$, and this is easily seen to be less than $2^{o(g)}$. For $k=3$ and $l(h)\geq 3$, we have $2^{B(k,h)}>6$. Moreover, $g=3\cdot 2^m$ and $g=3(2^m+2^p)$ cannot be written in the form $3h+1$, so only the case $g=2^m=3h+1$, $k=3$, $l(h)=1$ is left. This is easily seen to be impossible (numerically). \item In all other genera, we study $2k\leq 2^{B(k,h)}$. $l(h)\geq 3$ would imply strict inequality, so only $l(h)\leq 2$ is possible. $l(h)\geq 2$ and $s\geq 3$ imply by (\ref{main.inequality}) and (\ref{daily.inequalities}) that $G$ is not optimal (split into the cases $s\geq 4$ and $s=3$). So only $l(h)=1$ is left, for which again (\ref{main.inequality}) and (\ref{daily.inequalities}) show that the only hope for optimality is $k=3$. This is only possible for $h=3$, $g=10$: a pseudocycle of length three of $K_{3,3}$s, each with two pinched edges, as described below. If the two ends of edges in $O(e)$ incident to a given component are in the same orbit, then all ends of edges in $O(e)$ are in the same orbit. These ends cannot pinch the same edge of $H$ by (\ref{problems.stabilization}); also, (\ref{reduction}) says that we may have at most one ``problem'' when trying to stabilize a strictly optimal graph $G$; since we have six incidence loci, $H$ is a simple cubic graph. Then we must have $M(H)\geq 2$. Then, if $h\geq 9$, $h$ must be one of the special genera of the Main Theorem, and for $l(h)=1$ (and $M(H)\geq 2$) we must have $\pi(H)\leq \frac{1}{2}2^{o(h)}$. Then $|\mrm{Aut}~G|\leq 6\cdot \frac{1}{8}2^{3o(h)}=\frac{3}{4}\cdot 2^{o(g)-(3l(h)+1-l(3h+1))}<2^{o(g)}$ so $G$ is not optimal.
If $l(h)=1$ and $h\leq 8$, then $h=3,4,6,8$; we note that for $H$ it must be true that fixing one of the two pinched edges is the same as fixing both pinched edges; it is now easy to determine that the only genus for which there exists a graph $H$ with the maximum number of automorphisms preserving two edges equal to $\pi(H)=2^{o(h)}$ and $M(H)\geq 2$ is $h=3$, and $H$ must be a tetrahedron (pinching it at two opposite edges gives the $K_{3,3}$ with an edge removed). Thus this last situation indeed occurs only for $g=10$, $h=3$. \end{itemize} If $t=1$ and $m=1$ then $G$ is made up of two isomorphic components linked by an edge. If $g=9$, this is not possible since nine is odd. If $g=8$, $2^{o(g)}$ automorphisms may only be obtained when the two components of $G'$ are tetrahedra each with a pinched edge, i.e. a graph $B_3$ (note that this graph is not optimal). In the other cases, we have $|\mrm{Aut}~G|\leq 2\cdot 2^{2o(h)}=2\cdot 2^{o(g)-(2l(h)-l(g))}\leq 2^{o(g)}$ with equality only if $l(h)=1$. If $g=2^m$ or $3\cdot 2^m$, then the inductive argument yields the unique shape of $G$ as $B_m$, respectively $A_m$, proving Theorem B in this case. If $g=3(2^m+2^p)$, such a graph is not optimal since $l(h)=2$. \end{proof} This completes the proof of Theorems A and B.
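Although not part of the proof, it may help the reader to experiment with the binary arithmetic that drives these estimates. The short script below is an illustrative aid; it assumes, as the identities above suggest, that $l(g)$ counts the ones in the binary expansion of $g$ and that $o(g)=g-l(g)$, and it spot-checks the sub-additivity and sub-multiplicativity properties of $l$ used throughout:

\begin{verbatim}
# Illustrative numeric check of the binary arithmetic used above (not part
# of the proof).  Assumes l(g) = number of ones in the binary expansion of
# g, and o(g) = g - l(g).

def l(g):
    return bin(g).count("1")

def o(g):
    return g - l(g)

# properties of l used repeatedly in the estimates
for a in range(1, 200):
    assert l(2 * a) == l(a)                  # shifting does not change l
    for b in range(1, 200):
        assert l(a + b) <= l(a) + l(b)       # sub-additivity
        assert l(a * b) <= l(a) * l(b)       # sub-multiplicativity

print(l(340), o(340))  # -> 4 336 (the g = 340 example in the remark above)
\end{verbatim}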
\section{Introduction} The motivation for this work was to find a way of transforming a generative model, trained on one distribution, so that it outputs a completely new distribution of images that does not model an existing dataset. We approached this by taking the generator from a pre-trained generative adversarial network (GAN) \cite{goodfellow2014generative} trained on one dataset (in this case ImageNet \cite{deng2009imagenet}) and then fine-tuned it with features from another dataset using a classifier trained on data from both datasets. With this approach we were hoping not simply to model the distribution of images in the new dataset, but to transform the generator so that it outputs a new distribution of images that fuses visual features from both datasets, resulting in a distribution with novel characteristics. By starting from a pre-trained model with good initial weights, we hoped that this would preserve some aspects of the original distribution, such as the spatial structure of the images, while instilling some new characteristics from the other dataset. \section{Method} We created a dataset of approximately 14k images from Pinterest boards with the title \mbox{\textit{a e s t h e t i c}}.\footnote{See Figure \ref{fig2} in the Appendix for samples.} Images from these boards can usually be characterised by having distinct, washed-out colour palettes (often with only one dominant colour in the image) and often the photographs are framed with no particular subject in focus. We trained a binary classifier to classify between the \mbox{\textit{a e s t h e t i c}} images\footnote{We also trained classifiers for other datasets with prominent aesthetic characteristics, but for posterity, we will only be discussing results from fine-tuning with the classifiers trained on the \mbox{\textit{a e s t h e t i c}} dataset.} and images from the ImageNet dataset \cite{deng2009imagenet}. To train the classifier we fine-tuned a pre-trained ResNet \cite{he2016deep} model that had been trained to weakly classify Instagram hashtags and then ImageNet \cite{mahajan2018exploring}. In addition to training the classifier to classify \mbox{\textit{a e s t h e t i c}} images and ImageNet images as separate classes (contrastive features), we also---initially by accident---trained a classifier that classifies them as being in the same class (joint features), which led to significantly better results when used for fine-tuning the generator (see Section \ref{discussion} for further discussion). After training the cross-dataset classifier, we used this model to fine-tune the weights of a pre-trained BigGAN \cite{brock2018large} generator trained on the ImageNet dataset at a resolution of $128\times128$ pixels.\footnote{For this we used `The author's officially unofficial PyTorch BigGAN implementation' \url{https://github.com/ajbrock/BigGAN-PyTorch} and would like to thank the authors of the repository, Andrew Brock and Alex Andonian, for releasing the model weights for the discriminator as well as the generator, without which this work would not have been possible.} We also used the frozen weights of the discriminator in the fine-tuning training procedure, updating the weights of the generator based on a weighted sum of the loss from the discriminator and the cross-dataset classifier (see Figure \ref{fig1} for details). During this fine-tuning process, the networks are not exposed to any new training data; all the samples and losses are produced using only the pre-trained networks.
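The procedure can be summarised in a few lines of PyTorch-style pseudocode. The sketch below is illustrative rather than a listing of our implementation: the names \texttt{generator}, \texttt{discriminator} and \texttt{classifier} stand for the pre-trained networks described above, \texttt{target\_labels} marks the class the cross-dataset classifier should assign to generated samples, and the hyperparameter values are placeholders.

\begin{verbatim}
# Illustrative sketch of the fine-tuning loop (not the released code).
import torch
import torch.nn.functional as F

def finetune(generator, discriminator, classifier, target_labels,
             iters=600, batch=9, w_cls=1.0, w_disc=1.0, dim_z=120):
    # freeze the discriminator and the cross-dataset classifier
    for net in (discriminator, classifier):
        for p in net.parameters():
            p.requires_grad = False
    opt = torch.optim.Adam(generator.parameters(), lr=1e-5)
    for _ in range(iters):  # early stopping around 300-600 iterations
        z = torch.randn(batch, dim_z)              # latent vectors
        y = torch.randint(0, 1000, (batch,))       # ImageNet class labels
        fake = generator(z, generator.shared(y))   # BigGAN-style conditioning
        # classifier loss: push samples towards the target class
        loss_cls = F.cross_entropy(classifier(fake), target_labels)
        # discriminator loss: keep samples plausible to the frozen critic
        loss_disc = -discriminator(fake, y).mean()
        loss = w_cls * loss_cls + w_disc * loss_disc
        opt.zero_grad()
        loss.backward()
        opt.step()
\end{verbatim}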
The process of training and convergence is very rapid. Usually within 1000 iterations (using a batch size of 9) the generator has converged onto a configuration of the weights that satisfies both the cross-dataset classifier and the discriminator. However, we find that the best results are achieved with early stopping; often the most interesting visual results occur when training is stopped after 300--600 iterations. Because training is so quick, it is trivial to try multiple configurations of the parameter weighting and manually compare the visual results. \begin{figure} \centering \begin{tikzpicture}[auto, node distance=2cm,>=latex'] \tikzstyle{model} = [rectangle, minimum width=3cm, minimum height=0.6cm,text centered, draw=black, fill=red!30] \tikzstyle{sample} = [rectangle, rounded corners, minimum width=3cm, minimum height=0.6cm,text centered, draw=black, fill=blue!30] \tikzstyle{arrow} = [thick,->,>=stealth] \node (generator) [model] {Generator}; \node (image) [sample] [right of=generator, xshift= 1.75cm] {Sample Batch}; \node (classifier) [model] [right of=generator, yshift= 0.75cm, xshift= 4cm] {Frozen Classifier}; \node (discriminator) [model] [right of=generator, yshift=-0.75cm, xshift= 4cm] {Frozen Discriminator}; \node (sum) [sample] [right of=generator, yshift=-1.5cm, xshift= 6.5cm] {Weighted Sum of Losses}; \draw [arrow] (generator) --(image); \draw [arrow] (image) -| (classifier); \draw [arrow] (image) -| (discriminator); \draw [arrow] (classifier) -| (sum); \draw [arrow] (discriminator) -| (sum); \draw [arrow] (sum) -| node[anchor=south, yshift=-0.6cm, xshift=3cm] {\textit{update generator weights}}(generator); \end{tikzpicture} \caption{Diagram of the training process: batches of images are sampled from the pre-trained generator and fed to the cross-dataset classifier and the pre-trained discriminator (both of which have their weights frozen). The weights of the generator are updated based on a weighted sum of the losses from the classifier and discriminator.} \label{fig1} \end{figure} \section{Discussion and Conclusion} \label{discussion} In the process of this work we have happened upon a number of surprising results. The manner in which features get combined from the different datasets was highly unexpected. Neither fine-tuning with the contrastive features classifier nor fine-tuning with the joint features classifier produces images that resemble those in either the ImageNet or \mbox{\textit{a e s t h e t i c}} datasets. The second surprising result is that when fine-tuning with the joint features classifier the visual results are much richer and more varied (almost dreamlike in nature) than those from fine-tuning with the contrastive features classifier (see Figures \ref{fig4} and \ref{fig5} in the Appendix for a detailed comparison). We speculate that the contrastive features classifier discards a lot of important features from the ImageNet distribution, so when the generator is fine-tuned, there are fewer combinations of features that can be used and the resulting distribution has far less variety. In future research, we hope to find ways of having more control over what kind of characteristics from the different datasets get combined in the fine-tuning process, be that characteristics relating to aesthetic qualities, the structure and form in the images, or the stylistic qualities of a given dataset.
We also hope to apply these techniques to higher resolution GAN models; however, without access to pre-trained discriminators, this is currently not possible for the higher resolution generative models that have been made publicly available, short of retraining them from scratch. \subsubsection*{Acknowledgments} This work has been supported by the UK's EPSRC Centre for Doctoral Training in Intelligent Games and Game Intelligence (IGGI; grant EP/L015846/1). \bibliographystyle{unsrt}
\section{Introduction} The identification and classification of objects \ct{from a set of independent catalogs is a key task for making astronomical data usable for scientific analysis. The standard approach here is to solve this problem step by step using hierarchical ``best-match'' algorithms, as exemplified in the cross-identification of radio sources \cite{Vollmer} from the VizieR database of astronomical catalogs \cite{Ochsenbein}. Although such algorithms are fast and efficient in low-level applications, they have limitations in dealing with ambiguities and considering object classes with different levels of complexity. This is illustrated} in the recent production of the band-merged version \cite{Chen1} of the \textit{Planck} Early Release Compact Source Catalog (ERCSC) \cite{PlanckERCSC}, and the variability classification of ERCSC objects using WMAP data \cite{Chen2}. \ct{Motivated by this, we present here a Bayesian approach to object identification and classification, based on data from a set of astronomical catalogs taken, \textit{e.g.}, at different frequencies or by different observatories. In this method, we consider not only positional coincidence between catalog entries, but also the properties of known object classes, and use \textit{both} as criteria for the identification and simultaneous classification of objects. The current paper focuses on the mathematical basics of this method, with some typical choices for priors and likelihoods needed for catalog generation. Applications to data, and a more detailed comparison with standard approaches, will follow in future work.} \section{Bayesian Association and Classification} \subsection{Terms and Definitions} \paragraph{Notational Conventions} We understand \textit{probability} {\small $\boldsymbol{\cal P}$} in the Bayesian sense as an operator which assigns a value of plausibility, {\small $0\le \boldsymbol{\cal P}(\mathfrak{S}) \le 1$}, to a statement {\small $\mathfrak{S}$}, and we \ct{introduce the \textit{information Hamiltonian} \cite{Ensslin} for a condition {\small $\mathfrak{X}$} on {\small $\mathfrak{S}$} as {\small $\boldsymbol{\cal H}(\mathfrak{X}|\mathfrak{S}) \equiv -\log \boldsymbol{\cal P}(\mathfrak{S}|\mathfrak{X})$}. \textit{Data} (factual information) shall be denoted by blackboard-bold symbols (\textit{e.g.}, {\small $\mathbb{D}$}), \textit{models} (abstract beliefs) by calligraphic symbols (\textit{e.g.}, {\small ${\cal M}$}).} We denote a set of logically independent statements as {\small $\set{\mathfrak{S}_j}_\perp$} and define for any condition {\small $\mathfrak{X}$} \begin{small}\begin{equation} \textstyle \boldsymbol{\cal P}(\set{\mathfrak{S}_j}_\perp|\mathfrak{X}) \equiv \prod_j \boldsymbol{\cal P}(\mathfrak{S}_j|\mathfrak{X})\quad. \end{equation}\end{small}% A set of mutually exclusive statements shall be denoted with {\small $\set{\mathfrak{S}_j}_\wr$} with the definition \begin{small}\begin{equation} \textstyle \boldsymbol{\cal P}(\set{\mathfrak{S}_j}_\wr|\mathfrak{X}) \equiv \sum_j \boldsymbol{\cal P}(\mathfrak{S}_j|\mathfrak{X})\quad. \end{equation}\end{small}% If a set of mutually exclusive statements \ct{is exhaustive}, we call it a \textit{complete set of alternatives} {\small $\cset{\mathfrak{S}_j}_\wr$}, with {\small $\boldsymbol{\cal P}(\cset{\mathfrak{S}_j}_\wr|\mathfrak{X})=1$}.
\ct{The operator {\small $\boldsymbol{\cal N}(\set{\mathfrak{S}_j})$} gives the number of elements of a set with {\small $j>0$}; a set containing a zero-indexed element is denoted {\small $\set{\mathfrak{S}_0,\mathfrak{S}_j}$}.} \paragraph{Structure of Data and Associations} From a set of positions taken from a highly reliable seed catalog we select within a radius {\small $\Delta_j$} potentially associated data {\small $\mathbb{D} = \set{\mathbb{D}_j}_\perp$} from {\small $\boldsymbol{\cal N}(\mathbb{D})$} independent target catalogs, where the seed catalog may be included as the zero-indexed element, {\small $\mathbb{D}_0$}. The entries of each target catalog {\small $j$} form a complete set of alternatives, {\small $\mathbb{D}_j =\cset{ \mathbb{D}_{j0}, \mathbb{D}_{jk} }_\wr$}, where {\small $\mathbb{D}_{j0} = \set{\sigma_{j0},\nu_j}$} stands for the \ct{\textit{non-observation}} in the catalog {\small $j$} with noise level {\small $\sigma_{j0}$} and signal-to-noise limit {\small $\nu_j$}, together with {\small $\boldsymbol{\cal N}(\mathbb{D}_j)$} data entries {\small $\mathbb{D}_{jk} = \set{\mathbb{D}_{jk0},\mathbb{D}_{jki}}_\perp$}. {\small $\mathbb{D}_{jk0} = \set{\theta_{jk},\delta_{jk}}$} denotes the positional distance of a data entry to the nominal seed coordinates and its error, while {\small $\mathbb{D}_{jki} = \set{f_{jki},\sigma_{jki}}$} contain {\small $\boldsymbol{\cal N}(\mathbb{D}_{jk})$} physical parameters and their errors. \ct{Finally, we define an \textit{association} {\small $\alpha_\ell$} as a mapping determining one entry {\small $\mathbb{D}_{j\alpha_{{\ell}j}}$} of each catalog {\small $j$}, and denote {\small $\alpha_\ell\mathbb{D} \equiv \set{\mathbb{D}_{j\alpha_{{\ell}j}}}_\perp$}. Obviously, associations form a complete set of alternatives, {\small $\alpha = \cset{\alpha_\ell}_\wr$}.} \paragraph{Models, Parameters and the Classification Scheme} Classification is based on a set of mutually exclusive models {\small ${\cal M}=\set{{\cal M}_n}_\wr$}, each providing a physical description of a known object class as a set of functions {\small $\mu_{ni}(x_j;\boldsymbol{\omega})$} that can be compared to the data values {\small $f_{jki}$}. {\small $x_j$} is a physical quantity mapping a model prediction onto a particular catalog (\textit{e.g.}, nominal frequency), and {\small $\boldsymbol{\omega}$} is a vector in the model parameter space {\small $\Omega_n$} of dimension {\small $\dim\Omega_n$}. The (prior) probability assigned to a model is understood as a marginalization over the model parameter space, i.e., {\small $ \boldsymbol{\cal P}({\cal M}_n) = \int_{\Omega_n}\!\mathbf{d}\boldsymbol{\omega}\;p_n(\boldsymbol{\omega})$}, where {\small $p_n(\boldsymbol{\omega})$} is called the \textit{parameter p.d.f.} of {\small ${\cal M}_n$}. A priori, we cannot assume that {\small $\set{{\cal M}_n}_\wr$} is exhaustive. \ct{This would mean {\small $\boldsymbol{\cal P}({\cal M}) < 1$}, which poses} a problem for the proper normalization of Bayesian posterior probabilities. We therefore introduce the \textit{classification scheme} {\small $\boldsymbol{\mathfrak{C}}$} as a set of conditions that allows us to treat {\small ${\cal M}$} as an exhaustive set, and write {\small $\boldsymbol{\cal P}({\cal M}|\boldsymbol{\mathfrak{C}}) = 1$} and {\small ${\cal M}|_{\boldsymbol{\mathfrak{C}}} \equiv \cset{{\cal M}_n}_\wr$}.
In a more general sense, {\small $\boldsymbol{\mathfrak{C}}$} can be understood as the framework of \ct{factual} information (data), beliefs (theories and ancillary hypotheses) and decisions (\textit{e.g.}, how to classify objects), which enables us to define and delimit our set of models {\small ${\cal M}|_{\boldsymbol{\mathfrak{C}}}$}. \subsection{Application of Bayes' Theorem} \paragraph{Separating Association and Classification} The posterior probability for a \textit{candidate object} {\small $\alpha_\ell{\cal M}_n$} can be written, using the product rule, as \begin{small}\begin{equation} \boldsymbol{\cal P}(\alpha_\ell{\cal M}_n|\mathbb{D}\boldsymbol{\mathfrak{C}}) = \boldsymbol{\cal P}({\cal M}_n|\alpha_\ell\mathbb{D}\boldsymbol{\mathfrak{C}})\; \boldsymbol{\cal P}(\alpha_\ell|\mathbb{K})\quad.\label{Ppost} \end{equation}\end{small}% The posterior probability of an association depends only on the set of coordinates, which we denote by {\small $\mathbb{K}$}, and we can omit {\small $\boldsymbol{\mathfrak{C}}$} in the condition of this term. By applying Bayes' theorem, both terms can be transformed separately as \begin{small}\begin{eqnarray} \boldsymbol{\cal P}({\cal M}_n|\alpha_\ell\mathbb{D}\boldsymbol{\mathfrak{C}}) &=& \frac{\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|{\cal M}_n\boldsymbol{\mathfrak{C}})\,\boldsymbol{\cal P}({\cal M}_n|\boldsymbol{\mathfrak{C}})}{ \boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|\boldsymbol{\mathfrak{C}})}\quad, \label{BayesClass}\\ \boldsymbol{\cal P}(\alpha_\ell|\mathbb{K}) &=& \frac{\boldsymbol{\cal P}(\mathbb{K}|\alpha_\ell)\,\boldsymbol{\cal P}(\alpha_\ell)}{ \boldsymbol{\cal P}(\mathbb{K})}\quad. \label{BayesAss} \end{eqnarray}\end{small}% As both {\small ${\cal M}|_{\boldsymbol{\mathfrak{C}}}$} and {\small $\alpha$} form complete sets of alternatives, \ct{we obtain the evidence terms} \begin{small}\begin{eqnarray} \boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|\boldsymbol{\mathfrak{C}}) &=& \sum_{n=1}^{\boldsymbol{\cal N}({\cal M})} \int_{\Omega_n}\!\!\!\mathbf{d}\boldsymbol{\omega}\; p_n(\boldsymbol{\omega}|\boldsymbol{\mathfrak{C}})\,\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|{\cal M}_n\boldsymbol{\omega}\boldsymbol{\mathfrak{C}})\quad, \label{EClass}\\ \boldsymbol{\cal P}(\mathbb{K}) &=& \sum_\ell \;\boldsymbol{\cal P}(\mathbb{K}|\alpha_\ell)\,\boldsymbol{\cal P}(\alpha_\ell) \quad,\label{EAss} \end{eqnarray}\end{small}% where we have written the model likelihoods {\small $\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|{\cal M}_n\boldsymbol{\mathfrak{C}})$} explicitly as integrals over their constrained parameter p.d.f.s {\small $p_n(\boldsymbol{\omega}|\boldsymbol{\mathfrak{C}})$}, fulfilling {\small $\sum_n \int_{\Omega_n}\!\!\mathbf{d}\boldsymbol{\omega}\, p_n(\boldsymbol{\omega}|\boldsymbol{\mathfrak{C}}) = 1$}. \paragraph{Priors and Likelihoods of Association} Associations by themselves are just abstract combinations of numbers. Without referring to data or physical models, we have to assign the same value {\small $\boldsymbol{\cal P}(\alpha_\ell)>0$} for all {\small $\ell$}, except for some {\small $\alpha_\ell$} which can be excluded with certainty (\textit{e.g.}, {\small $\boldsymbol{\cal P}(\alpha_\ell)=0$} if {\small $\alpha_{{\ell}0}=0$}). As a constant prior {\small $\boldsymbol{\cal P}(\alpha)$} cancels in Eqs.~\ref{BayesAss} and \ref{EAss}, we can set {\small $\boldsymbol{\cal P}(\alpha_\ell)=1$} in all terms with {\small $\boldsymbol{\cal P}(\alpha_\ell)\neq0$}.
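The combinatorics of associations and their uniform prior can likewise be sketched in a few lines of Python (again purely illustrative; which associations are excluded with certainty is application specific --- the paper's example excludes those with {\small $\alpha_{{\ell}0}=0$} when the seed catalog is included, while the sketch below excludes the empty association instead):
\begin{verbatim}
from itertools import product

def associations(entries_per_catalog):
    # enumerate all alpha_l: for each target catalog j, pick either an
    # entry index k in {1, ..., N(D_j)} or 0 for the non-observation
    return list(product(*(range(n + 1) for n in entries_per_catalog)))

def prior(alpha):
    # constant prior; associations excluded with certainty get zero --
    # here, as an example, the empty association (only non-observations)
    return 0.0 if all(k == 0 for k in alpha) else 1.0

# three target catalogs with 2, 1 and 3 candidate entries within the
# search radii: (2+1)*(1+1)*(3+1) = 24 associations in total
alphas = associations([2, 1, 3])
print(len(alphas), sum(prior(a) for a in alphas))  # 24 23.0
\end{verbatim}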
The likelihood of an association is determined by two contributions: (a) the probability of an associated data point being observed at an effective distance {\small $\bar\theta_{jk}$} within an effective accuracy {\small $\bar\delta_{jk}$}, and (b) the \textit{confusion probability} {\small $\psi_j(k)$} of having a given number {\small $k$} of unrelated data points in a catalog of mean source density {\small $\eta_j$} within a radius {\small $\Delta_j$}, i.e., the Poisson probability \begin{small}\begin{equation} \psi_j(k) = \frac{(\pi \Delta_j^2 \eta_j)^k}{k!} e^{-\pi \Delta_j^2 \eta_j}\quad. \end{equation}\end{small} The effective distance and accuracy account for the seed position error {\small $\delta_{j0}$} by defining {\small $ \bar\theta_{jk} = \sqrt{\theta_{jk}^2 + \delta_{j0}^2}$ and $\bar\delta_{jk} = \sqrt{\delta_{jk}^2 + \delta_{j0}^2} $}, thus {\small $\boldsymbol{\cal P}(\mathbb{K}_{jk}|\alpha_\ell) \propto (\bar\theta_{jk}/\bar\delta_{jk})\;\exp(-\bar\theta_{jk}^2/2\bar\delta_{jk}^2)$}. It is then straightforward to see\footnotemark\prefootnotemark{1}\ that \ct{ \begin{small}\begin{equation} \boldsymbol{\cal H}(\alpha_\ell|\mathbb{K}) = \sum_{j=1\atop \alpha_{{\ell}j}>0}^{\boldsymbol{\cal N}(\mathbb{D})} \left(\log\left[ \frac{\bar\delta_{j\alpha_{{\ell}j}}}{\bar\theta_{j\alpha_{{\ell}j}}\, \psi_j(\boldsymbol{\cal N}(\mathbb{D}_j)-1)}\right] + \frac12\left[ \frac{{\bar\theta}_{j\alpha_{{\ell}j}}^2}{{\bar\delta}_{j\alpha_{{\ell}j}}^2} -1\right]\right) - \sum_{j=1\atop \alpha_{{\ell}j}=0}^{\boldsymbol{\cal N}(\mathbb{D})} \log\psi_j\big(\boldsymbol{\cal N}(\mathbb{D}_j)\big)\;. \end{equation}\end{small} } \noindent The seed catalog \ct{does not} contribute to this term, as its association is logically implied. \paragraph{Priors and Likelihoods of Classification} \footnotetext{Likelihoods containing contributions from both data point associations (Gaussian p.d.f.s) and non-observations (probabilities) require us to define conditions that normalize the relative contribution of both kinds of terms. In {$\boldsymbol{\cal H}(\alpha_\ell|\mathbb{K})$} this is done by requiring {$\boldsymbol{\cal P}(\mathbb{K}_{jk}|\alpha_\ell) \to 1$} for matching coordinates measured with arbitrary precision, {$\theta_{jk} < \delta_{jk} < \delta_{j0} \to 0$}.} Priors in classification are given by the model parameter p.d.f.s, the functional shape of which is part of the model generation and will not be discussed here. For the normalization of the prior p.d.f.s, relative abundances of known object classes from previous classifications can be used. The classification likelihood is the probability that the data points and non-observations in {\small $\alpha_\ell\mathbb{D}$} match the model prediction, and we can write \ct{ \begin{small}\begin{equation} \boldsymbol{\cal H}({\cal M}_n\boldsymbol{\omega}|\mathbb{D}\boldsymbol{\mathfrak{C}}) = \frac{\chi^2}{2} - \sum_{j=1\atop \alpha_{{\ell}j}=0}^{\boldsymbol{\cal N}(\mathbb{D})} \log{\rm erfc}\left[\frac{1}{\sqrt{2}}\, \left(\frac{\mu_{ni}(x_j;\boldsymbol{\omega})}{\sigma_{j0}}-\nu_j\right) \right]\quad.
\end{equation}\end{small} } \noindent The second term considers the contribution of assumed non-observations, and \begin{small}\begin{equation} \chi^2=\sum_{j=1\atop \alpha_{{\ell}j}>0}^{\boldsymbol{\cal N}(\mathbb{D})} \sum_{i=1}^{\boldsymbol{\cal N}(\mathbb{D}_{jk})} \left(\frac{f_{j\alpha_{{\ell}j}i} - \mu_{ni}(x_j;\boldsymbol{\omega})}{\sigma_{j\alpha_{{\ell}j}i}}\right)^{\!\!2} \end{equation}\end{small}% is the usual ``goodness-of-fit'' measure for the data points in {$\alpha_\ell\mathbb{D}$}.\footnote{Here we require that for the fiducial case {$\mu_{ni}(x_j;\boldsymbol{\omega}) = \nu_j\sigma_{j0}$}, the probability of a data point {$\set{\nu_j\sigma_{j0},\sigma_{j0}}$} being consistent with the model prediction is equal to the probability of a non-observation.}% $^,$\footnote{We use Gaussian p.d.f.s as we assumed in our data structure that only one error parameter is given for each position or quantity. If more detailed error information is available, the definition of the corresponding likelihoods has to be adapted.} \subsection{Classifying Objects} \paragraph{Definition and Properties of Confidence} Following Jaynes \cite[\S\,4]{Jaynes} we use the logarithm of the odds ratio to compare our classifications and define the \textit{confidence} of a candidate object {\small $\alpha_\ell{\cal M}_n$} as \begin{small}\begin{equation} c_{{\ell}n} = \log\left(\frac{\boldsymbol{\cal P}(\alpha_\ell{\cal M}_n|\mathbb{D}\boldsymbol{\mathfrak{C}})}{ 1-\boldsymbol{\cal P}(\alpha_\ell{\cal M}_n|\mathbb{D}\boldsymbol{\mathfrak{C}})}\right)\quad. \end{equation}\end{small}% The object of choice would then be the candidate object with maximum confidence, {\small $c_{\rm max}$}, and we denote the corresponding indices as {\small ${\ell}_{\rm max}$} and {\small $n_{\rm max}$}. Analogously, we can define the confidence of a data association as \begin{small}\begin{equation} a_\ell = \log\left(\frac{\boldsymbol{\cal P}(\alpha_\ell|\mathbb{K})}{ 1-\boldsymbol{\cal P}(\alpha_\ell|\mathbb{K})}\right) \end{equation}\end{small}% and denote value and index of the maximum as {\small $a_{\rm max}$} and {\small ${\hat\ell}_{\rm max}$}, respectively. Eq.~\ref{Ppost} implies that {\small $c_{{\ell}n}<a_\ell$} for all {\small $n$}. Because of {\small $\cset{\alpha_\ell{\cal M}_n}_\wr$}, we can have {\small $c_{{\ell}n}>0$} for only one combination {\small ${\ell}n$}. As this implies {\small $a_\ell>0$}, it can be taken as a condition for a unique and consistent object choice, preferring one {\small $\alpha_\ell{\cal M}_n$} over all others. On the contrary, we cannot conclude from {\small $a_\ell>0$} that {\small $c_{\rm max}>0$}, nor can we conclude that {\small $\ell={\ell}_{\rm max}$}. \paragraph{Quality Rating} Based on the discussion above, we can define for each object a quality rating that determines potential actions to be taken for catalog validation and verification. The most basic scheme would contain four ratings as follows. Rating \textbf{A} selects clear cases, and is given for {\small $c_{\rm max} \ge C_{\rm lim} > 3\,$}nat, corresponding to a rejection probability of the best alternative of {\small $>95\%$} ({\small $1\,{\rm nat} = 4.34\,$dB}). Little or no human inspection is necessary in these cases, and results of rejected object associations can be deleted. Rating \textbf{B} would be applied for {\small $c_{\rm max} \ge 0$}, and indicates likely cases, while rating \textbf{C} would be applied to potentially ambiguous cases with {\small $a_{\rm max} \ge 0$} while {\small $c_{\rm max}<0$}.
Both require human inspection at different levels, and all results with {\small $c_{{\ell}n} \sim c_{\rm max}$} should be kept for validation. Finally, a rating \textbf{D} ({\small $a_{\rm max}<0$}) identifies objects which would normally be rejected in catalog generation, but which may still be interesting to look at for research purposes. Of course, this scheme may be adapted to reliability requirements, and it may make sense to split up rating \textbf{A} using a sequence of increasing {\small $C_{\rm lim}$}. \subsection{Odd Objects\hspace{3cm}\ } \vspace*{-4.5ex} \hspace*{\fill}\parbox{5cm}{\scriptsize Our plan is to drop a lot of odd objects\\ onto your country from the air.\\ And some of these objects will be useful.\\ And some of them will just be odd.\\ \hspace*{\fill}\textit{Laurie Anderson}} \paragraph{Counter-evidence} We are left with a problem: Assume there is an object which does not fit into any of our model classes. How would it appear in our classification? Obviously, for such objects all integrals in the sum of Eq.~\ref{EClass} become very small, so {\small $\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|\boldsymbol{\mathfrak{C}})$} becomes very small as well, even if the data association has a high confidence. We therefore introduce the \textit{counter-evidence} for an associated object to fit into the classification scheme as \ct{ \begin{small}\begin{equation} \kappa_\ell = -\log\;\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|\boldsymbol{\mathfrak{C}}) \equiv \boldsymbol{\cal H}(\boldsymbol{\mathfrak{C}}|\alpha_\ell\mathbb{D}) \end{equation}\end{small}% }% and {\small $\kappa = \min(\kappa_\ell)$}. \ct{Thus, $\kappa$ can be seen as the \textit{information Hamiltonian of the classification scheme}, taken for the association for which it becomes minimal.} Large values for {\small $\kappa$} are an indication of \textit{classification exceptions}. Following the US performance artist Laurie Anderson% \footnote{\ct{Laurie Anderson, \textit{United States Live}, Warner Bros.\ (1983)}} we call such cases \textit{odd objects}: While exceptions are usually expected to be results of instrumental errors or defects in the target catalogs (``just odd''), they could also indicate the \textit{discovery} of a new, unexpected object type \ct{(``useful'')}. \paragraph{Introducing an exception class} \ct{That our method mingles candidates for rejection with candidates for discovery is a defect arising from forcing the condition {\small $\boldsymbol{\cal P}({\cal M}|\boldsymbol{\mathfrak{C}})=1$} onto an, in principle arbitrary, classification scheme {\small $\boldsymbol{\mathfrak{C}}$}. To overcome this problem,} we introduce an \textit{odd-object class} {\small ${\cal M}_0$} defined by a single parameter \begin{small}\begin{equation} \xi \equiv -\log\big(\,\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|{\cal M}_0)\;\boldsymbol{\cal P}({\cal M}_0)\,\big) \end{equation}\end{small}% for all {\small $\alpha_\ell\mathbb{D}$}. As {\small ${\cal M}_0$} is the logical complement of the set {\small ${\cal M}$}, we have {\small $\cset{{\cal M}_0,{\cal M}_n}_\wr$} without conditions, and \ct{we obtain the total evidence} \begin{small}\begin{equation} \boldsymbol{\cal P}(\alpha_\ell\mathbb{D}) = \boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|\boldsymbol{\mathfrak{C}}) + e^{-\xi}\quad. \end{equation}\end{small}% \ct{This implies {\small $c_{{\ell}0} = \kappa_\ell -\xi$}, and we} define the confidence for an object to be odd as \begin{small}\begin{equation} c_0 \equiv \kappa - \xi\quad.
\end{equation}\end{small}% Moreover, objects of classes {\small ${\cal M}_n$} need to fulfill {\small $\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|{\cal M}_n)\,\boldsymbol{\cal P}({\cal M}_n) > e^{-\xi}$} for some ${\ell}n$ in order to receive a rating \textbf{B} or above, while there was no such limit in {\small $\boldsymbol{\mathfrak{C}}$}. To prevent odd objects from accidentally being considered ``clear cases'' if {\small $\boldsymbol{\cal P}(\alpha_\ell\mathbb{D}|{\cal M}_n)\,\boldsymbol{\cal P}({\cal M}_n) \ll e^{-\xi}$} for all {\small ${\ell}n$}, we introduce a sub-rating \textbf{Ao} {\small $\subset$} \textbf{A} for {\small $c_0 > C_{\rm lim}$}, which requires human inspection. \section{Discussion and Philosophical Epilogue} \subsection{Benefits of Bayesian Classification} \paragraph{Models and Priors: Experience vs. Bias} In Bayesian classification we use models and priors, which in catalog generation are usually suspected of introducing bias. Shouldn't we use only the information contained in the \ct{\textit{given data set} in order} to be objective? Our Bayesian answer is: No, we shouldn't, and in fact, we never do. \ct{In general, it is an advantage of Bayesian methods that our priors are stated clearly, while orthodox methods often hide the prior assumptions used. For the special case of catalog generation, this means that we always have additional data available, usually in a complex and incoherent form, and also widely accepted models describing the nature of our potential objects, and these data and models \textit{are} used by ``experienced astronomers'' in the process called \textit{catalog validation}. All we do by introducing models and priors is to automate part of this experience, i.e., provide a condensed description of our prior knowledge and beliefs to the classification procedure. Our quality rating ensures that this affects only the trivial, routine tasks of validation, and prevents potentially interesting alternatives to the best assignments from being dropped prematurely (\textit{e.g.}, cases with {\small ${\ell}_{\rm max} \neq {\hat\ell}_{\rm max}$}).} \paragraph{Beyond Best Fits: Robustness and Model Complexity} Our method exhibits a fundamental aspect of Bayesian classification: Model parameters are not optimized as in ``best-fit'' approaches, but \textit{marginalized} in Eqs.~\ref{BayesClass} and~\ref{EClass}. We emphasize that this is implied by plausibility logic: The question is not which model can produce an optimal fit to the data for some parameter choice, but \textit{which model explains the data in the most natural way}, given prior expectations for its parameters. To discuss this in more detail, let us consider one parameter dimension {\small $\omega_i$} of the model parameter space {\small $\Omega_n$}, and assume that {\small $p_n(\boldsymbol{\omega}) \approx p_n(\boldsymbol{\omega}_{\backslash i})/|\Omega_{ni}|$} for {\small $\omega_i \in \Omega_{ni}$} and {\small $p_n(\boldsymbol{\omega})\simeq 0$} otherwise. \ct{Moreover, we assume that for {\small $\omega_i=0$} we obtain {\small $\boldsymbol{\cal H}^0_{ni} \equiv \boldsymbol{\cal H}({\cal M}_{n\backslash i}|\alpha_\ell\mathbb{D})$}, integrated over all parameter dimensions except {\small $\omega_i$}.
Varying {\small $\omega_i$} may decrease {\small $ \boldsymbol{\cal H}({\cal M}_{n\backslash i}|\alpha_\ell\mathbb{D})$} to a value {\small $\sim \boldsymbol{\cal H}^+_{ni} \lnsim \boldsymbol{\cal H}^0_{ni}$} (i.e., increase the likelihood) in some regime {\small $\omega_i \in \eta^+_{ni}$}, while it decreases the likelihood ({\small $\boldsymbol{\cal H}({\cal M}_{n\backslash i}|\alpha_\ell\mathbb{D}) \sim \boldsymbol{\cal H}^-_{ni} \gnsim \boldsymbol{\cal H}^0_{ni}$}) in some other regime {\small $\omega_i \in \eta^-_{ni}$}.} Everywhere else we assume {\small $\boldsymbol{\cal H}({\cal M}_{n\backslash i}|\alpha_\ell\mathbb{D}) \sim \boldsymbol{\cal H}^0_{ni}$}. Defining \ct{ \begin{small}\begin{equation} \Lambda^\pm_{ni} = \pm\left(e^{\boldsymbol{\cal H}^0_{ni}-\boldsymbol{\cal H}^\pm_{ni}} - 1\right) \quad{\rm and}\quad W^\pm_{ni} = \frac{|\eta^\pm_{ni}|}{|\Omega_{ni}|}\quad, \end{equation}\end{small}% }% we immediately obtain for the change in confidence caused by parameter {\small $\omega_i$} \begin{small}\begin{equation}\label{Occam} \big[\Delta c_{{\ell}n}\big]_{\omega_i} \sim \log\big(1 + \Lambda^+_{ni} W^+_{ni} - \Lambda^-_{ni} W^-_{ni}\big)\quad. \end{equation}\end{small}% A significant increase of the model confidence is only obtained if {\small $\Lambda^+_{ni} W^+_{ni} - \Lambda^-_{ni} W^-_{ni} \gnsim 1$}, i.e., if a significant \textit{net} improvement of the fit quality averaged over the ``prior mass'' {\small $\Omega_{ni}$} of the parameter is achieved. We shall call parameters with this property \textit{robust}, while parameters with {\small $\Lambda^+_{ni} W^+_{ni} \lesssim \Lambda^-_{ni} W^-_{ni}$} shall be called \textit{fragile}. The factors {\small $W^\pm_{ni}$} are equivalent to the \textit{Ockham factors} defined by Jaynes \cite[\S\,20]{Jaynes}, referring to the principle of simplicity known as \textit{Ockham's razor}. However, Eq.~\ref{Occam} shows that Bayesian logic does \textit{not} lead to a flat penalization of model complexity; rather, a parameter which does not affect the fit quality ({\small $W^+_{ni} = W^-_{ni} = 0$}) does not affect the model confidence. It therefore seems more appropriate to say that \textit{Bayesian logic penalizes fine tuning}, i.e., the introduction of fragile parameters with weak prior constraints for the mere purpose of improving the ``best fit'' for some particular choice of parameter values.\footnote{In his discussion of this topic on pp.~605--607 of his book \cite{Jaynes}, Jaynes implicitly assumes that the likelihood is significantly different from zero only within {$\eta^+_{ni}$}. If a moderately good match {$\boldsymbol{\cal H}^0_{ni}$} has been achieved without the parameter {$\omega_i$}, this is equivalent to setting {$\Lambda^-_{ni} = 1$} and {$W^-_{ni} = 1 - W^+_{ni}$} in Eq.~\ref{Occam}, yielding {$[\Delta c_{{\ell}n}]_{\omega_i} \sim \boldsymbol{\cal H}^0_{ni} - \boldsymbol{\cal H}^+_{ni} + \log W^+_{ni}$}. Now the Ockham factor indeed penalizes the model complexity as it requires {$\boldsymbol{\cal H}^0_{ni} - \boldsymbol{\cal H}^+_{ni} \gnsim -\log W^+_{ni}$} for a significant improvement of confidence (note that {$\log W^+_{ni} < 0$}).} \paragraph{Bayesian Learning: Updating the Classification Scheme} Classification is naturally applied to a large number of objects {\small $\mathbb{O} \equiv \set{[\alpha\mathbb{D}{\cal M}]_s}_\perp$}, which allows us to use posterior number distributions to iteratively update all prior assumptions that we have entered.
In particular, total model priors {\small $\boldsymbol{\cal P}({\cal M}_n)$} can be updated as \begin{small}\begin{equation}\label{postprior} \frac{\boldsymbol{\cal N}(\mathbb{O}|_{n,\bf A})}{\boldsymbol{\cal N}(\mathbb{O}|_{\bf A})} \succ \boldsymbol{\cal P}({\cal M}_n)\quad, \end{equation}\end{small}% where {\small $\mathbb{O}|_{\bf A}$} [{\small $\mathbb{O}|_{n,\bf A}$}] denotes the set of all \textbf{A}-rated objects [in model class {\small $n$}]. In the same way, updates can be applied to the shape of prior p.d.f.s of the models, if these are determined by empirical parameters. The most important parameter for posterior updates is the odd-object threshold {\small $\xi$}. If we consider {\small $\boldsymbol{\cal P}({\cal M}_0)$} determined by Eq.~\ref{postprior} as a function of {\small $\xi$} and call it {\small $R_0(\xi)$}, we note that {\small $R_0(0)=1$} and {\small $R_0(\xi)\to 0$} for {\small $\xi\to\infty$}. If classification exceptions hide a class of undiscovered objects with particular properties, we would expect them to be grouped around some large value of {\small $\xi$}, while all objects fitting into the classification scheme have small values of {\small $\xi$}. In between, we expect a range where {\small $R_0(\xi)$} remains approximately constant, and a good choice of {\small $\xi$} for separating the two populations is then found by maximizing \begin{small}\begin{equation} \varepsilon(\xi) = \xi + \frac{\rm d}{{\rm d}\xi}\log R_0(\xi) \end{equation}\end{small}% within the range of {\small $\xi$} where {\small $R_0(\xi)> 0$}. Once {\small $\xi$} is found, we can update all model priors by Eq.~\ref{postprior}. In principle, every update is a redefinition of the classification scheme {\small $\boldsymbol{\mathfrak{C}}$}, and the goal of our iterative process is to find a converging chain of updates {\small $\boldsymbol{\mathfrak{C}}\succ\boldsymbol{\mathfrak{C}}'\succ\boldsymbol{\mathfrak{C}}''\succ\ldots$}, until a self-consistent result is obtained. If this does not succeed, our conclusion might be that the classification task is ill-defined, and we may replace our classification scheme {\small $\boldsymbol{\mathfrak{C}}$} with an entirely different {\small $\boldsymbol{\mathfrak{C}}^*$}, containing other models to define object classes. \renewcommand\AIPsectionpostskip {.4\bodytextbaselineskip} \subsection{Classification and Inference} \paragraph{Interpretation Schemes and Anomalies} With these considerations we make the link from Bayesian classification to Bayesian inference. There, we confront a set of models or theories --- we call it the \textit{interpretation scheme} {\small $\boldsymbol{\mathfrak{I}}$} --- with a series of data sets {\small $\mathbb{D}_s$}, which we now call \textit{tests} of the interpretation scheme, expecting that subsequent tests will lead to a more and more reliable estimation of the free parameters in our model space. Occasionally, however, results of experiments will not fit at all into the picture ({\small $\kappa \gg 1$}), and we then call them \textit{anomalies}. Normally, we will cope with anomalies by successively extending the parameter spaces of models ({\small $\boldsymbol{\mathfrak{I}}\succ\boldsymbol{\mathfrak{I}}'\succ\boldsymbol{\mathfrak{I}}''\succ\ldots$}), but if anomalies become rampant, we will have to doubt the validity of our interpretation scheme as a whole.
\ct{This may lead us to replace it with a new scheme built on entirely new theories ({\small $\boldsymbol{\mathfrak{I}}\to \boldsymbol{\mathfrak{I}}^*$}), involving a reinterpretation of all data sets observed so far.} \paragraph{The Course of Science in a Bayesian View} The gentle reader may have noticed that our interpretation scheme is what Thomas Kuhn has called a \textit{paradigm} \cite{Kuhn}. In Bayesian language, it is that part of our ``web of beliefs'' which is kept unchanged in technical applications, slowly modified in the normal course of science, but questioned and eventually overthrown when confronted with overwhelming anomalies. \ct{We have identified the counter-evidence as a measure to monitor such developments.} \ct{We may write {\small $\boldsymbol{\mathfrak{I}}(t)$} for an interpretation scheme continuously modified over time, and define {\small $\bar\kappa(t)$} as its average counter-evidence.} {\small $\boldsymbol{\mathfrak{I}}(t)$} can then be identified with Imre Lakatos' concept of a \textit{research programme} \cite{Lakatos}, and the sign of {\small $d\bar\kappa/dt$} would indicate whether it is ``progressing'' ({\small $d\bar\kappa/dt<0$}) or ``degenerating'' ({\small $d\bar\kappa/dt>0$}). Degeneration of a research programme --- or the decline of a paradigm --- is caused not only by experimental anomalies, but also by fragile parameters introduced to cope with them. At the end of the road, we may enter what Kuhn calls a \textit{scientific revolution}, the \textit{incommensurable paradigm shift} {\small $\boldsymbol{\mathfrak{I}}\to \boldsymbol{\mathfrak{I}}^*$}, by which all known data obtain a new meaning \cite{Kuhn}. A further exploration of these topics would be beyond the scope of this paper, but it is intriguing to note \ct{how Bayesian methods allow a quantitative understanding of concepts in the philosophy of science which are otherwise considered irrational.} \renewcommand\AIPsectionfont {\small\bfseries} \renewcommand\AIPsectionpreskip {\bodytextbaselineskip plus 3pt minus 1pt} \begin{theacknowledgments} \footnotesize The author thanks Tim\,Pearson, Torsten\,En{\ss}lin and the anonymous referees for comments and discussions. \end{theacknowledgments} \begin{footnotesize} \bibliographystyle{aipproc}
\section{Introduction}\label{sec:intro} A basic requirement for many models of deformable solids is that they should prevent interpenetration of mass. In the context of hyperelasticity, i.e., nonlinear elasticity fully determined by a stored elastic energy function (see, e.g., \cite{Ba77a,Cia88B} for an introduction), this is ensured by a strong local resistance to compression built into the energy density, which in particular prevents local change of orientation, combined with a constraint preventing global self-penetration, usually the Ciarlet--Ne\v{c}as condition \cite{CiaNe87a}, see \eqref{eq:CNC} below. In this article, we study the approximation of the latter by augmenting the local elastic energy with a nonlocal functional with self-repulsive properties, formally corresponding to suitable Sobolev--Slobodecki\u\i\ seminorms of the inverse deformation. While all results presented here are purely analytical, our motivation is mainly numerical, related to the fact that the Ciarlet--Ne\v{c}as condition is hard to handle numerically in such a way that the algorithm maintains an acceptable computational cost while still provably converging. In particular, there is still no known projection onto the Ciarlet--Ne\v{c}as condition which is both rigorous and of acceptable computational cost, see \cite{AigLi2013a} for some partial results. There is a well-known straightforward penalty term that rigorously reproduces the Ciarlet--Ne\v{c}as condition in the limit (see \cite{MieRou16a}, e.g.), but it is hard to implement, non-smooth and computationally very expensive as a double integral on the full domain. Recent results on more practical rigorous approximation of the Ciarlet--Ne\v{c}as condition via nonlocal penalty terms added to the elastic energy were obtained in \cite{KroeVa19a} and \cite{KroeVa22Pa}, but these require additional regularity of elastic deformations, which possibly interferes with the Lavrentiev phenomenon that is known to appear in at least some nonlinear elastic models~\cite{FoHruMi03a}. Using the language of $\Gamma$-convergence (see \cite{Brai02B}, e.g.), we show that in combination with local nonlinear elastic energies, the self-repulsive terms studied here also provide a rigorous approximation of the Ciarlet--Ne\v{c}as condition without requiring regularity of deformations beyond what is naturally provided by the nonlinear elastic energy (Theorem~\ref{thm:main}). In addition, these admit natural variants near or on the boundary (Theorems~\ref{thm:main2} and~\ref{thm:main3}), which are significantly cheaper to compute in practice. The latter crucially rely on a global invertibility property of orientation-preserving maps exploiting topological information on the boundary \cite{Kroe20a} (for related results see also \cite{Ba81a,HeMoCoOl21a}). Our results here still do not cover the full range of hyperelastic energies which are known to be variationally well-posed, though. In fact, we require lower bounds on the energy density which are strong enough so that deformation maps with finite elastic energy are automatically continuous, open and discrete, the latter two by the theory of functions of bounded distortion \cite{HeKo14B}. In our proofs, this is essential so that all local regularity is controlled by the elastic energy, while the nonlocal self-repulsive term asymptotically only controls global self-contact.
For related results concerning self-avoiding curves and surfaces in a more geometrical context with higher regularity, we refer to \cite{BaReiRie18a,BaRei20a,BaRei21a,YuBraSchuKee21a} and references therein. \subsection*{General assumptions} Let $\Omega\subset\ensuremath{\mathbb{R}}^{d}$ be a bounded Lipschitz domain, $d\ge2$, $p\in(d,\infty)$, $r>0$, $\ensuremath{\varepsilon}\ge0$, $q\ge 1$ and $s\geq 0$. By $W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ we denote the set of all functions $y\in W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ with $\det \nabla y(x)>0$ for a.e.\ $x\in\Omega$. We consider an integral functional modeling the internal elastic energy of a deformation $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ of a nonlinear hyperelastic solid. For simplicity, we restrict ourselves to the following ``neo-Hookean'' form given by \[ \ensuremath{\mathcal E}(y) = \int_{\Omega}\abs{\nabla y(x)}^{p}\d x + \int_{\Omega}\frac{\d x}{\br{\det \nabla y(x)}^{r}}. \] We are interested in precluding deformations corresponding to self-interpenetration of matter, i.e., non-injective $y$. Classically, the latter is imposed by adding the \emph{Ciarlet--Ne\v cas condition} \cite{CiaNe87a} as a constraint: \begin{align}\label{eq:CNC}\tag{CN} \int_{\Omega}\abs{\det \nabla y(x)}\d x= \abs{y(\Omega)}. \end{align} \begin{remark}\label{rem:CNC} By the area formula, the inequality \textup{``$\ge$''} in~\eqref{eq:CNC} always holds true and \eqref{eq:CNC} is equivalent to a.e.~injectivity of $y$ provided that $\det\nabla y>0$ a.e. As the latter is usually given, \eqref{eq:CNC} can also be expressed in the more standard form \begin{align*} \int_{\Omega}\det \nabla y(x)\d x\le \abs{y(\Omega)}. \end{align*} \end{remark} As a step towards a possible (numerical) approximation of \eqref{eq:CNC}, we regularize $\ensuremath{\mathcal E}$ by adding a singular nonlocal contribution $\ensuremath{\mathcal D}$. Below, we will show that \eqref{eq:CNC} automatically holds whenever $\ensuremath{\mathcal E}(y)<\infty$ and $\ensuremath{\mathcal D}(y)<\infty$, with various examples for~$\ensuremath{\mathcal D}$, see Propositions~\ref{prop:CNC}, \ref{prop:CNC2} and \ref{prop:CNC3}. The first such example for $\ensuremath{\mathcal D}$ is given by \[ \ensuremath{\mathcal D}_{U}(y) = \iint_{U\times U}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{d +sq}}\abs{\det \nabla y(x)}\abs{\det \nabla y(\tilde x)} \d x\d\tilde x \] where $U\subset\Omega$ is some open neighborhood of~$\partial\Omega$ in~$\Omega$, with suitably chosen parameters $q\in[1,\infty)$ and $s\in[0,1)$. In particular, we can choose $U=\Omega$. Transforming the integral and invoking~\cite[Prop.~2]{MR1942116} reveals that the integral is singular if $s\ge1$. Formally, after a change of variables, $\ensuremath{\mathcal D}_{U}$ is the $q$-th power of the Sobolev--Slobodecki\u\i\ seminorm of $y^{-1}$ in the space $W^{s,q}(y(U),\ensuremath{\mathbb{R}}^{d})$. As long as $sq\ge 0$, the functional $\ensuremath{\mathcal D}_{U}$ effectively prevents self-interpenetration, i.e., a loss of injectivity of $y$, as shown in Proposition~\ref{prop:CNC} below. To the best of our knowledge, variants of $\ensuremath{\mathcal D}_{U}$ for curves, with such a purpose in mind, first appeared in a master's thesis~\cite{unseld} supervised by Dziuk, and have subsequently been studied in another master's thesis~\cite{hermes}. The functional~$\ensuremath{\mathcal D}_{U}$ can be interpreted as a sort of ``relaxation'' of the bi-Lipschitz constant.
In this sense it is a rather weak quantity in comparison with similar concepts that have been introduced earlier~\cite{MR1098918,MR1692638}. We also study the boundary variant \[ \ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y) = \iint_{\partial\Omega\times\partial\Omega}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{d-1 +sq}} \d A(x)\d A(\tilde x) \] where $A$ denotes the $(d-1)$-dimensional Hausdorff measure. We give a rigorous statement of this approximation by establishing $\Gamma$-convergence, which is the main result of this paper. To this end, we consider for $y\in W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$, $\ensuremath{\varepsilon}>0$, \[ E_{\ensuremath{\varepsilon}} (y) = \begin{cases} \ensuremath{\mathcal E}(y) + \ensuremath{\varepsilon}\ensuremath{\mathcal D}(y) & \text{if }y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d}), \\ +\infty &\text{else}. \end{cases} \] We reserve the symbol $E_{0}$ for the $\Gamma$-limit which will turn out to be \[ E_{0}(y) := \begin{cases} \ensuremath{\mathcal E}(y) & \text{if $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ satisfies~\eqref{eq:CNC},} \\ +\infty & \text{else.} \end{cases} \] \section{Preliminary results}\label{sec:pre} Change of variables in quite general form is important for us throughout, in the form of the following special case of the area formula due to Marcus and Mizel. \begin{lemma}[cf.~{\cite[Theorem 2]{MaMi73a}}]\label{lem:transform} Let $y\in W^{1,p}(\Lambda,\ensuremath{\mathbb{R}}^{d})$ with $p>d$, where $\Lambda$ is a bounded domain in $\ensuremath{\mathbb{R}}^{d}$. Moreover, assume that $f:\ensuremath{\mathbb{R}}^{d}\to \ensuremath{\mathbb{R}}$ is measurable and $E\subset\Lambda$ is measurable. We use the convention that $f(y(x))\abs{\det\nabla y(x)}=0$ whenever $\abs{\det\nabla y(x)}=0$, and abbreviate $N_{y}(z,E)=\#(y^{-1}(z)\cap E)$ for any $z\in\ensuremath{\mathbb{R}}^{d}$, where $\#$ denotes the counting measure. Then, if one of the functions $x\mapsto f(y(x))\abs{\det\nabla y(x)}$ and $z\mapsto f(z)N_{y}(z,E)$ is integrable, so is the other one, and the identity \begin{equation*} \int_{E} f(y(x))\abs{\det\nabla y(x)}\d x = \int_{\ensuremath{\mathbb{R}}^{d}} f(z)N_{y}(z,E) \d z \end{equation*} holds. \end{lemma} \begin{proposition}\label{prop:lscE} For $p>d$ and $r>0$, the functional $\ensuremath{\mathcal E}:W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)\to [0,\infty]$ is lower semicontinuous with respect to weak convergence in $W^{1,p}$. \end{proposition} \begin{proof} The integrand of $\ensuremath{\mathcal E}$ is polyconvex, since $(F,J)\mapsto|F|^p+J^{-r}$, $\ensuremath{\mathbb{R}}^{d\times d}\times (0,\infty)\to [0,\infty],$ is convex. As shown in detail by Ball \cite{Ba77a}, sequential weak lower semicontinuity of $\ensuremath{\mathcal E}$ therefore follows from the weak continuity of the determinant, i.e., $y\mapsto \det \nabla y$ as a map between $W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)$ and $L^{p/d}(\Omega)$, where both spaces are endowed with their weak topologies. \end{proof} The Ciarlet--Ne\v cas condition is a viable constraint for direct methods: \begin{lemma}[Weak stability of~\eqref{eq:CNC} {\cite[p.~185]{CiaNe87a}}] \label{lem:stable} Let $y_{k}\rightharpoonup y_{\infty}$ in $W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$, $p>d$, and assume that~\eqref{eq:CNC} holds for all $y_{k}$, $k\in\ensuremath{\mathbb{N}}$. Then~\eqref{eq:CNC} applies to $y_{\infty}$ as well. \end{lemma} Using the theory of maps of bounded distortion, we can obtain even more.
For sufficiently large $p$ and $r$, deformations with finite energy are open and discrete (see below) due to a result of Villamor and Manfredi \cite{ViMa98a}. With added global topological information, say, in the form of \eqref{eq:CNC}, finite-energy maps are even necessarily homeomorphisms \cite[Section 3]{GraKruMaiSte19a} (see also \cite{Kroe20a} for related results). In summary, we have the following. \begin{proposition}\label{prop:homeo} Let $d\geq 2$, \[ p>d(d-1)\quad(\ge d),\qquad r>\frac{p(d-1)}{p-d(d-1)}\quad(>d-1), \] and let $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ such that $\ensuremath{\mathcal E}(y)<\infty$. Then the continuous representative of $y$ is open (i.e., $y$ maps open subsets of $\Omega$ to open sets in $\ensuremath{\mathbb{R}}^{d}$) and discrete (i.e., for each $z\in\ensuremath{\mathbb{R}}^d$, $y^{-1}(\{z\})$ does not have accumulation points in~$\Omega$). In particular, $y(\Omega)$ is open in $\ensuremath{\mathbb{R}}^d$. If, in addition, \eqref{eq:CNC} holds, then $y$ is a homeomorphism on~$\Omega$ and $y^{-1}\in W_{+}^{1,\sigma}(y(\Omega),\Omega)$, where \[ \sigma:=\frac{(r+1)p}{r(d-1)+p}>d. \] \end{proposition} \begin{remark}\label{rem:homeo} Possible self-contact on $\partial\Omega$ is not ruled out, and so $y$ is not necessarily a homeomorphism on~$\overline\Omega$. \end{remark} \begin{proof}[Proof of Proposition~\ref{prop:homeo}] With $F=\nabla y(x)$, \[ K^O(F):=\frac{\abs{F}^d}{\det F} \] is the \emph{outer distortion} of $y$ at $x$ (or \emph{dilatation} in the terminology of \cite{ViMa98a}). We infer from Young's inequality for some $\kappa,\rho\in(1,\infty)$ that \begin{align*} \abs{K^O(F)}^{\kappa} \leq C_{\rho}\left(\abs F^{d\kappa\rho} + {\br{\frac1{\det F}}^{\frac{\kappa\rho}{\rho-1}}}\right). \end{align*} Given $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$, we find that $K^O(\nabla y)\in L^{\kappa}(\Omega)$ provided that $\ensuremath{\mathcal E}(y)<\infty$, choosing $d\kappa\rho=p$ and $\kappa=r(1-\frac1\rho)$. Since $\frac1\kappa=\frac1r+\frac dp<\frac{p-d(d-1)}{p(d-1)} + \frac dp = \frac1{d-1}$ and $\rho=1+\frac p{dr}>1$, we may conclude that $y$ is open and discrete as shown by Villamor and Manfredi \cite[Theorem 1]{ViMa98a}. Finally, \eqref{eq:CNC} implies that $y\in W_{+}^{1,p}$ is a map of (Brouwer's) degree $1$ for values in its image ($y\in \rm{DEG}1$ by \cite[Remark 2.19(b)]{Kroe20a}, e.g.). By \cite[Thm.~6.8]{Kroe20a}, it now follows that $y:\Omega\to y(\Omega)$ is a homeomorphism with weakly differentiable inverse, and $\nabla (y^{-1})\in L^d(y(\Omega);\ensuremath{\mathbb{R}}^{d\times d})$. Now that $y$ is invertible with weakly differentiable inverse, we may improve the last conclusion to $L^{\sigma}$. To this end, we first apply a change of variables and use $F^{-1}=\frac{\cof F}{\det F}$, whence $|F^{-1}|\leq c\abs{F}^{d-1}|\det F|^{-1}$. Then the assertion follows again by invoking Young's inequality to bound $\abs{F}^{(d-1)\sigma}|\det F|^{-(\sigma-1)}$. \end{proof} The final two lemmas of this section will be crucial ingredients in the construction of a recovery sequence in the proof of Theorem~\ref{thm:main}. They are also used in \cite{KroeVa19a,KroeVa22Pa} in similar fashion. For a closely related result and further references we refer to \cite[Theorem 5.1]{BaZa17a}. \begin{lemma}[domain shrinking]\label{lem:shrinking} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain.
Then there exists a sequence of $C^\infty$-diffeomorphisms \[ \Psi_j:\overline{\Omega}\to \Psi_j(\overline{\Omega})\subset\subset \Omega \] such that as $j\to\infty$, $\Psi_j\to \identity$ in $C^m(\overline{\Omega};\ensuremath{\mathbb{R}}^d)$ for all $m\in \ensuremath{\mathbb{N}}$. \end{lemma} \begin{lemma}[composition with domain shrinking is continuous]\label{lem:shrinkcont} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain, $k\in \ensuremath{\mathbb{N}}_0$, $1\leq p <\infty$ and $f\in W^{k,p}(\Omega;\ensuremath{\mathbb{R}}^m)$, $m\in \ensuremath{\mathbb{N}}$. With the maps $\Psi_j$ of Lemma~\ref{lem:shrinking}, we then have that $f\circ \Psi_j\to f$ in $W^{k,p}(\Omega;\ensuremath{\mathbb{R}}^m)$. \end{lemma} \begin{proof}[Proof of Lemma~\ref{lem:shrinking}] If $\Omega$ is strictly star-shaped with respect to a point $x_0\in\Omega$, one may take $\Psi_j(x):=x_0+\frac{j-1}{j}(x-x_0)$. For a general Lipschitz domain, one can combine local constructions near the boundary using a smooth partition of unity: If, locally in some open cube $Q$, the set $\Omega$ is given as a Lipschitz subgraph, i.e., $\Omega\cap Q=\{x\in Q\mid x\cdot e\leq f(x')\}$ and $\partial\Omega\cap Q=\{x'+ef(x')\mid x\in Q\}$, where $e$ is a unit vector orthogonal to one of the faces of $Q$, $x':=x-(x\cdot e) e$ and $f$ is a Lipschitz function, we define \[ \hat{\Psi}_j(x;Q):=x'+\alpha_0 e+\frac{j-1}{j}(e\cdot x-\alpha_0)e\quad\text{for $x\in Q$, with $\alpha_0:=\inf_{x\in Q}e\cdot x$}. \] Notice that $\hat{\Psi}_j(\cdot;Q)$ pulls the local boundary piece $\partial\Omega\cap Q$ ``down'' (in direction $-e$) into the original domain while leaving the ``lower'' face of $Q$ fixed. Since $\partial\Omega$ can be covered by finitely many such cubes, we can write $\overline{\Omega}\subset Q_0\cup \bigcup_{k=1}^{n} Q_k$ with some open interior set $Q_0\subset\subset \Omega$. For a smooth partition of unity $1=\sum_{k=0}^n \varphi_k$ subordinate to this covering of $\Omega$ (i.e., $\varphi_k$ smooth, non-negative and compactly supported in $Q_k$), \[ \Psi_j(x):=\varphi_0(x)x+\sum_{k=1}^n \varphi_k(x) \hat{\Psi}_j(x;Q_k) \] now has the asserted properties. \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:shrinkcont}] We only provide a proof for the case $k=1$, which will include the argument for $k=0$. For $k\geq 2$, the assertion follows inductively. It suffices to show that as $j\to\infty$, $\partial_n [f\circ \Psi_j-f] \to 0$ in $L^p$, for each partial derivative $\partial_n$, $n=1,\ldots, d$. By the chain rule, \begin{align}\label{lemshrinkdef-1} \begin{aligned} &\partial_n [f\circ \Psi_j-f]= [(\nabla f)\circ \Psi_j] \partial_n \Psi_j -\partial_n f \\ &\quad =\Big([(\nabla f)\circ \Psi_j] \partial_n \Psi_j-(\partial_n f)\circ \Psi_j \Big) +\Big((\partial_n f)\circ \Psi_j -\partial_n f\Big). \end{aligned} \end{align} The first term above converges to zero in $L^p$ since $(\nabla f) e_n=\partial_n f$ for the $n$-th unit vector $e_n$, and $\partial_n\Psi_j \to \partial_n\identity=e_n$ uniformly. The convergence of the second term corresponds to our assertion for the case $k=0$, with $\tilde{f}:=\partial_n f\in L^p$.
It can be proved in the same way as the well-known continuity of the shift in $L^p$: If $\tilde{f}$ is smooth and can be extended to a smooth function on $\ensuremath{\mathbb{R}}^d$, we have \begin{align}\label{lemshrinkdef-2} \norm{\tilde{f}\circ \Psi_j -\tilde{f}}_{L^p(\Omega;\ensuremath{\mathbb{R}}^m)}\leq \norm{\nabla\tilde{f}}_{L^\infty(\ensuremath{\mathbb{R}}^d;\ensuremath{\mathbb{R}}^{m\times d})} \norm{\Psi_j-\identity}_{L^p(\Omega;\ensuremath{\mathbb{R}}^d)}\underset{j\to\infty}{\longrightarrow} 0. \end{align} The general case follows by approximation of $\tilde{f}$ in $L^p$ with such smooth functions, by first extending $\tilde{f}$ by zero to all of $\ensuremath{\mathbb{R}}^d$, and then mollifying. Here, notice that for the mollified function, $\|\nabla\tilde{f}\|_{L^\infty}$ in \eqref{lemshrinkdef-2} is unbounded in general as a function of the mollification parameter, but one can always choose the latter to converge slowly enough with respect to $j$ so that \eqref{lemshrinkdef-2} still holds. \end{proof} \section{Elasticity with vanishing nonlocal self-repulsion}\label{sec:main} In this section, we will study energies of the form \[ E_{\ensuremath{\varepsilon}} (y) = \begin{cases} \ensuremath{\mathcal E}(y) + \ensuremath{\varepsilon} \ensuremath{\mathcal D}(y) & \text{if }y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d}), \\ +\infty &\text{else}, \end{cases} \] in the limit $\ensuremath{\varepsilon}\to 0^+$, in the sense of $\Gamma$-convergence with respect to the weak topology of $W^{1,p}$. Here, we say that $E_{\ensuremath{\varepsilon}}$ $\Gamma$-converges to a functional $E_0$ if the following two properties hold for every sequence $\ensuremath{\varepsilon}(k)\to 0^+$ and every $y\in W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)$: \begin{itemize} \item[(i)] (lower bound) For all sequences $y_k\rightharpoonup y$ weakly in $W^{1,p}$, \[ \liminf_k E_{\ensuremath{\varepsilon}(k)}(y_k)\geq E_0(y). \] \item[(ii)] (recovery sequence) There exists a sequence $y_k\rightharpoonup y$ weakly in $W^{1,p}$ such that \[ \limsup_k E_{\ensuremath{\varepsilon}(k)}(y_k)\leq E_0(y). \] \end{itemize} \begin{remark} Notice that we do not require \emph{compactness} here, i.e., that any sequence $(y_k)$ with bounded $E_{\ensuremath{\varepsilon}(k)}(y_k)$ has a subsequence weakly converging in $W^{1,p}$. This is automatic as soon as bounded energy implies a bound in the norm of $W^{1,p}$. However, in the most basic form, the energies we study are translation invariant and only control $\nabla y$ but not $y$. Of course, this would change as soon as a Poincar\'e inequality can be used, due to a suitable boundary condition or other controls on $y$ or its average, added via a constraint or additional lower-order energy terms. \end{remark} We will discuss three different examples for $\ensuremath{\mathcal D}$, each preventing self-interpenetration, i.e., a loss of injectivity of $y$, in a different way. Recall that \[ E_{0}(y) = \begin{cases} \ensuremath{\mathcal E}(y) & \text{if $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ satisfies~\eqref{eq:CNC},} \\ +\infty & \text{else.} \end{cases} \] Throughout this section, we assume that $d,p,r,\sigma$ satisfy the assumptions of Proposition~\ref{prop:homeo}, namely \begin{equation}\label{eq:dprs} d\geq 2,\quad p>d(d-1),\quad r>\frac{p(d-1)}{p-d(d-1)}, \quad \sigma=\frac{(r+1)p}{r(d-1)+p}.
\end{equation} \subsection{Bulk self-repulsion}\label{ssec:mainbulk} Here we consider the energy $E_\ensuremath{\varepsilon}$ with $\ensuremath{\mathcal D} := \ensuremath{\mathcal D}_{\Omega}$, i.e., \[ \ensuremath{\mathcal D}(y) = \ensuremath{\mathcal D}_{\Omega}(y) = \iint_{\Omega\times\Omega}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{d +sq}}\abs{\det \nabla y(x)}\abs{\det \nabla y(\tilde x)} \d x\d\tilde x \] for $y\in W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$, $q\in[1,\infty)$, $s\in[0,1)$. The following statement is actually not required for our main result. However, together with its counterpart for the elastic energy (cf.~Proposition~\ref{prop:lscE}) it ensures well-posedness of the variational model. \begin{proposition}\label{prop:lscD1} For $p>d$, the functional $\ensuremath{\mathcal D}_\Omega:W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)\to [0,\infty]$ is lower semicontinuous with respect to weak convergence in $W^{1,p}$. \end{proposition} \begin{proof} For $\delta>0$ define \[ \ensuremath{\mathcal D}^{[\delta]}(y):=\iint_{\Omega\times\Omega}\frac{\abs{x-\tilde x}^{q}}{\max\{\delta,\abs{y(x)-y(\tilde x)}^{d+sq}\}}\abs{\det \nabla y(x)}\abs{\det \nabla y(\tilde x)}\,dx\,d\tilde x\leq \ensuremath{\mathcal D}(y). \] Let $(y_k)\subset W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)$ with $y_k\rightharpoonup y$ in $W^{1,p}$, for some $y\in W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)$. In particular, $y_k\to y$ in $C(\overline\Omega;\ensuremath{\mathbb{R}}^d)$ by embedding, and consequently, \begin{align}\label{wlscD-1} \liminf_{k\to\infty}\ensuremath{\mathcal D}^{[\delta]}(y_k)=\liminf_{k\to\infty} \iint_{\Omega\times\Omega} W_y(x,\tilde x,\det \nabla y_k(x),\det \nabla y_k(\tilde x))\,dx\,d\tilde x \end{align} where \[ W_y(x,\tilde x,J,\tilde{J}):=\frac{\abs{x-\tilde x}^{q}}{\max\{\delta,\abs{y(x)-y(\tilde x)}^{d+sq}\}}\abs{J}|\tilde{J}| \quad \text{for}~~x,\tilde x\in \Omega,~~J,\tilde{J}\in \ensuremath{\mathbb{R}}. \] Clearly, $W_y$ is symmetric in $(x,\tilde x)$ and $(J,\tilde{J})$, as well as separately convex in $(J,\tilde{J})$, i.e., convex in $J$ with $x,\tilde x,\tilde{J}$ fixed and convex in $\tilde{J}$ with $x,\tilde x,J$ fixed. By \cite[Theorem 2.5]{Pe16a} (see also the related earlier result \cite[Theorem 11]{El11Pa}), this implies weak lower semicontinuity of $J\mapsto \iint_{\Omega\times\Omega} W_y(x,\tilde x,J(x),J(\tilde{x}))\,dx\,d\tilde x$ in $L^\alpha(\Omega)$, in particular for $\alpha:=\frac{p}{d}$. Again exploiting the weak continuity of the determinant, i.e., $J_k:=\det \nabla y_k\rightharpoonup J:=\det \nabla y$ weakly in $L^{p/d}$, we thus get that \begin{align}\label{wlscD-2} \begin{aligned} \liminf_{k\to\infty}\iint_{\Omega\times\Omega} W_y(x,\tilde x,\det \nabla y_k(x),\det \nabla y_k(\tilde x))\,dx\,d\tilde x & \\ \quad \geq \iint_{\Omega\times\Omega} W_y(x,\tilde x,\det \nabla y(x),\det \nabla y(\tilde x))\,dx\,d\tilde x &= \ensuremath{\mathcal D}^{[\delta]}(y). \end{aligned} \end{align} Combining \eqref{wlscD-1} and \eqref{wlscD-2}, we see that $\ensuremath{\mathcal D}^{[\delta]}$ is weakly lower semicontinuous for each $\delta>0$. Since $\ensuremath{\mathcal D}_\Omega(y)=\sup_{\delta>0} \ensuremath{\mathcal D}^{[\delta]}(y)$, this implies weak lower semicontinuity of~$\ensuremath{\mathcal D}_\Omega$. \end{proof} \begin{theorem}\label{thm:main} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain, and suppose that $q\geq 1$ and $s\in[0,1)$.
In addition, suppose that~\eqref{eq:dprs} holds together with \begin{equation}\label{eq:sigma} s-\frac dq\le 1-\frac d\sigma. \end{equation} Then the functionals $E_{\ensuremath{\varepsilon}}$ $\Gamma$-converge to $E_{0}$ as $\ensuremath{\varepsilon}\searrow 0$, with respect to the weak topology of $W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$. \end{theorem} For the proof, we additionally need the following two propositions. \begin{proposition}[Finite $E_\ensuremath{\varepsilon}(y)$ implies \eqref{eq:CNC}]\label{prop:CNC} Suppose that $s,q\geq 0$ and \eqref{eq:dprs} holds, and let $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ such that $\ensuremath{\mathcal E}(y)<\infty$ and $\ensuremath{\mathcal D}(y)<\infty$. Then $y$ satisfies \eqref{eq:CNC}. \end{proposition} \begin{proof} The proof is indirect. Suppose that \eqref{eq:CNC} does not hold. By the area formula (cf.~Lemma~\ref{lem:transform}), this means that $Z_2:=\{z\in \ensuremath{\mathbb{R}}^d\mid N_y(z,\Omega)\geq 2\}$ has positive measure. As a consequence, $X_2:=y^{-1}(Z_2)$ also has positive measure, because $y$ satisfies Lusin's property (N) as a map in $W^{1,p}$ with $p>d$. In addition, we claim that $X_2$ is open. For a proof, take any $x\in X_2\neq \emptyset$. By definition of $X_2$, there exists another point $\tilde{x}\in X_2\setminus \{x\}$ such that $y(x)=y(\tilde{x})$. If we choose disjoint open neighborhoods $U,\tilde{U}\subset \Omega$ of $x,\tilde{x}$, respectively, then $y(U)$ and $y(\tilde{U})$ are open sets because $y$ is open by Proposition~\ref{prop:homeo}. Hence, their intersection $y(U)\cap y(\tilde{U})\subset Z_2$ is also open, and it contains $y(x)=y(\tilde{x})$. By continuity of $y$, we conclude that $y^{-1}(y(U)\cap y(\tilde{U}))$ is now an open subset of $X_2$ containing $x$ (and $\tilde{x}$). The above construction in particular shows that we can choose two open, nonempty sets $V,W\subset X_2\subset \Omega$, with $x\in V\subset U\cap y^{-1}(y(U)\cap y(\tilde{U}))$ and $W:=\tilde{U}\cap y^{-1}(y(V))$, such that $\overline V\cap \overline W=\emptyset$ and such that $y(V)$ and $y(W)$ are open with $y(W)\subset y(V)$. Hence, with \[ \delta:=\min\{ \nabs{x-\tilde{x}} \mid x\in \overline V,~\tilde{x}\in \overline W\}>0, \] we have that \[ \ensuremath{\mathcal D}(y)\geq \iint_{V\times W}\frac{{\delta}^{q}}{{\nabs{y(x)-y(\tilde{x})}}^{d+sq}} \nabs{\det\nabla y(x)}\nabs{\det\nabla y(\tilde{x})} \,\d x\d\tilde x. \] Changing variables in both integrals using Lemma~\ref{lem:transform}, also using that $N_y\geq 1$ on the image of $y$, we infer that \[ \ensuremath{\mathcal D}(y)\geq {\delta}^{q}\int_{y(W)}\int_{y(V)} \frac{1}{\nabs{\xi-\tilde{\xi}}^{d+sq}} \,\d\xi\d\tilde\xi. \] In the inner integral, for each $\tilde\xi\in y(W)$, there is always at least one singularity at $\xi=\tilde\xi\in y(W)\subset y(V)$, an interior point. Since $sq\geq 0$, this implies that $\ensuremath{\mathcal D}(y)=+\infty$, contradicting our assumption. \end{proof} \begin{proposition}\label{prop:distor} Let $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ be a homeomorphism $\Omega\to y(\Omega)$ with $y^{-1}\in W^{1,\sigma}(y(\Omega),\Omega)$, $q\in[1,\infty)$, $s\in[0,1)$. In addition, suppose that \eqref{eq:dprs} and~\eqref{eq:sigma} hold. If $\Omega'$ is open and $\Omega'\subset\subset\Omega$ then $\ensuremath{\mathcal D}_{\Omega'}(y|_{\Omega'})<\infty$. \end{proposition} \begin{proof} We apply Lemma~\ref{lem:transform} twice to change variables in each of the two integrals in $\mathcal D_{\Omega'}$, with $E=\Omega'$.
First, change variables in the inner integral, say, over $x$, using $z=\xi$ and $f(z)={\nabs{y^{-1}(z)-\tilde{x}}^{q}}{\nabs{z-y(\tilde{x})}^{-(d+sq)}}$ for any fixed $\tilde{x}\in \Omega'$. Afterwards, use Fubini's theorem to change the order of integration and change variables in the integral over $\tilde{x}$, now for any fixed $\xi$ using $z=\tilde{\xi}$ and $f(z)={\nabs{y^{-1}(\xi)-y^{-1}(z)}^{q}}{\nabs{\xi-z}^{-(d+sq)}}$. We thus obtain that \begin{align*} \mathcal D_{\Omega'}(y|_{\Omega'}) &=\iint_{y(\Omega')\times y(\Omega')}\frac{\nabs{y^{-1}(\xi)-y^{-1}(\tilde \xi)}^{q}}{\nabs{\xi-\tilde\xi}^{d+sq}} \d\xi\d\tilde\xi. \end{align*} The right-hand side is just the $q$-th power of the seminorm belonging to the Sobolev--Slobodecki\u\i\ space $W^{s,q}(y(\Omega'),\ensuremath{\mathbb{R}}^{d})$. As $\overline{y(\Omega')}\subset y(\Omega)$ we may find $\psi\in C^{\infty}(\ensuremath{\mathbb{R}}^{d})$ supported in $y(\Omega)$ with $\psi=1$ on $y(\Omega')$. Choosing any regular value $t\in(0,1)$, the set $\psi^{-1}((t,1])$ has a smooth boundary. We denote its component containing $y(\Omega')$ by~$\Upsilon$. In case $s\in(0,1)$, using $y(\Omega')\subset\Upsilon\subset y(\Omega)$ and applying the embedding theorem, we infer \begin{align*} \mathcal D_{\Omega'}(y|_{\Omega'}) &\le \sq{y^{-1}|_{y(\Omega')}}_{W^{s,q}(y(\Omega'),\ensuremath{\mathbb{R}}^{d})}^{q} \le \sq{y^{-1}|_{\Upsilon}}_{W^{s,q}(\Upsilon,\ensuremath{\mathbb{R}}^{d})}^{q} \\ &\le C_{d,q,\sigma,\Upsilon}\norm{y^{-1}|_{\Upsilon}}_{W^{1,\sigma}(\Upsilon,\ensuremath{\mathbb{R}}^{d})}^{q} \le C_{d,q,\sigma,\Upsilon}\norm{y^{-1}}_{W^{1,\sigma}(y(\Omega),\ensuremath{\mathbb{R}}^{d})}^{q}. \end{align*} The case $s=0$ is similar; in the intermediate step above, we now use $W^{\tilde{s},q}$ with some $\tilde{s}>0$ small enough so that $W^{1,\sigma}$ still embeds into $W^{\tilde{s},q}$ since $\sigma> \frac{dq}{q+d}$. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:main}] \emph{Lower bound ($\mathit\Gamma$-lim\,inf-inequality):} Assume that $y_{k}\rightharpoonup y$ in $W^{1,p}$ and $\ensuremath{\varepsilon}_{k}\searrow 0$. If $\liminf_k E_{\ensuremath{\varepsilon}_k}(y_k)=+\infty$, there is nothing to show. Hence, passing to a suitable subsequence (not relabeled), we may assume that the $\liminf$ is a limit and $E_{\ensuremath{\varepsilon}_k}(y_k)$ is bounded. Since $\ensuremath{\mathcal D}\ge0$ and $\ensuremath{\mathcal E}$ is weakly lower semicontinuous, we get that \begin{align}\label{thmmain-wlsc} \lim_k E_{\ensuremath{\varepsilon}_k}(y_k)\geq \liminf_k \ensuremath{\mathcal E}(y_k)\geq \ensuremath{\mathcal E}(y). \end{align} Moreover, by Proposition~\ref{prop:CNC}, we see that $y_k$ satisfies~\eqref{eq:CNC} for all $k$, and by Lemma~\ref{lem:stable}, this implies that $y$ satisfies~\eqref{eq:CNC}. Hence, $\ensuremath{\mathcal E}(y)=E_0(y)$, and \eqref{thmmain-wlsc} thus implies the asserted lower bound. \emph{Upper bound (construction of a recovery sequence):} Let $y\in W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)$ be given. We may assume that $E_0(y)<\infty$, because otherwise there is nothing to show. We therefore have that $y\in W_{+}^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)$, $y$ satisfies \eqref{eq:CNC} and $\ensuremath{\mathcal E}(y)<\infty$. By Proposition~\ref{prop:homeo}, the map $y:\Omega\to y(\Omega)$ is a homeomorphism. We choose $j\in\ensuremath{\mathbb{N}}$ sufficiently large and shrink the domain $\Omega$ to $\Omega_{j}=\Psi_j(\Omega)$, using Lemma~\ref{lem:shrinking}.
Now define \[ y_{j}:\Omega\to y(\Omega_{j}), \qquad y_{j}:=y|_{\Omega_{j}}\circ \Psi_j. \] As $j\to \infty$, $y_{j}\to y$ in $W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)$ and $\ensuremath{\mathcal E}(y_{j})\to\ensuremath{\mathcal E}(y)$ by Lemma~\ref{lem:shrinkcont}. Here, concerning the term $(\det\nabla y)^{-r}$ in $\ensuremath{\mathcal E}$, notice that by the chain rule and the multiplicativity of the determinant, we have that \[ \frac{1}{(\det\nabla y_j)^{r}} =(f\circ\Psi_j) \frac{1}{(\det\nabla \Psi_j)^{r}},\quad\text{with}~~~ f:=\frac{1}{(\det\nabla y)^{r}}\in L^1(\Omega). \] Combined with the fact that $\det\nabla \Psi_j\to 1$ uniformly, Lemma~\ref{lem:shrinkcont} can therefore indeed be applied with $k=0$ and $p=1$ to obtain convergence of this singular term in $\ensuremath{\mathcal E}$. Since $\Omega_j\subset\subset \Omega$ for each $j$, we also have that \[ \ensuremath{\mathcal D}_{\Omega}(y_j)=\ensuremath{\mathcal D}_{\Omega_j}(y) <\infty \] by change of variables and Proposition~\ref{prop:distor}. Now let $\ensuremath{\varepsilon}_{k}\searrow0$ be given. We choose $j_{k}\to\infty$ such that $\ensuremath{\varepsilon}_{k}\ensuremath{\mathcal D}(y_{j_{k}})\xrightarrow{k\to\infty}0$. So $E_{\ensuremath{\varepsilon}_{k}}(y_{j_{k}}) = \ensuremath{\mathcal E}(y_{j_{k}}) + \ensuremath{\varepsilon}_{k}\ensuremath{\mathcal D}(y_{j_{k}})\xrightarrow{k\to\infty} \ensuremath{\mathcal E}(y) = E_{0}(y)$. \end{proof} \subsection{Bulk self-repulsion near the boundary}\label{ssec:mainbulkboundary} Here, we consider $\ensuremath{\mathcal D} := \ensuremath{\mathcal D}_{U_{\delta}}$ where, for any $\delta>0$, the set $U_{\delta}\subset\Omega$ can be chosen as any open neighborhood of $\partial\Omega$ which is at least $\delta$-thick in the sense that \begin{align}\label{eq:delta-thick} U_{\delta}\supset \br{\partial\Omega}^{(\delta)} = \sett{x\in\Omega}{\dist(x,\partial\Omega)<\delta}. \end{align} For the ease of notation, we abbreviate $\ensuremath{\mathcal D}_{\delta} := \ensuremath{\mathcal D}_{U_{\delta}}$ so that \[ \ensuremath{\mathcal D}(y) = \ensuremath{\mathcal D}_{\delta}(y) = \iint_{U_{\delta}\times U_{\delta}}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{d +sq}}\abs{\det \nabla y(x)}\abs{\det \nabla y(\tilde x)} \d x\d\tilde x \] for $y\in W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$, $q\in[1,\infty)$, $s\in[0,1)$. The combined energy $E_\ensuremath{\varepsilon}$ now also depends on the choice of $U_\delta$, and to make this more visible, we will now write \[ E_{\ensuremath{\varepsilon},\delta} (y) = \begin{cases} \ensuremath{\mathcal E}(y) + \ensuremath{\varepsilon} \ensuremath{\mathcal D}_\delta(y) & \text{if }y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d}), \\ +\infty &\text{else}. \end{cases} \] We will see that $E_0$ is still the correct limit functional independently of $\delta$. In fact, we can even allow the simultaneous limit as $(\ensuremath{\varepsilon},\delta)\to (0,0)$. \begin{remark} The fact that the limit as $\delta\to 0^+$ is admissible offers an attractive choice of $U_\delta$ for numerical purposes: a single boundary layer of the triangulation, which requires $\delta$ of the order of the grid size $h$. In that case, the cost of a numerical evaluation of $\ensuremath{\mathcal D}_\delta$ scales like $h^{-2(d-1)}$ (like a double integral on the surface), which is much cheaper than for $\ensuremath{\mathcal D}_\Omega$, which scales like $h^{-2d}$.
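To put rough numbers to this (a back-of-the-envelope count which we add purely for illustration, ignoring all constants): in dimension $d=3$ on a grid of size $h=10^{-2}$, the boundary-layer evaluation requires on the order of $h^{-4}=10^{8}$ kernel evaluations, while the full bulk term requires $h^{-6}=10^{12}$; the boundary-layer variant is thus cheaper by a factor of order $h^{-2}=10^{4}$.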
\end{remark} As before, for fixed $\ensuremath{\varepsilon},\delta>0$, the functional $E_{\ensuremath{\varepsilon},\delta}$ is well suited for minimization by the direct method: \begin{proposition}\label{prop:lscD2} For $p>d$, the functional $\ensuremath{\mathcal D}_\delta:W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)\to [0,\infty]$ is lower semicontinuous with respect to weak convergence in $W^{1,p}$. \end{proposition} \begin{proof} This is Proposition~\ref{prop:lscD1} with $\Omega$ replaced by $U_\delta$. Here, notice that no boundary regularity of $U_\delta$ is required: If needed, we can cover $U_\delta$ from inside with open domains with smooth boundary, and since the integrand of $\ensuremath{\mathcal D}_\delta$ is nonnegative, we can therefore write $\ensuremath{\mathcal D}_\delta$ as a supremum of weakly lower semicontinuous functionals using the smooth smaller domains as domain of integration. \end{proof} \begin{theorem}\label{thm:main2} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain such that $\ensuremath{\mathbb{R}}^d\setminus \partial\Omega$ has exactly two connected components, $q\in[1,\infty)$, $s\in[0,1)$. In addition, suppose that \eqref{eq:dprs} and \eqref{eq:sigma} hold. Then for any $\delta_0\in [0,\infty]$, the functionals $E_{\ensuremath{\varepsilon},\delta}$ $\Gamma$-converge to $E_{0}$ as $(\ensuremath{\varepsilon},\delta)\to (0,\delta_0)$ ($\ensuremath{\varepsilon},\delta>0$), with respect to the weak topology of $W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$. \end{theorem} For the proof, we additionally need the following modification of Proposition~\ref{prop:CNC}. \begin{proposition}[Finite $E_{\ensuremath{\varepsilon},\delta}(y)$ implies \eqref{eq:CNC}]\label{prop:CNC2} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain such that $\ensuremath{\mathbb{R}}^d\setminus \partial\Omega$ has exactly two connected components, $q\in[1,\infty)$, $s\in[0,1)$. In addition, suppose that \eqref{eq:dprs} holds. Moreover, let $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ such that $\ensuremath{\mathcal E}(y)<\infty$ and $\ensuremath{\mathcal D}_\delta(y)<\infty$. Then $y$ satisfies \eqref{eq:CNC} on $\Omega$. \end{proposition} \begin{proof} Analogously to Proposition~\ref{prop:CNC}, we infer that $y$ satisfies \eqref{eq:CNC} on $U_\delta$. By Proposition~\ref{prop:homeo}, we obtain that $y:U_\delta\to y(U_\delta)$ is a homeomorphism. Here, notice that for this conclusion, we do not need any regularity of the boundary of $U_\delta$, since it is enough to apply Proposition~\ref{prop:homeo} with $\Omega$ replaced by subdomains of $U_\delta$ with smooth boundary, and a sequence of such subdomains covers $U_\delta$ from the inside. Such an inner covering can also be used for $\Omega$: Choose open $\Omega_{j}\nearrow\Omega$ such that $\partial\Omega_{j}$ is sufficiently regular, say, Lipschitz. In addition, using that $\partial\Omega$ itself is also Lipschitz, we can make sure that, as for $\Omega$, the set $\ensuremath{\mathbb{R}}^d\setminus \partial\Omega_j$ has exactly two connected components. For any fixed $\delta$, there exists a sufficiently large $j$ such that $\partial\Omega_{j}$ is contained in the open $\delta$-neighborhood $(\partial\Omega)^{(\delta)}$ of $\partial\Omega$, and therefore $\partial\Omega_{j}\subset U_\delta$. Consequently, $y|_{\partial\Omega_j}$ is injective, which implies that $y\in \operatorname{AIB}(\Omega_j)$ in the sense of \cite[Def.~2.1 and 2.2]{Kroe20a}.
In addition, we know that $y\in W_+^{1,p}(\Omega_j;\ensuremath{\mathbb{R}}^d)$. By \cite[Thm.~6.1 and Rem.~6.3]{Kroe20a}, we infer that $y$ satisfies \eqref{eq:CNC} on $\Omega_j$. As the latter holds for all $j$, we conclude that $y$ satisfies \eqref{eq:CNC} on $\Omega$ by monotone convergence. \end{proof} \begin{remark} The proof of Proposition~\ref{prop:CNC2} exploits that $y$ is a homeomorphism near the boundary, which we obtain from Proposition~\ref{prop:homeo}. This forces the relatively restrictive assumptions on $p$ and $r$. While this may be technical to some degree, some restrictions are definitely needed. In fact, by itself, \eqref{eq:CNC} on a boundary strip like $U_\delta$ is not strong enough to provide the necessary global topological information: If one can squeeze surfaces to points with a deformation of finite elastic energy (possible if $p$ and $r$ are small enough), then self-penetration on $U_\delta$ is indeed possible for a $y\in W_+^{1,p}$ which is injective on $U_\delta$ outside a set of dimension $d-1$. Such a set of $d$-dimensional measure zero is invisible to \eqref{eq:CNC}. \end{remark} \begin{proof}[Proof of Theorem~\ref{thm:main2}] \emph{Lower bound ($\mathit\Gamma$-lim\,inf-inequality):} This is completely analogous to the proof of Theorem~\ref{thm:main}, using Proposition~\ref{prop:CNC2} instead of Proposition~\ref{prop:CNC}. \emph{Upper bound (construction of a recovery sequence):} Again, we can follow the proof of Theorem~\ref{thm:main} step by step, using the domain shrinking maps $\Psi_j$ to define $y_j:=y\circ \Psi_j$ as before. In particular, changing variables we now observe that \[ \ensuremath{\mathcal D}_{\delta}(y_j)=\ensuremath{\mathcal D}_{\Psi_j(U_\delta)}(y) \leq \ensuremath{\mathcal D}_{\Omega_j}(y) <\infty \] by Proposition~\ref{prop:distor}, for any fixed $j$. Given $(\ensuremath{\varepsilon}_k,\delta_k)\to (0,\delta_0)$, we thus again get a suitable recovery sequence given by $(y_{j(k)})$ as long as $j(k)\to\infty$ slowly enough so that $\ensuremath{\varepsilon}_k \ensuremath{\mathcal D}_{\delta_k}(y_{j(k)})\to 0$. \end{proof} \subsection{Surface self-repulsion}\label{ssec:mainsurface} Here we look at $\ensuremath{\mathcal D}:=\ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}$ where \[ \ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y) = \iint_{\partial\Omega\times\partial\Omega}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{d-1 +sq}} \d A(x)\d A(\tilde x), \qquad q\in[1,\infty), s\in[0,1), \] and $A(\cdot)$ denotes the $(d-1)$-dimensional Hausdorff measure. Again, this is a term well-suited for minimization via the direct method: \begin{proposition}\label{prop:lscD3} For $p>d$, the functional $\ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}:W^{1,p}(\Omega;\ensuremath{\mathbb{R}}^d)\to [0,\infty]$ is lower semicontinuous with respect to weak convergence in $W^{1,p}$. \end{proposition} \begin{proof} As the trace operator from $W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ to $L^{1}(\partial\Omega,\ensuremath{\mathbb{R}}^{d})$ is compact, we obtain (along a subsequence) pointwise a.e.\ convergence of the integrand on $\partial\Omega\times\partial\Omega$. So the claim immediately follows from Fatou's Lemma. \end{proof} We will employ results of~\cite{Kroe20a} which require that $\Omega$ does not have ``holes'' as made precise in the following statement. \begin{theorem}\label{thm:main3} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain such that $\ensuremath{\mathbb{R}}^{d}\setminus\partial\Omega$ has exactly two connected components.
Given~\eqref{eq:dprs}, we require $q\geq 1$ and $s\in[0,1)$ to be chosen such that \begin{equation}\label{eq:sigma3} q \br{(1-s)\sigma-d}>d^2-\sigma \end{equation} and \begin{equation}\label{eq:self-rep} sq\ge (d-1)\frac{p+d}{p-d}. \end{equation} Then the functionals $E_{\ensuremath{\varepsilon}}$ $\Gamma$-converge to $E_{0}$ as $\ensuremath{\varepsilon}\searrow 0$, with respect to the weak topology of $W^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$. \end{theorem} \begin{remark} With $\sigma=\sigma(r,p,d)>d$ as defined in \eqref{eq:dprs}, the conditions~\eqref{eq:sigma3} and~\eqref{eq:self-rep} are met if $0< s<1-\frac d\sigma$ and $q>\max\left\{\frac{d^2-\sigma}{(1-s)\sigma-d},\frac{d-1}s\cdot\frac{p+d}{p-d}\right\}$. \end{remark} For the proof, we additionally need the following two propositions. \begin{proposition}[Finite $E_\ensuremath{\varepsilon}(y)$ implies \eqref{eq:CNC}]\label{prop:CNC3} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain such that $\ensuremath{\mathbb{R}}^{d}\setminus\partial\Omega$ has exactly two connected components. Suppose that~\eqref{eq:dprs} and~\eqref{eq:self-rep} hold. Moreover, let $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ such that $\ensuremath{\mathcal E}(y)<\infty$ and $\ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y)<\infty$. Then $y$ satisfies~\eqref{eq:CNC}. \end{proposition} \begin{proof} According to~\cite[Cor.~6.5]{Kroe20a} it is enough to show injectivity of $y|_{\partial\Omega}$. If the latter is not the case, we may choose $x_{0},\tilde x_{0}\in\partial\Omega$, $x_{0}\ne\tilde x_{0}$, such that $y(x_{0})=y(\tilde x_{0})$. Recalling that $y\in C^{0,\alpha}(\overline\Omega,\ensuremath{\mathbb{R}}^{d})$, $\alpha=1-\frac dp$, and abbreviating $\ensuremath{\varepsilon}=\tfrac13\abs{x_{0}-\tilde x_{0}}$, $t=d-1+sq$, we infer \begin{align*} \ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y) &=\iint_{\partial\Omega\times\partial\Omega}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{t}} \d A(x)\d A(\tilde x) \\ &\ge\iint_{(\partial\Omega\cap B_{\ensuremath{\varepsilon}}(x_{0}))\times(\partial\Omega\cap B_{\ensuremath{\varepsilon}}(\tilde x_{0}))}\frac{\abs{x-\tilde x}^{q}}{\br{\abs{y(x)-y(x_{0})}+\abs{y(\tilde x_{0})-y(\tilde x)}}^{t}} \d A(x)\d A(\tilde x) \\ &\ge c_{\alpha}\iint_{(\partial\Omega\cap B_{\ensuremath{\varepsilon}}(x_{0}))\times(\partial\Omega\cap B_{\ensuremath{\varepsilon}}(\tilde x_{0}))}\frac{\br{\frac\eps3}^{q}}{\br{\abs{x-x_{0}}^{\alpha}+\abs{\tilde x_{0}-\tilde x}^{\alpha}}^{t}} \d A(x)\d A(\tilde x). \end{align*} Introducing local bi-Lipschitz charts $\Phi:V\to\partial\Omega\cap B_{\ensuremath{\varepsilon}}(x_{0})$, $\tilde\Phi:\tilde V\to\partial\Omega\cap B_{\ensuremath{\varepsilon}}(\tilde x_{0})$ where $V,\tilde V\subset\ensuremath{\mathbb{R}}^{d-1}$ are open sets and $\Phi(0)=x_{0}$, $\tilde\Phi(0)=\tilde x_{0}$, we arrive at \begin{align*} \ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y) &\ge c_{\alpha,\ensuremath{\varepsilon},q,t}\iint_{V\times\tilde V}\frac{\sqrt{\det D\Phi(\xi)^{\top}D\Phi(\xi)}\sqrt{\det D\tilde\Phi(\tilde\xi)^{\top}D\tilde\Phi(\tilde\xi)}}{\br{\abs{\Phi(\xi)-\Phi(0)}^{2}+\abs{\tilde\Phi(\tilde\xi)-\tilde\Phi(0)}^{2}}^{\alpha t/2}} \d\xi\d\tilde\xi \\ &\ge c_{\alpha,\ensuremath{\varepsilon},q,t,\Phi}\iint_{V\times\tilde V}\frac{\d\xi\d\tilde\xi}{\br{\abs{\xi}^{2}+\abs{\tilde\xi}^{2}}^{\alpha t/2}}. \end{align*} By assumption, both $V$ and $\tilde V$ contain $B_{\delta}(0)\subset\ensuremath{\mathbb{R}}^{d-1}$ for some $\delta>0$.
Decomposing $(\xi^{\top},\tilde\xi^{\top})=\rho\eta^{\top}\in\ensuremath{\mathbb{R}}^{2d-2}$ where $\rho>0$, $\eta\in\mathbb S^{2d-3}$, yields \begin{align*} \ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y) &\ge c_{\alpha,\ensuremath{\varepsilon},q,t,\Phi,d}\int_{0}^{\delta}\frac{\rho^{2d-3}}{\rho^{\alpha t}}\d\rho. \end{align*} This term is infinite provided $2d-3-\alpha t\le-1$ which is equivalent to~\eqref{eq:self-rep}. \end{proof} \begin{proposition}\label{prop:distor3} Let $\Omega\subset \ensuremath{\mathbb{R}}^d$ be a bounded Lipschitz domain. Assume that $y\in W_{+}^{1,p}(\Omega,\ensuremath{\mathbb{R}}^{d})$ is a homeomorphism $\Omega\to y(\Omega)$ with $y^{-1}\in W^{1,\sigma}(y(\Omega),\Omega)$ for which~\eqref{eq:sigma3} applies. If $\Omega'$ is open and $\Omega'\subset\subset\Omega$ and $\partial\Omega'$ is Lipschitz, then $\ensuremath{\widetilde{\mathcal D}}_{\partial\Omega'}(y)<\infty$. \end{proposition} \begin{proof} First notice that $y(\Omega)$ is open and bounded in $\ensuremath{\mathbb{R}}^{d}$, the former by invariance of domain (see \cite[Theorem 3.30]{FoGa95B}, e.g.) and the latter due to the fact that $y\in C(\overline{\Omega};\ensuremath{\mathbb{R}}^{d})$ by embedding. Hence, $y(\overline{\Omega'})$ is a compact and connected subset of $y(\Omega)$ with positive distance to $\partial [y(\Omega)]$. We choose a domain $\Lambda\subset \ensuremath{\mathbb{R}}^{d}$ with smooth boundary such that $y(\Omega')\subset\subset \Lambda \subset\subset y(\Omega)$. By embedding, $y^{-1}\in C^{0,\beta}(\Lambda,\ensuremath{\mathbb{R}}^{d})$, $\beta=1-\frac d\sigma$. Abbreviating $t=d-1+sq$, we arrive at \begin{align*} \ensuremath{\widetilde{\mathcal D}}_{\partial\Omega'}(y) &=\iint_{\partial\Omega'\times\partial\Omega'}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{t}} \d A(x)\d A(\tilde x) \\ &\le C_{\beta}\iint_{\partial\Omega'\times\partial\Omega'}\abs{x-\tilde x}^{q-t/\beta} \d A(x)\d A(\tilde x). \end{align*} The term $\abs{x-\tilde x}$ is bounded above since $\Omega$ is bounded. It approaches zero only in a neighborhood of the diagonal. In order to show that $\ensuremath{\widetilde{\mathcal D}}_{\partial\Omega'}(y)$ is finite we only have to consider $\iint_{\Phi(V)\times\Phi(V)}\abs{x-\tilde x}^{q-t/\beta} \d A(x)\d A(\tilde x)$ where $\Phi:V\to U\subset\partial\Omega'$ is a chart and $V\subset B_{R}(0)\subset\ensuremath{\mathbb{R}}^{d-1}$ is an open set. Decomposing $\xi=\rho\eta\in\ensuremath{\mathbb{R}}^{d-1}$ where $\rho>0$, $\eta\in\mathbb S^{d-2}$, yields \begin{align*} &\iint_{\Phi(V)\times\Phi(V)}\abs{x-\tilde x}^{q-t/\beta} \d A(x)\d A(\tilde x) \\ &=\iint_{V\times V}\abs{\Phi(\xi)-\Phi(\tilde\xi)}^{q-t/\beta} {\sqrt{\det D\Phi(\xi)^{\top}D\Phi(\xi)}\sqrt{\det D\Phi(\tilde\xi)^{\top}D\Phi(\tilde\xi)}} \d\xi\d\tilde\xi \\ &\le C_{\Phi}\iint_{V\times V}\abs{\xi-\tilde\xi}^{q-t/\beta} \d\xi\d\tilde\xi \\ &\le C_{\Phi}\int_{B_{R}(0)}\int_{B_{R}(0)}\abs{\xi-\tilde\xi}^{q-t/\beta} \d\xi\d\tilde\xi \\ &\le C_{\Phi}\int_{B_{3R}(0)}\int_{B_{R}(\tilde\xi)}\abs{\xi}^{q-t/\beta} \d\xi\d\tilde\xi \\ &\le C_{\Phi,d,R}\int_{0}^{R}\rho^{d-1+q-t/\beta}\d\rho. \end{align*} The right-hand side is finite if $d-1+q-t/\beta>-1$ which is equivalent to~\eqref{eq:sigma3}. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:main3}] We proceed as in the proof of Theorem~\ref{thm:main}. For the \emph{lower bound} we use Proposition~\ref{prop:CNC3} in place of Proposition~\ref{prop:CNC}. 
To see that the \emph{recovery sequence} also works for $\ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}$, we compute \begin{align*} \ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y_{j}) &=\ensuremath{\widetilde{\mathcal D}}_{\partial\Omega}(y|_{\Omega_{j}}\circ \Psi_j) \\ &=\iint_{\partial\Omega\times\partial\Omega}\frac{\abs{\xi-\tilde\xi}^{q}}{\abs{y(\Psi_{j}(\xi))-y(\Psi_{j}(\tilde\xi))}^{d-1+sq}} \d A(\xi)\d A(\tilde\xi) \\ &\le C_{\Psi_{j}}\iint_{\partial\Omega_j\times\partial\Omega_j}\frac{\abs{\Psi_{j}^{-1}(x)-\Psi_{j}^{-1}(\tilde x)}^{q}}{\abs{y(x)-y(\tilde x)}^{d-1+sq}} \d A(x)\d A(\tilde x) \\ &\le C_{\Psi_{j}}\norm{\nabla\Psi_{j}^{-1}}_{L^{\infty}}^{q}\iint_{\partial\Omega_j\times\partial\Omega_j}\frac{\abs{x-\tilde x}^{q}}{\abs{y(x)-y(\tilde x)}^{d-1+sq}} \d A(x)\d A(\tilde x) \end{align*} where $C_{\Psi_{j}}$ denotes a factor that bounds the terms arising from the change of variables. Now we deduce from Proposition~\ref{prop:distor3} (instead of Proposition~\ref{prop:distor}) that the right-hand side is finite. \end{proof} \subsection{Further generalizations and remarks}\label{ssec:general} \begin{remark}[More general elastic energies] It is easy to see that throughout, the integrand of $\ensuremath{\mathcal E}$ can be replaced by any polyconvex function admitting the original integrand as a lower bound (up to multiplicative and additive constants). Moreover, the latter is only exploited for the application of the theory of functions of bounded distortion in Proposition~\ref{prop:homeo}. More precisely, Theorems~\ref{thm:main}, \ref{thm:main2} and \ref{thm:main3} also hold for any elastic energy of the form \[ \ensuremath{\mathcal E}(y) = \int_{\Omega} W(\nabla y(x))\,\d x \] such that \begin{enumerate} \item[(i)] $W:\ensuremath{\mathbb{R}}^{d\times d}\to (-\infty,+\infty]$ is continuous and polyconvex, \item[(ii)] $W(F)\geq c |F|^p-C$ for all $F\in \ensuremath{\mathbb{R}}^{d\times d}$, where $p>d$, \item[(iii)] $W(F)\geq c \left(\frac{|F|^{d}}{\det F}\right)^\beta-C$ for all $F\in \ensuremath{\mathbb{R}}^{d\times d}$ with $\det F>0$, where $\beta>d-1$. \end{enumerate} Here, $p>d,\beta>d-1$, $c>0$, and $C\in\ensuremath{\mathbb{R}}$ are constants. Notice that (iii) directly provides the bound on the outer distortion we need to generalize Proposition~\ref{prop:homeo}. \end{remark} \begin{remark}[Boundary conditions and force terms] Due to the stability of $\Gamma$-convergence with respect to addition of continuous functionals, our main results continue to hold if $\ensuremath{\mathcal E}$ is modified by adding a term which is continuous in the weak topology of $W^{1,p}$ (typically either linear or lower order, exploiting a compact embedding). This includes many classical force potentials for body forces and surface tractions. Additional boundary conditions, say, a Dirichlet condition of the form $y=y_0$ on a part $\Lambda$ of $\partial\Omega$, are in principle also possible but not trivial to add, as they require modified recovery sequences in the proof of the theorems. In particular, we would need a suitable modification of Lemma~\ref{lem:shrinking} which keeps the Dirichlet part of the boundary fixed, as well as additional assumptions on $y_0$ which at the very least should map $\overline{\Lambda}$ to a reasonably smooth set out of self-contact. 
The easiest way to set up a meaningful model with full coercivity in $W^{1,p}$ which is compatible with our theorems is to confine the deformed material to a box by constraint ($y(\Omega)\subset \mathcal{B}$ for a given compact $\mathcal{B}\subset \ensuremath{\mathbb{R}}^d$ with non-empty interior). \end{remark} \begin{remark}[More general nonlocal self-repulsive terms] It is clear that our general proof strategy can also be applied to other nonlocal terms $\ensuremath{\mathcal D}$. The only key features of such a term $\ensuremath{\mathcal D}$ are the following: \begin{itemize} \item[(i)] for any deformation $y$ with finite elastic energy $\ensuremath{\mathcal E}(y)$, finite $\ensuremath{\mathcal D}(y)$ implies \eqref{eq:CNC} (cf.~Propositions~\ref{prop:CNC}, \ref{prop:CNC2} and \ref{prop:CNC3}); \item[(ii)] for any homeomorphism $y\in W^{1,p}_+$ ($p>d$) whose inverse has the Sobolev regularity $W^{1,\sigma}$ ($\sigma>d$) obtained from the control of its distortion through the elastic energy (see Proposition~\ref{prop:homeo}), we obtain $\ensuremath{\mathcal D}(y)<\infty$, at least if we move to a slightly smaller domain $\Omega'\subset\subset \Omega$ (cf.~Propositions~\ref{prop:distor} and \ref{prop:distor3}). \end{itemize} Moreover, following \cite{KroeVa19a,KroeVa22Pa}, it is in principle possible to work with an everywhere finite $\ensuremath{\mathcal D}_\ensuremath{\varepsilon}$ instead of $\ensuremath{\varepsilon} \ensuremath{\mathcal D}$ (say, a suitable truncation of the latter), if we restrict ourselves to deformations satisfying a fixed energy bound. Here, the basic idea is to find at least one deformation $y_0$ so that $e_0:=\ensuremath{\mathcal E}(y_0)+\sup_{0<\ensuremath{\varepsilon}\leq 1}\ensuremath{\mathcal D}_\ensuremath{\varepsilon}(y_0)<\infty$, for instance the identity or another map far from self-contact. One then has to check whether (i) still holds in such a case if we replace the assumption $\ensuremath{\mathcal D}(y)<\infty$ by $\ensuremath{\mathcal E}(y)+\ensuremath{\mathcal D}_\ensuremath{\varepsilon}(y)\leq e_0$ (for sufficiently small $\ensuremath{\varepsilon}$ independently of $y$). \end{remark} \begin{remark}[Mosco-convergence and recovery by homeomorphisms] Our proofs of Theorems~\ref{thm:main}, \ref{thm:main2} and \ref{thm:main3} actually provide more than $\Gamma$-convergence: The recovery sequence we construct always converges strongly in $W^{1,p}$, which means that we actually proved so-called Mosco-convergence. Moreover, as constructed, each member of the recovery sequence is a homeomorphism on $\overline{\Omega}$. In particular, any admissible $y$ with finite $E_0(y)$ is always contained in the $C^0$-closure of these homeomorphisms, i.e., $y\in AI(\overline{\Omega})$ in the notation of \cite{Kroe20a}. Our results here therefore also show that within $W^{1,p}_+(\Omega;\ensuremath{\mathbb{R}}^d)$ with $p>d$, $AI(\overline{\Omega})$ coincides with the class of maps satisfying \eqref{eq:CNC} if we also impose strong enough a-priori bounds on the outer distortion to apply the result of Villamor and Manfredi as in Proposition~\ref{prop:homeo}. The general case is still not clear, cf.~\cite[Remark 2.19]{Kroe20a}. \end{remark} \subsection*{Acknowledgements}The work of S.K.~was supported by the GA \v{C}R-FWF grant 19-29646L. Major parts of this research were carried out during mutual research visits of S.K. at the Chemnitz University of Technology and of Ph.~R. at \'{U}TIA, whose hospitality is gratefully acknowledged.
\section{Introduction} \label{Sec : Introduction} In recent years the paradigm based on entanglement~\cite{Horodecki} as the unique genuine measure of quantum correlations has been challenged by the argument that the notion of nonseparability may be insufficient to encompass all correlations that can be fairly regarded as quantum, or nonclassical. This has spurred the development of conceptually new correlation measures, such as quantum discord~\cite{Ollivier,Vedral,DiscordRev}, based on local measurements and able to reveal quantum correlations that are present even in separable states. These correlations can be interpreted as an extra amount of information that only coherent operations can unlock~\cite{Gu}. In fact, there are several indications suggesting that general quantum correlations might be exploited in quantum protocols~\cite{Datta2}, including mixed state quantum computation~\cite{Datta} and remote state preparation~\cite{RSP}. Therefore, a more complete theoretical and experimental investigation thereof is now a central issue in quantum science and technology \cite{Fer12}. \par The definition of discord involves an optimization over all possible local measurements in a bipartition, the optimal measurement leading to a minimal value of quantum discord. Performing this optimization is remarkably difficult, which hampers analytical progress in the area. This fact has led to the definition of other correlation measures which are conceptually similar but easier to compute, such as the geometric discord~\cite{Dakic}. In the realm of finite-dimensional systems, where the concept of discord was first introduced, analytic results for quantum (geometric) discord have been obtained for pairs of qubits when the global state is in X form (in arbitrary form)~\cite{AnalyticDiscord,Dakic}. \par In the realm of continuous variable (CV) systems, initial research efforts on quantum discord have focused on Gaussian measurements. The Gaussian quantum discord, proposed in~\cite{GiordaGaussDiscord,AdessoDatta}, is defined by restricting the minimization involved in the definition of discord to the set of Gaussian POVMs~\cite{GauuPOVM}, and it can be analytically computed for Gaussian states. Its behavior in noisy channels has been studied in Ref.~\cite{Vasile}, where it was shown that it is more robust than entanglement to the decorrelating effect of independent baths and more likely to yield non-zero asymptotic values in the case of a common bath, while its relation to the synchronization properties of detuned, correlated oscillators has been analyzed in Ref.~\cite{Zambrini}. \par It is natural to investigate CV quantum discord beyond Gaussian measurements: non-Gaussian ones may indeed allow for a stronger minimization of discord, and in this case the Gaussian discord would be an overestimation of the true discord. Here we focus on Gaussian states and ask whether Gaussian measurements are optimal in this case, i.e., \textit{whether the Gaussian discord is the true discord for Gaussian states}. This question is relevant for two main reasons: On one hand, if discord is a truly useful resource for quantum information protocols~\cite{Gu,Datta2}, then it is crucial to have a reliable estimate of its actual value. On the other hand, from a fundamental point of view it is important to establish how different kinds of measurements can affect correlations in quantum states.
A further motivation comes from the fact that for some non-Gaussian states, e.g., CV Werner states, non-Gaussian measurements such as photon counting have indeed been proven to lead to a better minimization~\cite{NonGausDisc}. \par The optimality of Gaussian measurements has already been proven analytically for two-mode Gaussian states having one vacuum normal mode~\cite{AdessoDatta}, by use of the so-called Koashi-Winter relation~\cite{Koashi}, but no analytic argument is available in the general case. We address the question numerically, for the case of two modes, upon considering two large classes of Gaussian states, the squeezed thermal states (STS) and the mixed thermal states (MTS), and allowing for a range of experimentally feasible non-Gaussian measurements based on orthogonal bases: the number basis, the squeezed number basis, the displaced number basis. As a result, we provide evidence that Gaussian quantum discord is indeed optimal for the states under study. In addition, we also investigate the CV geometric discord~\cite{GaussianGeom}, comparing the case of Gaussian and non-Gaussian measurements. \par This work is structured as follows. In sec.~\ref{Sec : correlations} we review quantum discord and the Gaussian version of it; in sec.~\ref{Sec : NonGaussian correlations} we thoroughly describe the basic question we want to address in this work and introduce non-Gaussian measurements and non-Gaussian discord; in sec.~\ref{Sec : number basis}, \ref{Sec : squeezed number basis}, \ref{Sec : displaced number basis}, we present our key results concerning non-Gaussian discord upon measurements in the number basis, squeezed number basis and displaced number basis; in sec.~\ref{Sec : Geometric} we discuss the behavior of non-Gaussian geometric discord; finally, sec.~\ref{Sec. : conclusions} closes the paper discussing our main conclusions. \section{Quantum discord and Gaussian discord} \label{Sec : correlations} Starting from the seminal works by Ollivier and Zurek~\cite{Ollivier} and Henderson and Vedral~\cite{Vedral}, various measures of quantum correlations which go beyond the traditional entanglement picture have been defined~\cite{DiscordRev}. The most common measure of such correlations is the \emph{quantum discord}~\cite{Ollivier,Vedral}. Let us consider a bipartite system composed of subsystems $A$ and $B$. The total correlations in the global state are measured by the mutual information $I(A:B) = S(\varrho_A) + S(\varrho_B) - S(\varrho_{AB})$. Whenever $I(A:B) > 0$, the subsystems are correlated and we can gain some information about $A$ by measurements on $B$ only. However, there is no unique way of locally probing the state of $B$: to do it, we can perform different local measurements, or POVMs. Any such local POVM $\Pi_B $ is specified by a set of positive operators $\{ \Pi_B^x = M_B^x M_B^{x \dag} \} $ on subsystem $B$ summing up to the identity, $\sum_x \Pi_B^x = \mathbb{I} $. When measurement result $x$ is obtained, the state of $A$ is projected onto $\varrho_A^x= \mbox{Tr}_B [ M_B^x \varrho_{AB} M_B^{x \dag}]/p_x $, where $p_x = \mbox{Tr}_{AB} [ M_B^x \varrho_{AB} M_B^{x \dag}]$ is the probability of the outcome $x$. The uncertainty on the state of $A$ before the measurement on $B$ is given by $S(\varrho_A)$, while the average uncertainty on the state of $A$ after the measurement is given by the average conditional entropy $ S^\Pi (A|B) = \sum_x p_x S(\varrho_A^x)$.
Their difference \begin{equation*} S(\varrho_A) - S^\Pi (A|B) = S(\varrho_A) - \sum_x p_x S(\varrho_A^x) \end{equation*} represents the average gain of information about the state of $A$ acquired through a local measurement on $B$. The maximal gain of information that can be obtained with a POVM, \begin{eqnarray} C(A:B) = \max_{\{ \Pi \in POVM \}} [S(\varrho_A) - S^{\Pi}(A|B) ] \label{Eq.: classicalcorr} \\ \nonumber =S(\varrho_A) - \mbox{min}_{\{\Pi \in POVM \}} [ S^{\Pi}(A|B) ] \end{eqnarray} coincides with the measure of \textit{classical correlations} originally derived in~\cite{Vedral} under some basic and natural requirements for such a measure. Quantum discord is then defined as the difference between the mutual information and the classical correlations: \begin{equation} D(A:B) = I(A:B) - C(A:B) \end{equation} and measures the part of the total correlations that cannot be exploited to gain information on $A$ by a local measurement on $B$, i.e., it measures the additional quantum correlations beyond the classical ones. \par It can be verified (see e.g.~\cite{Dakic}) that the classical correlations coincide with the mutual information in the system after the measurement, maximized over all possible POVMs: \begin{eqnarray} C(A:B)= \mbox{max}_{\{\Pi \in POVM \}} I^\Pi(A:B) \end{eqnarray} where $I^\Pi(A:B) = S(\varrho_A^\Pi) + S(\varrho_B^\Pi) - S(\varrho_{AB}^\Pi)$ and the unconditional post-measurement states are given by $\varrho_{AB}^\Pi = \sum_x M_B^x \varrho_{AB} M_B^{x \dag} $, $\varrho_{A}^\Pi = \mbox{Tr}_B [\sum_x M_B^x \varrho_{AB} M_B^{x \dag}] $, $\varrho_{B}^\Pi = \mbox{Tr}_A [\sum_x M_B^x \varrho_{AB} M_B^{x \dag}] $. Therefore, the quantum discord coincides with the difference between the mutual information before and after the measurement, minimized over all possible POVMs: \begin{eqnarray} D(A:B)= \ \mbox{min}_{\{\Pi \in POVM \}} [I(A:B) - I^\Pi(A:B)] \end{eqnarray} From the previous considerations, it is clear that $D(A:B)=0$ if and only if there is a local measurement $\Pi_B$ which leaves the global state of the system unaffected: $\exists \Pi, \ \varrho_{AB} = \varrho_{AB}^\Pi$. Such states are called \textit{quantum-classical states} and are of the form \begin{equation} \chi_{AB} = \sum_i p_i \varrho_{A,i} \otimes \ket{i} \bra{i} \end{equation} where $p_i$ is a probability distribution and $ \{ \ket{i} \}$ is a basis for the Hilbert space of subsystem $B$. For such states, there exists at least one local measurement that leaves the state invariant and we have $I(A:B) = C(A:B)$, which means that we can obtain maximal information about subsystem $A$ by a local measurement on $B$ without altering the correlations with the rest of the system. \par In the realm of continuous-variable systems, the \emph{Gaussian discord}~\cite{GiordaGaussDiscord, AdessoDatta} is defined by restricting the set of possible measurements in Eq.~(\ref{Eq.: classicalcorr}) to the set of Gaussian POVMs~\cite{GauuPOVM}, and minimizing only over this set. The Gaussian discord can be analytically evaluated for two-mode Gaussian states, where one mode is probed through (single-mode) Gaussian POVMs. The latter can be written in general as $$\Pi_B(\eta) = \pi^{-1} D_B (\eta) \varrho_M D_B^\dag (\eta)$$ where $D_B (\eta) = \exp(\eta b^\dag- \eta^\ast b)$ is the displacement operator, and $\varrho_M$ is a single-mode Gaussian state with zero mean and covariance matrix $\sigma_M = \left(\begin{array}{cc} \alpha & \gamma \\ \gamma & \beta \end{array} \right) $.
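As a concrete instance (a standard special case, which we recall here purely for illustration): choosing for $\varrho_M$ the vacuum state, i.e., $\sigma_M = \frac12 \openone$, yields the coherent-state POVM \begin{equation*} \Pi_B(\eta) = \frac{1}{\pi}\, \ketbra{\eta}{\eta}, \end{equation*} corresponding to the joint measurement of the two quadratures of mode $B$ as realized by heterodyne detection; as recalled below, this choice turns out to be the optimal Gaussian measurement for the states considered in this work.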
Two-mode Gaussian states can be characterized by their covariance matrix $ \sigma_{AB} = \left(\begin{array}{cc} A & C \\ C^T & B \end{array} \right) $. By means of local unitaries that preserve the Gaussian character of the state, i.e. local symplectic operations, $\sigma_{AB}$ may be brought to the so-called standard form, i.e. $A = \mbox{diag}(a, a)$, $B = \mbox{diag}(b, b)$, $C = \mbox{diag}(c_1 , c_2 )$. The quantities $I_1 = \det A$, $I_2 = \det B$, $I_3 = \det C$, $I_4 = \det \sigma_{AB} $ are left unchanged by the transformations, and are thus referred to as symplectic invariants. The invariance of the discord under local unitaries therefore has two main consequences. On the one hand, correlation measures may be written in terms of symplectic invariants only. On the other hand, we can restrict to states with $\sigma_{AB}$ already in the standard form. Before the measurement we have \begin{eqnarray} S (\varrho_{AB}) = h (d_+) + h (d_- ) , \\ S(\varrho_A)=h(\sqrt{I_1}), \ S(\varrho_B)=h(\sqrt{I_2}) \end{eqnarray} where $h(x) = (x + 1/2) \log(x + 1/2) - (x - 1/2) \log(x - 1/2) $ and $d_{\pm}$ are the symplectic eigenvalues of $\varrho_{AB}$, given by $d_{\pm}^2 = \frac12 \left[ \Delta \pm \sqrt{\Delta^2 - 4 I_4}\right]$ with $\Delta = I_1 + I_2 + 2 I_3$. After the measurement, the (conditional) post-measurement state of mode $A$ is a Gaussian state with covariance matrix $\sigma_P$ that is independent of the measurement outcome and is given by the Schur complement $ \sigma_P = A - C(B + \sigma_M)^{-1} C^T $. The Gaussian discord is therefore expressed by \begin{eqnarray} D^{\mathcal{G}}(A:B) = h(\sqrt{I_2}) - h(d_{-}) - h(d_{+} ) \nonumber \\ + \mbox{min}_{\sigma_M} h( \sqrt{\det\sigma_P}) \end{eqnarray} where we use two key properties: i) the entropy of a Gaussian state depends only on the covariance matrix, and ii) the covariance matrix $\sigma_P$ of the conditional state does not depend on the outcome of the measurement. The minimization over $\sigma_M$ can be done analytically. For the relevant case of states with $C = \mbox{diag}(c , \pm c )$, including STS and MTS (see below), the minimum is obtained for $ \alpha = \beta = 1/2, \gamma = 0 $, i.e. when $\sigma_M$ coincides with the covariance matrix of the vacuum state. This corresponds to the coherent state POVM, i.e. to the joint measurement of canonical operators, say position and momentum, which may be realized on the radiation field by means of heterodyne detection. For {\em separable} states the Gaussian discord grows with the total energy of the state and it is bounded, $D \leq 1$; furthermore, we have $D=0$ iff the Gaussian state is in product form $\varrho_{AB} = \varrho_A \otimes \varrho_B $. \section{Non-Gaussian discord} \label{Sec : NonGaussian correlations} In this work we consider Gaussian states, and ask whether non-Gaussian measurements can allow for a better extraction of information than Gaussian ones, hence leading to lower values of discord. \par The optimality of Gaussian measurements has already been proven for a special case~\cite{AdessoDatta}: that of two-mode Gaussian states having one vacuum normal mode. Indeed any bipartite state $\varrho_{AB}$ can be purified, $\varrho_{AB}$ $\Longrightarrow$ $|\psi\rangle_{ABC}$; then, the Koashi-Winter relation~\cite{Koashi}, \begin{equation} D(A:B)=E_f(A:C)+S(\varrho_B)-S(\varrho_{AB}) \label{Koashi} \end{equation} relates the quantum discord $D$ and the entanglement of formation $E_f$ of the reduced states $\varrho_{AB}$ and $\varrho_{AC}$, respectively.
Given a (mixed) two-mode Gaussian state $\varrho_{AB}$, there exists a Gaussian purification $|\psi\rangle_{ABC}$. In general, the purification of $\varrho_{AB}$ requires two additional modes, so that $\varrho_{AC}$ is a three-mode Gaussian state. In the special case when one normal mode is the vacuum, the purification requires one mode only. In this case, $\varrho_{AC}$ represents a two-mode Gaussian state and $E_f(A:C)$ can be evaluated~\cite{EOF}. From $E_f(A:C)$, by means of Eq.~(\ref{Koashi}), one can obtain $D(A:B)$ (the exact discord), and a comparison with $D^{\mathcal{G}}(A:B)$ proves that $D(A:B)=D^{\mathcal{G}}(A:B)$. \par In the general case, there is no straightforward analytical way to prove that Gaussian discord is optimal. Therefore, we perform a numerical study. Since taking into account the most general set of non-Gaussian measurements is an extremely challenging task, one can rather focus on a restricted subset. We choose to focus on a class of measurements that are realizable with current or foreseeable quantum optical technology. These are the projective POVMs, $\Pi = \{ \Pi_n\}$, represented by the following orthogonal measurement bases: \begin{equation} \Pi_n=D(\alpha)S(r)\ketbra{n}{n}S(r)^\dagger D(\alpha)^\dagger, \,\,\,\, n=0,\cdots,\infty \label{Eq: class} \end{equation} where $S(r)=\exp{(-r^*\frac{a^2}{2}-r\frac{(a^\dagger)^2}{2})}$ and $D(\alpha)=\exp(\alpha a^\dag - \alpha^* a)$ are respectively the single-mode squeezing and displacement operators~\cite{DO}. The set of projectors in (\ref{Eq: class}) is a POVM for any fixed value of $\alpha$ and $r$. If $\alpha=r=0$ we have the spectral measure of the number operator, describing ideal photon counting, $\Pi_n = \ket{n} \bra{n}$. If $\alpha > 0$, $r=0$, we are projecting onto displaced number states~\cite{DisplacedNumber}; if $\alpha=0$, $r > 0$, onto squeezed number states~\cite{SNS1,SNS2,SNS3,SNS4}. While more general non-Gaussian measurements are in principle possible, the class (\ref{Eq: class}) encompasses most of the measurements that can be realistically accessed. \par In the following, we will evaluate the non-Gaussian quantum discord defined by \begin{align} D^{\mathcal{NG}}(A:B) = h(\sqrt{I_2}) - h(d_{-}) - h(d_{+} ) + S^{\Pi,\mathcal{NG}}(A|B) \label{Eq: nonGdiscdef} \end{align} where the non-Gaussian measurements are given by Eq. (\ref{Eq: class}) above. For the non-Gaussian conditional entropy we have \begin{align} S^{\Pi,\mathcal{NG}}(A|B) &= \sum_n p_n S(\varrho_{A,n})\,, \notag \\ \varrho_{A,n} &= \frac1{p_n}\mbox{Tr}_B [\Pi_n \varrho_{AB} \Pi_n]\,, \notag \\ p_n &= \mbox{Tr}_{AB} [\Pi_n \varrho_{AB} \Pi_n] \end{align} In the following we consider two classes of Gaussian states in order to assess the performances of the above measurements.
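Before introducing them, we note that the measurement class (\ref{Eq: class}) is straightforward to construct numerically in a truncated Fock space. The following sketch is our own illustration and not part of the original numerical study; the function name and the truncation parameter \texttt{nmax} are ours, and \texttt{nmax} must be taken large enough that truncation errors are negligible (cf.\ the cutoff criterion discussed below).
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def measurement_basis(nmax, alpha=0.0, r=0.0):
    # Projectors Pi_n = D(alpha) S(r) |n><n| S(r)^dag D(alpha)^dag
    # of the measurement class above, truncated at nmax photons.
    a = np.diag(np.sqrt(np.arange(1, nmax)), 1)    # annihilation operator
    ad = a.conj().T                                # creation operator
    S = expm(-np.conj(r)*(a @ a)/2 - r*(ad @ ad)/2)  # squeezing S(r)
    D = expm(alpha*ad - np.conj(alpha)*a)            # displacement D(alpha)
    U = D @ S
    # U|n> is the n-th column of U, hence Pi_n = U |n><n| U^dag:
    return [np.outer(U[:, n], U[:, n].conj()) for n in range(nmax)]
\end{verbatim}
Up to truncation error, $\sum_n \Pi_n = U U^\dagger = \openone$; moreover, the matrices \texttt{S} and \texttt{D} computed in this way directly provide the single-mode matrix elements $\bra{k}S(r)\ket{q}$ and $\bra{k}D(\alpha)\ket{q}$ that enter the expressions $O'_{hk}(st)$ used in the following sections.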
These are the two-mode squeezed thermal states (STS)~\cite{STSteo,STSexp1,STSexp2}: \begin{equation} \varrho =S(\lambda)\nu_1(N_1)\otimes\nu_2(N_2)S(\lambda)^\dagger \label{Eq.: rhoSTS} \end{equation} and the two-mode mixed thermal states (MTS) \cite{MTS} \begin{equation} \varrho =U(\phi)\nu_1(N_1)\otimes\nu_2(N_2)U(\phi)^\dagger \label{Eq.: rhoMTS} \end{equation} where $\nu_i(N_i)$ are 1-mode thermal states with thermal photon number $N_i$; $S(\lambda)=\exp\{\lambda (a_1^\dag a_2^\dag - a_1 a_2) \}$ is the two-mode squeezing operator (usually realized on optical modes through parametric down-conversion in a nonlinear crystal); and $U(\phi)=\exp\{\phi( a_1^\dag a_2 - a_1 a_2^\dag)\}$ is the two-mode mixing operator (usually realized on optical modes through a beam splitter). \par In particular, in the following we will focus on the simplest case of symmetric STS with $N_1=N_2 \in [10^{-5},1]$ and $\lambda \in [0,0.5]$. As for MTS, we cannot consider the symmetric case (since if $N_1=N_2$ then the mutual information vanishes and there are no correlations in the system); therefore we consider the unbalanced case and focus on $\phi \in [0,\pi/2]$ and $N_1,N_2 \in [10^{-5},1]$. \section{Number basis} \label{Sec : number basis} Let $\Pi_n=\ketbra{n}{n}$. In this case, the post-measurement state is \begin{equation} \varrho^A_n\otimes\ketbra{n}{n}=\left(\sum_{h,k}\varrho_{(h,k),(n,n)}\ketbra{h}{k}\right)\otimes\ketbra{n}{n} \end{equation} and we have the following expression for the density matrix elements \begin{equation} \varrho_{(h,k),(n,n)}=\sum_{s,t} p^{th}_s (N_1) p^{th}_t (N_2) O_{hn}(st)O_{kn}^*(st) \label{Eq.: rhoApostnumber} \end{equation} where $ p^{th}_s (N) = N^s \ (1+N)^{-(s+1)} $ and $O_{hn}(st) = \bra{hn} O \ket{st} $ with $O=S(\lambda),U(\phi)$ for STS and MTS respectively. The post-measurement state $\varrho^A_n$ is diagonal (see appendix \ref{Sec. : postdiagonal}), \begin{equation} \bra{h} \varrho_n^A \ket{k} = \delta_{hk} \ \varrho_{(h,h),(n,n)} \end{equation} As a consequence, the entropy of the post-measurement state can be expressed as $S(\varrho^A_n)=H(\{ \varrho_{(h,h),(n,n)}\})=H(\vec{p}(A|B=n))$, where $H$ is the Shannon entropy of the conditional probability $\vec{p}(A|B=n)=(p(0,n),p(1,n),\cdots)/p_n$, and therefore the overall conditional entropy can be simply expressed in terms of the photon number statistics: \begin{eqnarray} S(A|\{\Pi_n\})&=&\sum_n p_n H(\vec{p}(A|B=n)) \nonumber \\&=& H(\vec{p}(A,B))-H(\vec{p}(B)) \label{Eq.: CondEntSTSnumber} \end{eqnarray} with $\vec{p}(A,B)=\{p(A=n,B=m)\}$ and $\vec{p}(B)=\{p(B=n)\}$. In view of this relation, the only elements of the number basis representation of the density matrix $\varrho$ that are needed are the diagonal ones, i.e. one has to determine the photon number statistics for the two-mode STS or MTS state. The required matrix elements can be obtained in terms of the elements of the two-mode squeezing and mixing operators (see appendix \ref{Sec. : postdiagonal}). One has of course to define a cutoff on the dimension of the density matrix. This can be done upon requiring that the error on the trace of each state considered be sufficiently small: $\epsilon_{err}=1-\mbox{Tr}\varrho\le 10^{-3}$.
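For reference, the Gaussian benchmark entering this comparison can be evaluated in closed form from the expressions of sec.~\ref{Sec : correlations}. The sketch below is again our own illustration (the function names are ours), written in the convention used above in which the vacuum has quadrature variance $1/2$, so that $h(1/2)=0$; for a symmetric STS both symplectic eigenvalues of $\sigma_{AB}$ equal $N+1/2$, since $S(\lambda)$ is a Gaussian (symplectic) unitary.
\begin{verbatim}
import numpy as np

def h(x):
    # h(x) = (x+1/2)log(x+1/2) - (x-1/2)log(x-1/2), with h(1/2) = 0
    if x <= 0.5:
        return 0.0
    return (x + 0.5)*np.log(x + 0.5) - (x - 0.5)*np.log(x - 0.5)

def gaussian_discord_sts(N, lam):
    # Symmetric STS (N1 = N2 = N) in standard form:
    # A = B = a*I, C = diag(c, -c).
    a = (N + 0.5)*np.cosh(2*lam)
    c = (N + 0.5)*np.sinh(2*lam)
    d = N + 0.5          # both symplectic eigenvalues of sigma_AB
    # Optimal Gaussian POVM is heterodyne (sigma_M = I/2), for which
    # sqrt(det sigma_P) = a - c^2/(a + 1/2):
    det_p = a - c**2/(a + 0.5)
    return h(a) - 2.0*h(d) + h(det_p)
\end{verbatim}
As a consistency check, for $N=0$ (pure two-mode squeezed vacuum) one finds $\sqrt{\det\sigma_P}=1/2$, so that $D^{\mathcal{G}}=h(a)$ reduces to the entanglement entropy; in this pure case a number-basis measurement on $B$ projects mode $A$ onto Fock states, the non-Gaussian conditional entropy vanishes, and $D^{\mathcal{NG}}=D^{\mathcal{G}}$, consistent with the numerical observation below that the gap between the two opens up as the thermal photon number grows.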
\begin{figure}[t] \centering \includegraphics[width=0.4\textwidth]{Ngdisc_sts_publish.eps} \caption{ Gaussian and non-Gaussian quantum discord for STS as a function of $\lambda$, for different values of $N_1=N_2$} \label{Fig: number basis qdisc} \end{figure} \begin{figure}[h] \centering \includegraphics[width=0.4\textwidth]{Ngdisc_mts_01_publish.eps} \includegraphics[width=0.4\textwidth]{Ngdisc_mts_1_publish.eps} \caption{Gaussian and non-Gaussian quantum discord for MTS states as a function of $\phi$ for different values of $N_1$ and $q=N_2/N_1$} \label{Fig: number basis mts} \end{figure} We have compared Gaussian and non-Gaussian quantum discord (with the non-Gaussian measurements corresponding to photon number measurements) for STS and MTS states with a wide range of squeezing, mixing and thermal parameters. In Fig. \ref{Fig: number basis qdisc} we show results for STS with varying $\lambda$ and $N_1=N_2=10^{-2}$, $N_1=N_2=1$. The key result is that the non-Gaussian quantum discord is always greater than its Gaussian counterpart for all values of $N_1$ and $\lambda$. The gap grows with increasing $N_1$ and $\lambda$. In Fig.~\ref{Fig: number basis mts} we show results for MTS $N_1=\{0.1, 1\}$ and $q=N_2/N_1=\{0,0.1,0.4,0.5\}$. Also in this case, the non-Gaussian discord is always higher than the Gaussian one.\\ Both results indicate that the Gaussian (heterodyne) measurement is optimal for STS and MTS states, at least compared to photon counting, in the sense that it allows for a better extraction of information on mode $A$ by a measurement on mode $B$. \section{Squeezed Number basis} \label{Sec : squeezed number basis} \begin{figure}[bt] \centering \includegraphics[width=0.4\textwidth]{Ngdisc_sts_squeezed_publish.eps} \caption{Gaussian and non-Gaussian quantum discord for STS with $N_1=1$ as a function of $\lambda$ and for different values of local squeezing $r$} \label{Fig: squeezed number basis qdisc} \end{figure} \begin{figure}[h] \centering \includegraphics[width=0.4\textwidth]{Ngdisc_mts_squeezed_publish.eps} \caption{Gaussian and non-Gaussian quantum discord for MTS states for $N_1=1$, $N_2=0$ as a function of $\phi$ and for different values of local squeezing $r$} \label{Fig: squeezed number basis mts} \end{figure} We now analyze the case of non-Gaussian measurements represented by the squeezed number basis $\ketbra{n_r}{n_r}=S(r)\ketbra{n}{n}S(r)^\dagger$, where $S(r)=\exp{(-r^*\frac{a^2}{2}-r\frac{(a^\dagger)^2}{2})}$ is the single mode squeezing operator. A local measurement in the squeezed number basis is equivalent to a measurement in the number basis, performed on a locally squeezed state. 
In formulas, the probability of measuring $n_r$ on one subsystem when the state is $\varrho$ reads \begin{eqnarray} p_\varrho(n_r)&=&\hbox{Tr}(\openone\otimes\ketbra{n_r}{n_r}\,\varrho)=\hbox{Tr}(\openone\otimes\ketbra{n}{n}\,(\openone\otimes S^\dagger(r))\varrho (\openone\otimes S(r))) \nonumber \\&=& \hbox{Tr}(\openone\otimes\ketbra{n}{n}\,\varrho_r)=p_{\varrho_r}(n) \label{Eq.: probSquezeednumber} \end{eqnarray} i.e., it is equal to the probability of measuring $n$ on the locally squeezed state $\varrho_r = S^\dag(r) \varrho S(r)$ (with $S(r)$ acting on mode $B$), and the corresponding post-measurement state is \begin{eqnarray} \varrho_{n_r}^A&=&\hbox{Tr}_B[\openone\otimes\ketbra{n_r}{n_r}\,\varrho\,\openone\otimes\ketbra{n_r}{n_r}]/p_\varrho(n_r) \nonumber \\&=& \hbox{Tr}_B[\openone\otimes\ketbra{n}{n}\,\varrho_r\,\openone\otimes\ketbra{n}{n}]/p_{\varrho_r}(n) =: \varrho_{r_{n}}^A \label{Eq.: rhoApostSqueezednumber} \end{eqnarray} The general idea is that measurements on a state $\varrho$ in a basis that is obtained by performing a unitary (Gaussian) operation $V$ on the number basis $\ketbra{n}{n}$ can be represented as measurements in the number basis on a modified state $\varrho_V = V^\dag \varrho V$ on which the local unitary operation acts.\\ In the case of the squeezed number basis, the post-measurement state is not diagonal, therefore the reasoning leading to Eq. (\ref{Eq.: CondEntSTSnumber}) does not hold. The post-measurement state matrix elements $(\varrho_{r_{n}}^A)_{h,k}=\varrho_{(h,k),(n,n)} $ can be obtained directly by evaluating the expression (\ref{Eq.: rhoApostnumber}), where now the expression $O_{hk}(st)=\bra{h k}O\ket{st}$ (where $O=S(\lambda),U(\phi)$) must be substituted with $O_{hk}'(st)=\bra{h k}(\openone\otimes S^\dagger(r))\,O\ket{st} = \sum_q \bra{k} S^\dagger(r) \ket{q} \bra{hq} O\ket{st} $, and the elements of the single mode squeezing operator are given in \cite{SingleModeSqueezing} (eq. 20) or in \cite{SingleModeSqueezingKnight} (eq. 5.1).\\ We have evaluated the Gaussian and non-Gaussian quantum discord for STS and MTS states with a wide range of two-mode squeezing and thermal parameters. Non-Gaussian measurements are done in the squeezed photon number basis, $\Pi_n = S(r)\ket{n} \bra{n} S(r)^\dag$ with variable $r \in [0,0.5]$. The effect of local squeezing on non-Gaussian quantum discord is negligible in the whole parameter range under consideration: we compare the non-Gaussian discord for different values of $r$ and find that all curves collapse. This can be seen in fig. \ref{Fig: squeezed number basis qdisc} and fig. \ref{Fig: squeezed number basis mts}, where we plot the behavior for $N_1=N_2=0.01$ (STS) and $N_1=1, N_2=0$ (MTS). The same behavior is observed in the whole parameter range under investigation. We have verified numerically that the post-measurement states of mode $A$, $\varrho_{r_{n}}^A$, are not equal as $r$ varies (i.e., the post-measurement states corresponding to measurement result $n_r$ change with $r$), yet the sum $\sum_n p_n S(\varrho_{r_{n}}^A)$ is the same for all values of $r$ under investigation. Therefore, the squeezing in the measurement basis has no effect on the discord, at least for the values of squeezing considered: in particular, it cannot afford a deeper minimization than that obtained without local squeezing. This indicates that the heterodyne measurement remains optimal also with respect to measurement in the squeezed number basis.
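The $r$-independence of the conditional entropy reported above can be verified with a routine along the following lines (again our own sketch, complementing the projector construction given in sec.~\ref{Sec : NonGaussian correlations}; here \texttt{rho} is a two-mode density matrix in the truncated Fock basis, with mode $B$ as the second tensor factor, and \texttt{projectors} is the output of the sketch given there).
\begin{verbatim}
import numpy as np

def conditional_entropy(rho, projectors):
    # S^{Pi,NG}(A|B) = sum_n p_n S(rho_A^n) for a two-mode density
    # matrix rho of shape (nmax*nmax, nmax*nmax).
    nmax = projectors[0].shape[0]
    r4 = rho.reshape(nmax, nmax, nmax, nmax)   # indices (a, b; a', b')
    total = 0.0
    for P in projectors:
        # unnormalized conditional state of A: Tr_B[(1 x P) rho (1 x P)]
        rhoA = np.einsum('abcd,db->ac', r4, P)
        p = np.trace(rhoA).real
        if p > 1e-12:
            w = np.linalg.eigvalsh(rhoA / p)
            w = w[w > 1e-12]
            total -= p * float(np.sum(w * np.log(w)))
    return total
\end{verbatim}
Running it with projectors built for different values of $r$ allows one to check directly that, while the individual conditional states change with $r$, the weighted entropy sum does not, as described above.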
\section{Displaced Number basis} \label{Sec : displaced number basis} We finally analyze the case of non-Gaussian measurements represented by the displaced number basis $\ketbra{n_\alpha}{n_\alpha}=D(\alpha)\ketbra{n}{n}D(\alpha)^\dag$, where $D(\alpha)=\exp(\alpha a^\dag -\alpha^* a)$ is the single mode displacement operator. According to the general considerations above, a local measurement in the displaced number basis is equivalent to a measurement in the number basis, performed on a locally displaced state $\varrho_\alpha$. As in the case of the squeezed number basis, the post-measurement state is not diagonal and we need all matrix elements $(\varrho_{\alpha_{n}}^A)_{h,k}=\varrho_{(h,k),(n,n)} $. They can be obtained directly by evaluating the expression (\ref{Eq.: rhoApostnumber}), where the expression $O_{hk}(st)=\bra{h k}O\ket{st}$ (with $O=S(\lambda),U(\phi)$) must be substituted with $O_{hk}'(st)=\bra{h k}(\openone\otimes D^\dag(\alpha))\,O\ket{st}=\sum_q \bra{k} D^\dag(\alpha)\ket{q} \bra{hq} O\ket{st}$, and the elements of the single mode displacement operator are given in~\cite{Parisbook} (eq. 1.46). \begin{figure}[bt] \centering \includegraphics[width=0.4\textwidth]{Ngdisc_sts_displaced_publish.eps} \caption{Gaussian and non-Gaussian quantum discord for STS with $N_1=1$ as a function of $\lambda$ and for different values of local displacement $\alpha$} \label{Fig: displaced number basis qdisc} \end{figure} \begin{figure}[h] \centering \includegraphics[width=0.4\textwidth]{Ngdisc_mts_displaced_publish.eps} \caption{Gaussian and non-Gaussian quantum discord for MTS states for $N_1=1$, $N_2=0$ as a function of $\phi$ and for different values of local displacement $\alpha$} \label{Fig: displaced number basis mts} \end{figure} The evaluation of the non-Gaussian quantum discord can be simplified by first noticing that one can consider real values of $\alpha$ only. Indeed, the quantum discord only depends on the modulus $|\alpha|$. This is shown in detail in the appendix~\ref{Sec : phase irrelevant}, by using the characteristic function formalism. Consider $\varrho_{n_\alpha}^A $, the post-measurement state of mode $A$ after measurement result $n_{\alpha}$ is obtained on $B$. If we change the phase of $\alpha$, $\alpha \rightarrow \alpha' \equiv e^{ i \theta} \alpha $, we find that \begin{equation} \varrho_{n_{\alpha'}}^A = U \varrho_{n_\alpha}^A U^\dag \end{equation} where $U$ is a unitary operation corresponding to a simple quadrature rotation \begin{equation} a_1 \to a_1 e^{i \theta} \quad a_1^\dag \to a_1^\dag e^{-i \theta} \end{equation} Therefore, we have $\varrho_{n_{\alpha'}}^A \neq \varrho_{n_\alpha}^A$, but $\varrho_{n_{\alpha'}}^A$ and $\varrho_{n_\alpha}^A$ have the same spectrum, since they are related by a unitary. Hence, the entropy of the reduced post-measurement state $\varrho_{n_\alpha}^A$ does not depend on the phase of $\alpha$ but just on $|\alpha|$. It follows that the non-Gaussian quantum discord computed with displacement $\alpha$ does not depend on the phase of $\alpha$. \\ We have evaluated the Gaussian and non-Gaussian quantum discord for STS and MTS states with a wide range of two-mode squeezing and thermal parameters. Non-Gaussian measurements are done in the displaced photon number basis, $\Pi_n = D(\alpha)\ket{n} \bra{n} D(\alpha)^\dag$ with variable $\alpha \in [0,2.5]$. In fig.~\ref{Fig: displaced number basis qdisc} and fig.~\ref{Fig: displaced number basis mts} we plot the Gaussian and non-Gaussian quantum discord.
We see that greater displacements lead to lower values of the non-Gaussian quantum discord, but the decrease is insufficient to match the Gaussian quantum discord, which remains optimal. However, the non-Gaussian quantum discord approximates the Gaussian one as $\alpha \to \infty$. This is proven analytically in appendix~\ref{Sec : growing alpha}. There we find that for both STS and MTS \begin{eqnarray} & \ & \varrho_{\alpha_n}^A \to \varrho_{\alpha_0}^A \qquad \mbox{as} \quad \alpha \to \infty \label{Eq: limit} \end{eqnarray} i.e., the conditional states $\varrho_{\alpha_n}^A$ become independent of $n$ and equal to the $n=0$ result. As a consequence, the conditional entropy in the displaced number basis is equal to the entropy of the post-measurement state for any measurement result, and, in particular, for $n=0$: \begin{equation} S^{\Pi,\mathcal{NG}} (A|B) = \sum_n p_n S(\varrho_{\alpha_n}^A) \to S(\varrho_{\alpha_0}^A) \label{Eq: conditonalnonG} \quad \mbox{as} \quad \alpha \to \infty \end{equation} But $\varrho_{\alpha_0}^A $ is just the post-measurement state we obtain after a heterodyne detection on mode $B$ (equal for all measurement results modulo a phase-space translation, which is irrelevant for the entropy). Therefore, we also have $S^{\mathcal{G}} (A|B) = S(\varrho_{\alpha_0}^A) $, and the non-Gaussian discord $D^{\mathcal{NG}}(A:B)$ in the displaced number basis tends to the Gaussian discord $D^{\mathcal{G}}(A:B)$ as $\alpha \to \infty$. \par Actually, we cannot prove that $D^{\mathcal{NG}}(A:B)$ is bounded from below by $D^{\mathcal{G}}(A:B)$, and we cannot rule out the possibility that $D^{\mathcal{NG}}(A:B) < D^{\mathcal{G}}(A:B)$ for intermediate values of $\alpha$. However, our numerical data do not support this possibility, since we never observe $D^{\mathcal{NG}}(A:B) < D^{\mathcal{G}}(A:B)$, and we expect that $D^{\mathcal{NG}}(A:B) \to D^{\mathcal{G}}(A:B)$ from above as $\alpha \to \infty$. \\ In conclusion, we have analytical and numerical evidence that the heterodyne measurement remains optimal also with respect to measurement in the displaced number basis. \section{Geometric discord} \label{Sec : Geometric} In this section, we briefly consider the recently introduced measure of geometric discord and compare results with those obtained for the quantum discord. The geometric discord~\cite{Dakic} is defined as \begin{equation} D_G(A:B) = \min_{\{\chi_{AB} \in \mathcal{C} \}} \| \varrho_{AB} - \chi_{AB} \|_2^2 \end{equation} and it measures the distance of a state from the set $\mathcal{C}$ of quantum-classical states, where $\|A\|_2^2 = \mbox{Tr}[A^\dag A]$ is the squared Hilbert-Schmidt norm. Clearly $D_G = 0$ iff $D=0$, since both measures vanish on the set of classically correlated states. In particular, it has been proven that $D_G$ can be seen as a measure of the discrepancy between a state before and after a local measurement on subsystem $B$~\cite{Luo}: \begin{equation} D_G(A:B)=\min_{\{\Pi \in POVM \}} \| \varrho_{AB} - \varrho_{AB}^\Pi \|_2^2 \label{Eq.: geomdisc} \end{equation} where the unconditional post-measurement state is given by $\varrho_{AB}^\Pi = \sum_x M_B^x \varrho_{AB} M_B^{x \dag} $. Notice that $D_G$ and $D$ are not monotonic functions of one another, and the relation between them is still an open question. However, in many cases $D_{G}$ is much simpler to evaluate than $D$. \par In analogy with the case of Gaussian discord, a Gaussian version of the geometric discord can be defined by restricting to Gaussian measurements~\cite{GaussianGeom}.
Again, the Gaussian geometric discord can be analytically computed for two-mode Gaussian states. With the same reasoning as in Sec.~\ref{Sec : correlations}, one easily obtains \begin{equation} D_G^{\mathcal{G}} (\varrho_{AB} ) = \min_{\sigma_M} \mbox{Tr}[(\varrho_{AB}-\varrho_P \otimes \varrho_M)^2]. \end{equation} Exploiting the property that $\mbox{Tr}[\varrho_1 \varrho_2] = 1/\sqrt{\det[(\sigma_1 + \sigma_2)/2]}$ for any two Gaussian states $\varrho_1$ and $\varrho_2$, we obtain \begin{eqnarray} D_G^{\mathcal{G}} (A:B) = \min_{\sigma_M} \Big\{ 1/\sqrt{\det\sigma_{AB}} + \nonumber \\ + 1/\sqrt{ \det(\sigma_P \oplus \sigma_M )} - 2/\sqrt{\det[(\sigma_{AB} + \sigma_P \oplus \sigma_M)/2]} \Big\}. \end{eqnarray} For the relevant case of STS and MTS, the minimum is obtained with the $\sigma_M$ elements given by $ \alpha = \beta = \frac{\sqrt{ab} (\sqrt{4 ab - 3 c^2} + \sqrt{ab})} {3a}$, $\gamma = 0 $. The least disturbing Gaussian POVM for STS, according to the Hilbert--Schmidt distance, is thus a (noisy) heterodyne detection, a result analogous to what was found in the case of the quantum discord. If one constrains the mean energy per mode, the Gaussian quantum discord gives upper and lower bounds to the Gaussian geometric discord. In the absence of such a provision, the geometric discord can vanish for arbitrarily strongly nonclassical (entangled) Gaussian states, as a consequence of the geometry of CV state spaces. \par Also in this case, we may consider non-Gaussian measurements and evaluate a non-Gaussian geometric discord: \begin{equation} D_G^{\mathcal{NG}} (A:B) = \mbox{Tr}[(\varrho_{AB}-\varrho_{AB}^\Pi)^2]. \label{Eq: nonGgeomdef} \end{equation} For a measurement in the number basis, we easily obtain \begin{equation} D_G^{\mathcal{NG}} = \mu(\varrho) - \sum_{n, p, q} |\bra{p n }\varrho \ket{q n}|^2, \label{Eq.: nonGausgeom} \end{equation} where $\mu(\varrho)=\frac{1}{4 \sqrt{\det(\sigma)}}$ is the (Gaussian) state purity~\cite{Parisbook}. In the case of measurements in the squeezed or displaced number basis, we have to use $\varrho_r$ and $\varrho_\alpha$ instead of $\varrho$ in Eq.~(\ref{Eq.: nonGausgeom}). In general, in order to compute the geometric discord we need to compute matrix elements, and we use the same numerical methods described above. \\ \begin{figure}[bt] \centering \includegraphics[width=0.4\textwidth]{Nggeom_sts_squeezed_publish.eps} \includegraphics[width=0.4\textwidth]{Nggeom_mts_squeezed_publish.eps} \caption{(Top) Gaussian and non-Gaussian geometric discord for STS with $N_1=1$ as a function of $\lambda$ and for different values of local squeezing $r$; (Bottom) Gaussian and non-Gaussian geometric discord for MTS states for $N_1=1$, $N_2=0$ as a function of $\phi$ and for different values of local squeezing $r$.} \label{Fig: squeezed number basis geomdisc} \end{figure} \begin{figure}[bt] \centering \includegraphics[width=0.4\textwidth]{Nggeom_sts_displaced_publish.eps} \includegraphics[width=0.4\textwidth]{Nggeom_mts_displaced_publish.eps} \caption{(Top) Gaussian and non-Gaussian geometric discord for STS with $N_1=1$ as a function of $\lambda$ and for different values of local displacement $\alpha$; (Bottom) Gaussian and non-Gaussian geometric discord for MTS states for $N_1=1$, $N_2=0$ as a function of $\phi$ and for different values of local displacement $\alpha$.} \label{Fig: displaced number basis geomdisc} \end{figure} \subsection{Results} We have compared the Gaussian and non-Gaussian geometric discord for STS and MTS in a wide range of parameters.
We have considered measurements in the number, squeezed number, and displaced number basis for the same values of the parameters given in the preceding sections. Results are plotted in Figs.~\ref{Fig: squeezed number basis geomdisc} and \ref{Fig: displaced number basis geomdisc}. In general, at variance with the results for the quantum discord, we find that non-Gaussian measurements can provide lower values of the geometric discord than Gaussian ones. Among the class of non-Gaussian measurements we have considered, the optimal one is provided by the number basis, which gives values of the geometric discord that are always lower than those given by the optimal Gaussian measurement. The non-Gaussian geometric discord increases for increasing $r$ and $\alpha$, and it can become greater than its Gaussian counterpart. These results are very different from the quantum discord case: on one hand, the (non-Gaussian) geometric discord is substantially affected by the local squeezing; on the other hand, it does not approach the Gaussian one when the displacement $\alpha \to \infty$, but rather grows monotonically. Indeed, if we increase the squeezing or displacement in the measurement basis, the post-measurement state becomes more distant (in Hilbert--Schmidt norm) from the original one. As already noticed, performing the measurement in the squeezed (displaced) number basis is equivalent to first squeezing (displacing) the state and then measuring it in the number basis. The local squeezing and displacement have the effect of increasing the energy of the state, shifting the photon number distribution $P(B=n)$ towards greater values of $n$. This causes the overlap between the post-measurement state and the original state to decrease, and therefore their distance to increase. \par Let us further comment on the difference between the quantum discord and the geometric discord cases. Quantum discord and geometric discord both vanish for classical states, but they are not monotonic functions of one another, and thus they are truly different quantities. The geometric discord, based on the Hilbert--Schmidt distance, is a geometric measure of how much a state is perturbed by a local measurement, while the quantum discord assesses to which extent correlations are affected by a local measurement. While for the quantum discord well-defined operational and informational interpretations can be found~\cite{Gu,Datta2}, for the geometric discord the situation is more problematic. Indeed, one can design protocols in which the geometric discord can in some cases be related to the protocols' performance~\cite{RSP, Tufarelli}; however, recent discussions~\cite{Piani} show that, as a consequence of the non-invariance of the Hilbert--Schmidt norm under quantum evolutions, it is difficult to find a conclusive argument about the relevance of the geometric discord as a measure of quantumness of correlations. Our data show that non-Gaussian measurements can yield optimal values of the geometric discord, contrary to the case of the quantum discord. Hence, the behavior of the quantum discord and of the geometric discord with respect to different types of measurements is different. This is a further indication that the geometric discord cannot be used as a good benchmark for the quantum discord and that the degree of quantumness, if any, measured by such a quantity has a fundamentally different nature. \section{Discussion and conclusions} \label{Sec. : conclusions}
The definition of discord involves an optimization over all possible local measurements (POVMs) on one of the subsystems of a bipartite composite quantum system. In the realm of continuous variables (CV), initial research efforts on quantum discord restricted the minimization to the set of (one-mode) Gaussian measurements. \par In this work we have investigated CV quantum discord beyond this restriction. We have focused on Gaussian states, asking whether Gaussian measurements are optimal in this case, i.e., whether the Gaussian discord is the true discord for Gaussian states. While a positive answer to this question had already been given for the special case of two-mode Gaussian states having one vacuum normal mode (by means of an analytical argument based on the Koashi--Winter formula), no general result was available so far. We have addressed our central question by considering two large classes of two-mode Gaussian states, the squeezed thermal states (STS) and the mixed thermal states (MTS), and by allowing for a wide range of experimentally feasible non-Gaussian measurements based on orthogonal bases: the photon number basis, the squeezed number basis, and the displaced number basis. For both STS and MTS states, in the range of parameters considered, Gaussian measurements always provide optimal values of discord compared to the non-Gaussian measurements under analysis. Local squeezing of the measurement basis has no appreciable effect on the correlations, while local displacement leads to lower values of the non-Gaussian discord, which approaches the Gaussian one in the limit of infinite displacement. \par Overall, for the explored range of states and measurements, we have evidence that the Gaussian discord is the ultimate quantum discord for Gaussian states. We note that the optimality of Gaussian measurements suggested by our analysis is a property which holds only for Gaussian states. In the case of non-Gaussian states, e.g., CV Werner states, non-Gaussian measurements such as photon counting can lead to a better minimization, as was recently proven in Ref.~\cite{NonGausDisc}. \par We have also investigated the CV geometric discord~\cite{GaussianGeom}, comparing the Gaussian and non-Gaussian cases. We have shown that the behavior of the geometric discord is completely different from that of the quantum discord. On one hand, non-Gaussian measurements can lead to lower values of the geometric discord, the number-basis measurement being the optimal one; on the other hand, the effects of both local squeezing and displacement are strong and consist in a noteworthy increase in the non-Gaussian geometric discord. The remarkable differences between the quantum and the geometric discord imply that the latter cannot be used as a benchmark of the former. \par Both for the discord and for the geometric discord, a definite answer on the optimal measurement minimizing the respective formulas would require extending the set of non-Gaussian measurements to possibly more exotic ones and applying those realizable in actual experiments to a broader class of Gaussian and non-Gaussian states. While we leave this task for future research, our results on the discord support the conjecture that Gaussian measurements are optimal for Gaussian states and allow us to set, for the class of states analyzed, a tighter upper bound on the entanglement of formation of $1 \times 2$-mode Gaussian states via the Koashi--Winter relation.
\section{Introduction} \label{sec:intro} Until recently, signal processing was dominated by conventional model-based algorithms, which rely on mathematical and physical models of the real world. They are inherently interpretable and often incorporate domain knowledge, such as statistical assumptions, smoothness, structure of the model space, and origin of the noise. However, this approach can become mathematically intractable if problems are complex. Machine learning (ML) provides an alternative approach to this challenge by building data-driven mathematical models. Neural networks (NNs) and supervised learning in particular offer a proper framework for various signal-processing problems~\cite{deepreview2015}. Below, we briefly review a few recent trends that served as motivation for developing the proposed variable projection network (VPNet). A traditional ML approach is to decompose the problem into separate feature extraction and learning steps \cite{MLbook}. In this case, the data is preprocessed in order to extract static features based on the given domain knowledge. These features are inputs to conventional ML algorithms. Although the dimension of the original data is significantly reduced in the first step, these handcrafted features are usually suboptimal with respect to the whole learning process \cite{replearning}. Deep learning provides alternatives to the traditional approach, overcoming some of its drawbacks \cite{deepreview2015}. Learned NN features can also serve as input to non-NN methods, such as discriminant correlation filters~\cite{yang_multi-object_2019}. Ref.~\refcite{diaz-vico_deep_2020} combined traditional kernel-based Support Vector Machines (SVMs) with deep learning approaches. Another common approach is to use the features as input to one or more additional NNs for multi-target prediction~\cite{reyes_performing_2019, mishra_neural_2020, girshick_fast_2015, ren_faster_2015}. Using more hidden layers in deep neural networks (DNNs) has increased the learning abilities of NNs \cite{science}. This enables DNNs to use the first layers for feature extraction and further layers for performing operations on the features learned. Convolutional neural networks (CNNs) are special, optionally deep architectures and are the leading ML approaches in 2D and 3D image processing and computer vision~\cite{AlexNet, ZFNet, GoogLeNet, ronneberger2015u, manzanera_scaled_2019, leming_ensemble_2020}. Here, the built-in feature extraction layers perform multiple convolutional filtering and dimension-reduction (pooling) steps. Despite their advantages, DNNs and CNNs continue to raise several concerns. Their improved efficiency comes at the cost of higher computational complexity and numerical difficulties in the training process (see, e.g.,\ overfitting and divergence). Due to the large number of nonlinear connections between the model parameters, DNN and CNN approaches can be considered black-box methods, where the parameters have no or little physical meaning and are difficult or impossible to interpret. Additionally, training these networks requires vast amounts of labeled data, which is problematic to collect in many applications, such as telecommunications \cite{DNNtelecom} and biomedical engineering \cite{serkan2016, ECGsurvey}.
Although data augmentation, transfer learning, outlier removal, and ensemble methods can mitigate this problem, reducing the data hunger of deep learning approaches is still a major challenge in this field. Despite the popularity of deep learning, traditional ML algorithms continue to dominate in many 1D signal-processing tasks \cite{serkanarXiv}, especially in biomedical signal classification, for example, of electroencephalograms (EEGs), electromyograms (EMGs), and ECGs. The main reason for this lies in the nature of clinical applications, where both accuracy and explainability are important. These cannot be guaranteed by the previously mentioned NN approaches, since they do not extract medically interpretable features. VPNet, however, breaks this impasse by harnessing the theory of variable projection (VP), a framework for solving nonlinear least-squares problems whose parameters can be separated into linear and nonlinear ones. In many fields of signal processing, there is a large number of linear parameters, which are driven by a smaller number of nonlinear variables (see Eq.~\eqref{eq:vpfunc}). For example, signal compression, representation, and feature-extraction algorithms are often based on linear coefficients of some transformation, such as Fourier and wavelet transforms, which can be parameterized via properties of the window function, mother wavelet, etc. VPNet was designed to merge the expert knowledge used by traditional model-based approaches with the learning abilities of NNs. The proposed architecture is inspired by the so-called model-driven NN concept, which is an emerging trend in signal processing. In Section~\ref{sec:relwork}, we review the existing literature on incorporating model-based information into machine learning. The theoretical background, the general formulation of VPNet, and the corresponding forward- and backpropagation algorithms are discussed in Section~\ref{sec:VPNet}. Section~\ref{sec:experiments} describes multiple experiments we performed to evaluate and compare the performance of VPNet to that of other NNs. Finally, Section~\ref{sec:conc} presents conclusions and the expected broader impact of our research. \section{Related works} \label{sec:relwork} Approximation theory gives a general framework to approach the fundamental task in machine learning, which is to learn a good representation of the data~\cite{replearning}. Classical methods in approximation theory build up complicated functions by using linear combinations of elementary functions, whereas neural networks use compositions of simple functions. The structure of these compositions constrains the feasible region where we search for the solution of the corresponding ML task. The model-driven NN concept implements these constraints such that the design of the NN architecture resembles the solution to well-understood mathematical problems, such as ordinary or partial differential equations~\cite{neuralODE, deepPDE} (ODEs, PDEs), signal~\cite{deepunfolding,deepunfolding3, wiener-hammersteinNN} and image~\cite{reactdiff, bilevel, learnPDE} processing, optimization~\cite{optNet}, and control~\cite{dynsysNN, wiener-neural, wiener-neural2}. ODE- and PDE-constrained learning strategies belong to a family of model-driven ML techniques that relates the rigorous mathematical background of differential equations to deep learning problems.
On one hand, numerical solvers provide various ways to derive and to interpret the output of NN architectures, such as residual neural networks~\cite{neuralODE} and Hamiltonian networks~\cite{deepPDE}, based on the discretization scheme of the corresponding ODE or PDE. On the other hand, deep learning can automatically incorporate domain knowledge that would otherwise require significant human effort~\cite{yang_multi-object_2019, learnPDE, reactdiff}, e.g., good insight into the problem and a mathematical formulation of a priori information. Although this approach does not necessarily reduce the number of trainable weights, it helps to design reversible architectures that allow for memory-efficient implementations~\cite{reversibleDNN}. Another branch of model-driven NNs, such as deep unfolding~\cite{deepunfolding} and Wiener-~\cite{wiener-neural2} or Hammerstein-type~\cite{wiener-hammersteinNN} NNs, originates from signal-processing problems. The former approach unfolds the iterations of classical model-based algorithms into layer-wise NN structures whose parameters are optimized based on the training data. This way, the resulting NN retains the powerful learning ability of DNNs, inherits expert knowledge, and reduces the size of the required training data \cite{DNNtelecom}. Wiener-~\cite{wiener-neural2} and Hammerstein-type~\cite{wiener-hammersteinNN} NNs are alternatives that combine the advantages of model-based methods and deep learning techniques. These networks comprise cascades of static nonlinear elements and dynamic linear blocks that represent NNs and linear time-invariant (LTI) systems, respectively. Recently, these methods have shown great potential in many fields, for instance, in system identification \cite{wiener-hammersteinNN}, control engineering \cite{wiener-neural2}, sparse approximation theory \cite{sparseLinInv, neuralISTA}, and telecommunication \cite{unfoldcomm, deepunfoldingRF}. The motivation behind integrating optimization problems into DNN architectures is similar to that of ODE/PDE-driven networks: tailoring optimization problems to real-world processes is labor-intensive work that also requires expert knowledge. To date, several new NN architectures have been proposed in order to learn these optimization problems automatically from data. Solving ill-posed inverse problems is a typical example for such neural networks. In this case, each layer is constrained by a penalized linear least-squares problem, where the parameters of the regularization term, such as threshold values, linear kernels, and weights of the shrinkage functions, constitute the trainable weights~\cite{bilevel, shrinkageFields, variationalNN}. OptNet~\cite{optNet} gives the most general framework in this family, where the layers encode convex quadratic programming (QP) problems. The Hessian matrix of the QP's objective function, along with its equality and inequality constraints, are learnable parameters. The representation power of an OptNet layer is higher than that of two-layer ReLU networks, which can reduce the overall depth of DNN architectures (see Theorems 2 and 3 in Ref.~\refcite{optNet}). Despite its advantages, the forward/backward passes of an OptNet layer are much more computationally expensive than those of a linear or convolutional layer. This is due to the fact that constrained QP problems have no closed-form solution in general; thus the forward pass requires the use of iterative numerical solvers in each layer for each update.
We acknowledge that there are many other model-based~\cite{pereira_fema_2020} and model-free approaches~\cite{lara-benitez_asynchronous_2020, peng_deep_2021, kazi_dynensamble_2020, ahmadlou_enhanced_2010, sanchez-reolid_deep_2020}. In particular, for time-series data there are methods based on spiking neural networks~\cite{req_spiking, simp_spiking}, including their variations~\cite{lara-benitez_experimental_2021, song_spiking_2021}, which are beyond the scope of this paper. To the best of our knowledge, this is the first time that VP operators have been exploited in the context of learning end-to-end systems. However, we note that the proposed VPNet can be considered a special case of OptNet. Indeed, a VP layer forwards the solution of an unconstrained separable nonlinear least-squares (SNLLS) problem to the next layer (cf. Eq.~(1) in Ref.~\refcite{optNet}). The corresponding nonlinear parameters are the trainable weights of the VP layer, and the linear ones are the extracted features, which are forwarded to the next layer. In contrast to a general OptNet layer, both the solution and the gradients of a VP layer can be calculated analytically, as provided by the theoretical framework of variable projection~\cite{golub_pereyra1973}. This speeds up both training and inference, which can be further improved by the use of orthogonal and discrete orthogonal function systems (see, e.g., Section~\ref{sec:vpforwardprop}). \section{Variable Projection Networks} \label{sec:VPNet} \subsection{Variable projections} Variable Projection (VP) \cite{golub_pereyra1973} provides a framework for addressing nonlinear modeling problems of the form \begin{equation} x \approx \hat{x} = \sum\limits_{k=0}^{n-1} c_k \Phi_k({\theta}) = {\Phi}({\theta}) {c}, \label{eq:vp} \end{equation} where $x \in \mathbb{R}^m$ and $\Phi_k \in \mathbb{R}^m$ denote the input data to be approximated and a parametric function system, respectively. The symbol $\Phi(\theta)$ refers both to the function system itself and to the corresponding matrix in $\mathbb{R}^{m \times n}$. The linear parameters ${c} \in \mathbb{R}^n$ and the nonlinear parameters ${\theta} \in \mathbb{R}^p$ of the function system ${\Phi}$ are separated. The least-squares fit of this problem means minimizing the nonlinear functional \begin{equation*} r({c},{\theta}) := \left\|x - {\Phi}({\theta}) {c}\right\|_2^2. \label{eq:vpproblem} \end{equation*} Without nonlinear parameters (i.e.,\ if ${\theta}$ is fixed), the model is linear in the coefficients ${c}$. The minimization of $r$ with respect to $c$ leads to the well-known linear least-squares approximation. Note that this is in fact a best approximation problem in Hilbert spaces. The optimal solution can be expressed by means of Fourier coefficients and orthogonal projection operators $\mathcal{P}_{{\Phi}(\theta)}$: \begin{equation} {c} = {\Phi}^+(\theta) x, \qquad \hat{x} = \mathcal{P}_{{\Phi}(\theta)} x = {\Phi}(\theta){\Phi}^+(\theta) x, \label{eq:vpop} \end{equation} where ${\Phi}^+(\theta)$ denotes the Moore--Penrose pseudoinverse of the matrix ${\Phi}(\theta)$. The concept is closely related to mathematical transformation methods, such as Fourier and wavelet transforms, which can be interpreted as orthogonal projections by a given function system with a predefined $\theta$. From a practical point of view, the coefficients $c$ can be interpreted as features extracted by VP, and $\hat{x}$ is a result of low-pass filtering and dimension reduction.
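For a fixed $\theta$, the operations in Eq.~\eqref{eq:vpop} amount to an ordinary linear least-squares solve. A minimal Python sketch of the two operations (function names are ours):
\begin{verbatim}
import numpy as np

def vp_project(Phi, x):
    """Coefficients c = Phi^+ x and projection x_hat = Phi Phi^+ x."""
    c, *_ = np.linalg.lstsq(Phi, x, rcond=None)  # SVD-based solve
    return c, Phi @ c

def vp_functional(Phi, x):
    """Squared residual ||x - Phi Phi^+ x||^2 of the projection."""
    _, x_hat = vp_project(Phi, x)
    return np.sum((x - x_hat) ** 2)
\end{verbatim}
The squared residual computed by \texttt{vp\_functional} is precisely the quantity that is minimized over $\theta$ below.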
The minimization of $r$ in the general case can be decomposed into the minimization over the nonlinear parameters ${\theta}$, while the linear parameters ${c}$ are computed by the orthogonal projection. Thus, according to the work of Golub and Pereyra~\cite{golub_pereyra1973}, minimizing $r$ is equivalent to minimizing the following VP functional: \begin{equation} r_2({\theta}) := \left\|x - {\Phi}({\theta}){\Phi}^+({\theta}) x\right\|_2^2. \label{eq:vpfunc} \end{equation} In Ref.~\refcite{varpro_matlab}, a robust gradient-based Matlab implementation was provided for the numerical optimization of $r_2$. Mathematically, VP is a formalization of adaptive orthogonal transformations that allows filtering and feature extraction by means of parametric function systems. If a nonlinear optimization problem can be separated into linear and nonlinear parameters, VP may also act as a solver, which opens up other possible applications \cite{golub_pereyra2003, SNLLS2021}. In the ML context, VP can be used as a feature extraction method and as a modeling technique for the training procedure \cite{vptrain}. Pereyra et al.\ proposed VP as an optimization method for a given class of feedforward NNs. They modelled the whole network with VP and used the VP optimization method from Ref.~\refcite{golub_pereyra1973} as an alternative to stochastic gradient methods. This methodology is, however, limited to NNs with only one hidden layer. Approaching VP from a different and novel direction, based on its feature extraction ability, we introduce VPNet. Previous results have shown that several biomedical signal-processing problems can be addressed efficiently with variable projection by means of adaptive rational and Hermite functions as well as B-splines \cite{genVP, ensembleECG}. VP features have been used in particular for ECG and EEG representation, compression, classification, and segmentation \cite{ECGdelin,weightedHermite,ecgtalk,qrsmodel,ECMI2018,tbme_paper,ratECG,ratECGseg,ratECGclass}. The results show that VP provides a very compact, yet morphologically accurate, representation of signals with respect to the target problem. Additionally, the nonlinear parameters themselves carry direct morphological information about the signals, and they are usually human-interpretable. \subsection{VPNet architecture} The key idea of this architecture is to create a network that combines the representation abilities of VP and the prediction abilities of NNs in the form of a composite model. The basic VPNet architecture is a feedforward NN, where the first layer(s) apply a VP operator whose output is forwarded to a fully connected, potentially deep NN (see Fig.~\ref{fig:vpnet}). The construction is similar to that of CNNs in the sense that the first layer(s) of the network can be interpreted as a built-in feature extraction method. Note that more complex VPNet architectures are also possible, for instance, based on the models of U-Net \cite{ronneberger2015u} and AutoEncoder \cite{Goodfellow-et-al-2016}, which will be investigated as part of our future work. \begin{figurehere} \begin{center} \includegraphics[width=0.48\textwidth]{./Figures/VPNet_architecture_short.png} \caption{VPNet architecture.} \label{fig:vpnet} \end{center} \end{figurehere} Depending on its target application, the VP layer we propose has two possible behaviours.
It either performs a filtering of the form \begin{equation} f^{\text{(vp)}}(x) := \Phi(\theta)\Phi^+(\theta) x = \hat{x} \qquad (x \in \mathbb{R}^m), \label{eq:vplayerfilt} \end{equation} or a feature extraction of the form \begin{equation} f^{\text{(vp)}}(x) := \Phi^+(\theta) x = c \qquad (x \in \mathbb{R}^m), \label{eq:vplayerfeat} \end{equation} where ${\theta} \in \mathbb{R}^p$ denotes the nonlinear system parameters of the given function system ${\Phi}$, as defined above. These VP operators refer to the orthogonal projection and the general Fourier coefficients of the input $x$ by means of the parametric system $\Phi(\theta)$, as in Eq.~\eqref{eq:vpop}. The filter method may be better suited to regression problems, while the feature extraction is suitable for classification problems. The nonlinear system parameter vector ${\theta}$ comprises the learnable parameters of the VP layer. Note that many inverse problems~\cite{golub_pereyra2003} can be viewed as SNLLS data-fitting problems involving a small set of adjustable nonlinear parameters ${\theta}$ with direct physical interpretations. For instance, the function system $\Phi_k(t;\tau_k,\lambda_k)=\cos(\lambda_k t + \tau_k)$ can be used in frequency estimation and in EEG analysis, where the network would learn dominant frequencies $\lambda_k$ and phases $\tau_k$ that characterize a certain class of signals, such as seizures in EEG recordings \cite{EEGseizureclass, strf1, strf2}. MRI is another setting \cite{varpro_mri}, where $\Phi_k(t;\lambda_k)=\exp(-\lambda_k t)$ with $\lambda_k\in\mathbb{R}^+$ yields information about the tissue type. The previously mentioned properties and advantages of the VP operator are implicitly built into VPNet: \begin{itemize} \item \emph{Role}: A novel model-driven network architecture for 1D signal-processing problems. \item \emph{Generality}: VPNet can be built from arbitrary parameterized function systems, which allows the direct incorporation of domain knowledge into the network. \item \emph{Interpretability}: The VP layer can be explained as a built-in feature-extraction method. Further, the layer parameters are the nonlinear VP system parameters, which have an interpretable meaning. They are usually directly connected to morphological properties of the input data (see, e.g.,\ Section~\ref{sec:hermite}). \item \emph{Simplicity}: Since the VP layer is usually driven by only a few system parameters, VPNet may provide a compact alternative to CNNs and DNNs. In fact, the VP layer can significantly decrease the number of parameters in a DNN. \end{itemize} \subsection{VP forward propagation} \label{sec:vpforwardprop} In order to calculate the forward pass of the VP layer, a linear least-squares (LLS) problem has to be solved for a certain value of $\theta$ in each training iteration (see Eqs.~\eqref{eq:vplayerfilt}-\eqref{eq:vplayerfeat}). Several numerical methods exist to solve such problems, among which QR factorization and singular value decomposition (SVD) are the most common techniques. The QR method ($\sim 2mn^2 - 2n^3/3$ flops) is fast and reliable for well-conditioned problems, but may fail when ${\Phi}({\theta})\in\mathbb{R}^{m\times n}$ is nearly rank-deficient. Therefore, in our implementation, we utilize the SVD ($\sim 2mn^2 + 11 n^3$ flops), which is the most stable way to solve unconstrained LLS problems~\cite{numlinalg}. Although it is computationally more demanding than QR factorization when $m\sim n$, the two have approximately the same complexity if $m\gg n$.
Note that the latter inequality usually holds in practice, since in VPNet $m$ stands for the length of the input signal, which is much greater than the number of extracted features $n$. The low computational complexity is also based on the fact that the nonlinearity is precomputed and stored in the matrix ${\Phi}$. As a consequence, during evaluation, the VP layer performs just a matrix multiplication. Further, since the number of features computed by the VP layer is typically very low, the subsequent layers can have lower complexity as well. The weight matrix of a fully connected layer following the VP layer is an element of $\mathbb{R}^{n\times l}$, instead of $\mathbb{R}^{m\times l}$ without the VP layer, where $n$ is the number of coefficients, $m$ is the length of the input signal, and $l$ is the number of neurons in the fully connected layer. Since $n$ is usually far smaller than $m$, the weight matrix is significantly smaller for a fixed number of neurons $l$. For shallow neural networks, when only a few hidden layers are connected to the VP layer, solving the corresponding LLS problem in each training iteration is obviously the bottleneck of VPNet, influencing both the computational complexity and the numerical accuracy. In the following, we provide a realization of the VP layer with Hermite functions, and we demonstrate how the choice of the function system and its parametrization influence the conditioning of ${\Phi}({\theta})$. \subsubsection{Adaptive Hermite system} \label{sec:vpforwardprop_Hermite} In order to alleviate the computational burden of the VP layer, a straightforward option is to parametrize orthogonal function systems. As a case study, let us consider Hermite polynomials \cite{szego}, which are defined by the three-term recurrence relation \begin{equation*} H_{k+1}(t) = 2tH_k(t)-2kH_{k-1}(t) \quad (k\in\mathbb{N}^+, t\in \mathbb{R})\textrm{,} \end{equation*} where $H_0(t)=1$ and $H_1(t)=2t$. These classical orthogonal polynomials can be parametrized via dilation and translation: \begin{equation} \Phi_k(t;\tau,\lambda)=\sqrt{\lambda} \Phi_k(\lambda (t-\tau)), \label{eq:adapthsys} \end{equation} where \begin{equation} \Phi_k(t) = H_k(t)e^{-t^2/2}/\sqrt{\pi^{1/2}2^k k!} \qquad (k\in\mathbb{N})\textrm{.} \label{eq:hermitefun} \end{equation} The functions $\Phi_k(t;\tau,\lambda)$ are the translated and dilated variants of the well-known Hermite functions; thus, we refer to them as ``adaptive Hermite functions''. The forward propagation of the corresponding Hermite-VP layer can be defined by the matrix $\Phi(\theta)$ in Eq.~\eqref{eq:vpfunc}. For a given parameter value $\theta=(\tau,\lambda)$, the $k$-th column of $\Phi(\theta)$ is equal to the values of the $k$-th adaptive Hermite function evaluated at some predefined points $t_0, t_1,\ldots,t_{m-1}\in[a;b]$, where $[a;b]$ stands for the sampling interval. In the case of proper discretization \cite{gautschi}, the columns of $\Phi(\theta)$ are pairwise orthogonal unit vectors for all $\theta$; therefore, $\Phi^+(\theta)=\Phi^T(\theta)$, which speeds up the computation of both the forward and the backward passes. There are two strategies for choosing the discretization points: nonuniform and uniform sampling. The former relies on the Gauss--Hermite quadrature rules, which associate the points $t_0, t_1,\ldots,t_{m-1}\in[a;b]$ with the roots of Hermite polynomials~\cite{gausshermite}.
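For concreteness, the following sketch builds $\Phi(\theta)$ from the recurrence above on a uniform grid (the scheme adopted below); a stable, normalized form of the recurrence is used, and the step-size normalization convention is ours:
\begin{verbatim}
import numpy as np

def adaptive_hermite_matrix(m, n, tau, lam, a=0.0, b=1.0):
    """Columns: first n adaptive Hermite functions Phi_k(.; tau, lam)
    sampled uniformly at m points of [a, b]."""
    t = lam * (np.linspace(a, b, m) - tau)
    Phi = np.empty((m, n))
    Phi[:, 0] = np.pi ** -0.25 * np.exp(-t ** 2 / 2)
    if n > 1:
        Phi[:, 1] = np.sqrt(2.0) * t * Phi[:, 0]
    for k in range(1, n - 1):
        Phi[:, k + 1] = (np.sqrt(2.0 / (k + 1)) * t * Phi[:, k]
                         - np.sqrt(k / (k + 1)) * Phi[:, k - 1])
    # dilation and step size keep the columns approximately orthonormal
    return Phi * np.sqrt(lam * (b - a) / (m - 1))

# discrete orthogonality holds when theta = (tau, lam) lies in Gamma:
Phi = adaptive_hermite_matrix(1000, 3, tau=0.5, lam=12.0)
assert np.allclose(Phi.T @ Phi, np.eye(3), atol=1e-6)
\end{verbatim}
In an actual VPNet implementation, $\tau$ and $\lambda$ would of course be trainable parameters rather than fixed arguments; the sketch only illustrates the construction of $\Phi(\theta)$ and the discrete orthogonality check.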
The Gauss--Hermite approach is the most accurate way to define discrete orthogonal systems, but it requires both the precomputation of the roots and the resampling of the input signals at these nonequidistant points. Therefore, we consider the computationally simpler uniform discretization instead. This sampling scheme, although less accurate, satisfies discrete orthogonality, and thus the identity $\Phi^+(\theta)=\Phi^T(\theta)$ holds, provided that the number of sampling points $m$ is large enough and $\theta\in\Gamma$, where \begin{align*} \Gamma= \left\{(\tau,\lambda)\in\mathbb{R}\times\mathbb{R}_+\;:\; \tau + \frac{3}{\lambda} \leq b, \quad \tau - \frac{3}{\lambda} \geq a\right\}\textrm{.} \label{eq:const_affin} \end{align*} If $\theta\notin\Gamma$, it can happen that the adaptive Hermite functions $\Phi_k(t;\tau,\lambda)$ are no longer discretely orthogonal. In the worst-case scenario, they can be linearly dependent, which results in a rank-deficient matrix $\Phi(\theta)$. In Fig.~\ref{fig:cond_fun}, we demonstrate this phenomenon by evaluating the condition number of $\Phi(\theta)\in\mathbb{R}^{m\times n}$ for $m=1000,\ n=3$, and for a range of parameters $\theta=(\tau,\lambda)\in [500,1100] \times [0.012,0.05]$. It can be seen that the condition number deviates from the ideal case (green dashed line) as we vary $\tau$ and $\lambda$ without regard to $\Gamma$. This can be avoided if we choose the parameters from the feasible region $\Gamma$. The rationale behind this behaviour is given in Appendix~A. \vspace{4mm} \input{./Figures/condnumHermite.tex} \subsection{VP backpropagation} \label{sec:vpbackprop} Let us discuss the training of a general feedforward NN in a supervised manner. Let \[ (x_i,y_i) \qquad (i=1,2,\dots,N) \] be the annotated input-target pairs of the training data, where $x_i \in \mathbb{R}^m$ is the input vector and $y_i$ is the target: a vector $y_i \in \mathbb{R}^s$ (in the case of regression), or a label $y_i \in \mathbb{N}$ or class probabilities $y_i \in [0,1]^c$ (in the case of classification). A general feedforward NN can be expressed as the composition of layer functions of the form \[ NN_{\boldsymbol\theta}(x)=\left(f_{\theta^{(L)}}^{(L)}\circ \ldots \circ f_{\theta^{(\ell)}}^{(\ell)} \circ \ldots \circ f_{\theta^{(2)}}^{(2)} \circ f_{\theta^{(1)}}^{(1)}\right)(x), \] where $x \in \mathbb{R}^m$ stands for the input samples, and $f_{\theta^{(\ell)}}^{(\ell)}$ and $\theta^{(\ell)}$ denote the function and the parameters of layer $\ell$, respectively. The symbol $\boldsymbol\theta$ refers to the set of parameters $\theta^{(\ell)}$. The layer functions $f^{(\ell)}$ may refer to linear mappings, convolutional filters, nonlinear activations, pooling, VP operators, etc. Let \[ \hat{y}_i := NN_{\boldsymbol\theta}(x_i) \qquad (i=1,2,\dots,N) \] denote the predicted values for each input. The training of the network can be addressed as a minimization problem, involving a proper loss (i.e., cost) function $J$ that evaluates the error between predicted and target values. Common loss functions are the Mean Squared Error (MSE), that is, the least-squares cost function (regression problems, $y_i \in \mathbb{R}^s$), and the Binary Cross Entropy (BCE) loss (binary classification, $y_i \in \{0,1\}$): \begin{align*} J_{MSE}(\boldsymbol\theta) &:= \dfrac{1}{N}\sum\limits_{i=1}^N \|y_i - \hat{y}_i \|_2^2, \\ J_{BCE}(\boldsymbol\theta) &:= -\dfrac{1}{N}\sum\limits_{i=1}^N \left(y_i \log \hat{y}_i + (1-y_i) \log(1 - \hat{y}_i) \right).
\end{align*} In our experiments, we used the Cross Entropy loss $J_{CE}$, which is the multi-class extension of BCE (classification, $y_i \in \mathbb{N}$); see also Section~\ref{sec:experiments}. The state-of-the-art method for training feedforward networks is backpropagation~\cite{rumelhart_learning_1987}, where $J$ is minimized by means of stochastic gradient-descent optimization (see, e.g., Adam~\cite{Adam}, Adagrad~\cite{Adagrad}, and RMSprop~\cite{RMSprop}). There are multiple implementations of the backpropagation algorithm for different programming languages, target hardware platforms, and machine learning frameworks~\cite{hung_parallel_1993, hung_oo_backprop_1994, paszke2017automatic, tensorflow2015-whitepaper}. The gradient-descent update formula for each layer parameter is \[ \theta^{(\ell)} := \theta^{(\ell)} - \eta \dfrac{\partial J}{\partial \theta^{(\ell)}}, \] where $\eta > 0$ is called the learning rate. Briefly, backpropagation provides a recursive way of computing the gradients above based on the chain rule: \[ \dfrac{\partial J}{\partial f^{(\ell-1)}} = \dfrac{\partial J}{\partial f^{(\ell)}} \cdot \dfrac{\partial f^{(\ell)}}{\partial f^{(\ell-1)}}, \qquad \dfrac{\partial J}{\partial {\theta}^{(\ell)}} = \dfrac{\partial J}{\partial f^{(\ell)}} \cdot \dfrac{\partial f^{(\ell)}}{\partial {\theta}^{(\ell)}}. \] This way, only the partial derivatives of the layer function $f^{(\ell)}$ with respect to its input $(\partial f^{(\ell)}/\partial f^{(\ell-1)})$ and to its parameters $(\partial f^{(\ell)}/\partial {\theta}^{(\ell)})$ must be calculated. These derivatives are usually well known for the common layer types, and they can also be directly calculated for the VP layers. Based on Ref.~\refcite{golub_pereyra1973}, the partial derivatives of the VP operators with respect to their input and nonlinear parameters can be expressed as follows. In the case of a filtering-type VP layer (see Eq.~\eqref{eq:vplayerfilt}): \begin{align*} f^{\text{(vp)}}(x) &= \Phi(\theta)\Phi^+(\theta) x, \qquad \dfrac{\partial f^{\text{(vp)}}}{\partial x} = \left[\Phi(\theta)\Phi^+(\theta)\right]^T, \\ \dfrac{\partial f^{\text{(vp)}}}{\partial \theta_j} &= \dfrac{\partial \left[\Phi(\theta)\Phi^+(\theta)\right]}{\partial \theta_j} x, \end{align*} where \[ \partial\left[\Phi(\theta)\Phi^+(\theta)\right] = (I - \Phi\Phi^+) \partial \Phi \Phi^+ + \left[(I - \Phi\Phi^+) \partial \Phi \Phi^+\right]^T.\] In the case of a feature-extraction-type VP layer (see Eq.~\eqref{eq:vplayerfeat}): \begin{align*} f^{\text{(vp)}}(x) &= \Phi^+(\theta) x, \qquad \dfrac{\partial f^{\text{(vp)}}}{\partial x} = \left[\Phi^+(\theta)\right]^T, \\ \dfrac{\partial f^{\text{(vp)}}}{\partial \theta_j} &= \dfrac{\partial \Phi^+}{\partial \theta_j} x, \end{align*} where \begin{align*}\partial \Phi^+ = &-\Phi^+ \partial \Phi \Phi^+ + \Phi^+ \left[\Phi^+\right]^T \partial \Phi^T (I - \Phi\Phi^+) \\ &+ (I-\Phi^+\Phi) \partial \Phi^T \left[\Phi^+\right]^T \Phi^+. \end{align*}
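These formulas translate directly into code. A minimal numpy sketch of the backward pass of a feature-extraction VP layer (function names are ours; \texttt{dPhi\_list} holds the precomputed derivatives $\partial\Phi/\partial\theta_j$):
\begin{verbatim}
import numpy as np

def dpinv(Phi, dPhi):
    """Golub--Pereyra derivative of the pseudoinverse Phi^+ with
    respect to one nonlinear parameter (real-valued case)."""
    Pinv = np.linalg.pinv(Phi)
    m, n = Phi.shape
    P_range = Phi @ Pinv   # projector onto range(Phi)
    P_row = Pinv @ Phi     # projector onto the row space
    return (-Pinv @ dPhi @ Pinv
            + Pinv @ Pinv.T @ dPhi.T @ (np.eye(m) - P_range)
            + (np.eye(n) - P_row) @ dPhi.T @ Pinv.T @ Pinv)

def vp_feature_backward(Phi, dPhi_list, x, grad_out):
    """Gradients of f(x) = Phi^+ x w.r.t. x and theta,
    given grad_out = dJ/df from the subsequent layer."""
    Pinv = np.linalg.pinv(Phi)
    grad_x = Pinv.T @ grad_out
    grad_theta = np.array([grad_out @ (dpinv(Phi, dP) @ x)
                           for dP in dPhi_list])
    return grad_x, grad_theta
\end{verbatim}
Note that when $\Phi$ has full column rank, $\Phi^+\Phi=I$ and the last term of $\partial\Phi^+$ vanishes, which simplifies the computation.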
The naive implementation of backpropagation, particularly in the case of DNNs, can lead to numerical issues, such as divergence and overfitting. In order to avoid this, a regularization term in the form of an $\ell_2$ penalty on the weight parameters is usually added to the loss \cite{Goodfellow-et-al-2016}. Here, we introduce a percent root-mean-square difference (PRD) regularization that can be applied to a single feature-extraction VP layer in the case of a classification problem. The modified loss function we propose is \begin{align*} &J_{VP}(\boldsymbol \theta) := J_{CE}(\boldsymbol \theta) + \dfrac{\alpha}{N}\sum\limits_{i=1}^N \dfrac{r_2(x_i; \theta^{\text{(vp)}})}{\|x_i\|_2^2} = \\ &J_{CE}(\boldsymbol \theta) + \dfrac{\alpha}{N}\sum\limits_{i=1}^N \dfrac{\left\|x_i - \Phi\left(\theta^{\text{(vp)}}\right)\Phi^+\left(\theta^{\text{(vp)}}\right) x_i\right\|_2^2}{\|x_i\|_2^2}, \end{align*} where $\alpha \ge 0$ controls the penalty effect. The motivation behind this regularization is twofold. First, it is based on previous results that incorporate VP as feature extraction, which show that a precise VP approximation may lead to ``good'' features and therefore to high classification accuracy. Second, we expect the optimal VPNet classifier to extract the main characteristics of the input signals, which means that we presume a ``good'' approximation. This penalty term seemingly breaks the formulation of the backpropagation, but the original method can easily be extended by a bypass step that is applied to the VP layer only. The gradient with respect to the VP parameters is modified as follows: \[ \dfrac{\partial J_{VP}}{\partial \theta^{\text{(vp)}}} = \dfrac{\partial J_{CE}}{\partial \theta^{\text{(vp)}}} + \dfrac{\alpha}{N}\sum\limits_{i=1}^N \dfrac{1}{\|x_i\|_2^2} \cdot \dfrac{\partial r_2}{\partial \theta^{\text{(vp)}}}, \] where \[ \partial r_2 = -2 x_i^T (I-\Phi\Phi^+) \partial \Phi \Phi^+ x_i. \] We have thus derived the formulas for attaining the necessary gradient information for training VPNet via backpropagation. This allows us to train VPNets in the same way as convolutional and fully connected NNs. \section{Experiments} \label{sec:experiments} Using supervised classification problems inspired by particular biomedical signal-processing applications, we evaluated VPNet and compared it to fully connected and 1D convolutional networks. We present the details of the experiments, specifically the network architectures, the VP system of choice, and the synthetic and real datasets. \subsection{Network architecture} \label{sec:architecture} Here we provide details about the networks we compared, the learning methods, and the network parameters. The networks were feedforward, consisting of the following layers: \begin{itemize} \item \emph{VPNet}: a VP layer, a fully connected (FC) layer with ReLU activation, and an FC layer with SoftMax activation; \item \emph{Fully connected NN}: one or two FC layers with ReLU, and an FC layer with SoftMax; \item \emph{CNN}: a 1D convolutional and pooling layer, an FC layer with ReLU, and an FC layer with SoftMax. \end{itemize} For signal-classification tasks, the inputs were $\mathbb{R}^m$ samples, and the outputs were interpreted as a probability distribution over the predicted output classes. The FC layers performed linear mappings with nonlinear activation (ReLU or SoftMax). The VP layer was of the feature-extraction type (see Eq.~\eqref{eq:vplayerfeat}), and the CNN implemented 1D convolution and mean or maximum pooling as in Ref.~\refcite{serkan2016}. Based on the cross entropy loss with VP regularization (see Section~\ref{sec:vpbackprop}), offline backpropagation with the Adam optimizer \cite{Adam} was applied for learning. The hyperparameters and the parameter selection strategies were as follows: \begin{itemize} \item \emph{Learning parameters}: learning rate, VP penalty (VPNet only), batch size, and the number of epochs. The last two were fixed (512 and 10--100, respectively).
The optimal learning rate and penalty can be found by a grid search. \item \emph{Network parameters}: number of layers, number of neurons, VP dimension $n$ (VPNet only), convolutional and pooling kernel sizes (CNN only). Here we either used fixed dimensions so that the three architectures are comparable or evaluated possible configurations by a grid search. \item \emph{Layer parameters}: linear weights and biases, nonlinear VP parameters (VPNet only), kernel weights and biases (CNN only). These parameters were optimized by backpropagation. Initialization was random for the linear and kernel parameters. However, the VP parameters have interpretable meaning, which may lead to special initialization. We investigated two options: a grid search on the intervals of possible values and initialization by means of pretraining the VP layer to reconstruct input data (i.e., minimizing $r_2$ in Eq.~\eqref{eq:vpfunc}). The latter approach is especially useful in the case of complex waveforms which possibly need more VP parameters to learn. \end{itemize} \begin{figure*}[!t] \begin{center} \includegraphics[width=0.97\textwidth]{./Figures/vplayer.png} \caption{VPNet architecture for QRS classification: the VP layer takes the whole signal as input, decomposes the QRS complexes into linear combinations of adaptive Hermite functions, and then forwards the coefficients of the Hermite components to the next fully connected layer.} \label{fig:hermitevpnet} \end{center} \end{figure*} \subsection{VP system of choice} \label{sec:hermite} Although Hermite functions have shown great potential in many fields, such as molecular biology \cite{molbio}, computer tomography \cite{ctapp}, radar \cite{hermiteradar}, and physical optics \cite{optics}, their main application area is 1D biomedical signal processing. The shape features of Hermite functions are well suited to producing models of compactly supported waveforms such as spikes \cite{hexp8, hexp4, hexp5, hexp1, hexp7}, which is why we used them in ECG heartbeat classification. The nonlinear parameters $\tau$ and $\lambda$ in Eq.~\eqref{eq:adapthsys} represent the time shift and the width of the modeled waveforms, respectively. Thus, the network learns the positions and the shapes of those waves/spikes which separate one class from another. For instance, in electrocardiography, a heartbeat signal comprises three individual waveforms (i.e.,\ the QRS, T, and P waves), which represent different phases of the cardiac cycle, and their properties are directly used by medical experts for diagnosis. These features are learned by the VP layer: The amplitude and shape information is extracted by the linear coefficients $c_k$, while position and width of the waves are represented by $\tau$ and $\lambda$ (see Fig.~\ref{fig:hermitevpnet}). This approach is essentially different from CNN-based methods, where no direct connections exist between learned and medical descriptors. \subsection{Synthetic data} Our goal was, on the one hand, to synthesize a dataset where we know the actual structure of the data depending on the generator parameters. On the other hand, the dataset had to have practical relevance (i.e., be related to actual signal-processing problems). The generator system of choice was the adaptive Hermite system, which seemed to fulfill these expectations due to its applications in signal processing (see Section~\ref{sec:hermite}). The principles we followed to generate the dataset are discussed below. 
Let us consider a general signal model by means of a linear combination of adaptive Hermite functions of the form \[ x_i = \Phi(\tau_i, \lambda_i) \cdot c^{(i)} = \sum\limits_{k=0}^{n-1} c^{(i)}_{k} \Phi_k(\tau_i, \lambda_i), \] where $(\tau_i, \lambda_i)$ and $c^{(i)}$ $(i = 1,2,\dots,M)$ refer to the sample-specific nonlinear parameters and coefficients, respectively. Based on the completeness of the Hermite system in $L^2(\mathbb{R})$, this formula provides a general approximation for arbitrary signals. However, the signal-processing applications of VP and the Hermite system show that a proper selection of the nonlinear parameters may lead to accurate low-order approximations. Further investigation into this topic revealed that the nonlinear parameters correspond to coarse changes in the signal morphologies, while the coefficients reflect fine details~\cite{tbme_paper2}. For instance, we refer to Ref.~\refcite{ensembleECG}, where the nonlinear parameters were utilized as global, patient-specific descriptors and the coefficients as heartbeat-specific descriptors. Motivated by these aspects, we sought to construct a dataset where the nonlinear parameters are close to each other and the coefficients form noticeably separable classes. \begin{figure*}[!t] \vspace{-2em} \subfloat[][Coefficients]{\includegraphics[width=0.33\textwidth, trim=90 270 90 200, clip]{./Figures/synhermite_coeffs2.pdf}} \hfil \subfloat[][System parameters]{\includegraphics[width=0.33\textwidth, trim=90 270 90 200, clip]{./Figures/synhermite_params2.pdf}} \hfil \subfloat[][Samples]{\includegraphics[width=0.33\textwidth, trim=90 270 90 200, clip]{./Figures/synhermite_samples2.pdf}} \caption{Synthetic dataset: first, the coefficients~(a) and the parameters of the Hermite functions~(b) are generated, which are then used to compute the input samples~(c).} \label{fig:synhermite} \end{figure*} More precisely, we considered five coefficients (i.e.,\ $c^{(i)} \in \mathbb{R}^5$) so that the points $(c^{(i)}_{1},c^{(i)}_{2},c^{(i)}_{3}) \in \mathbb{R}^3$ formed three separable spherical shells that correspond to the target class labels (see Fig.~\ref{fig:synhermite}~(a)). The motivation behind spherical shells was twofold: they are simple enough for human interpretation, but sufficiently complex to require complex networks. The last two coefficients, $c^{(i)}_{4}$ and $c^{(i)}_{5}$, served as random factors and for amplitude normalization. Their effect is to mislead the classifier, but at the same time to decrease the chance of overfitting. The nonlinear parameters $\tau_i$ and $\lambda_i$ are similar for each sample up to a random factor, and the sample-specific parameter values are generated randomly with a given mean and variance (see Fig.~\ref{fig:synhermite}~(b)). This random factor simulates the nonlinear noise in the measurement. Fig.~\ref{fig:synhermite}~(c) presents the samples. We conclude that the simulation met our expectations: the resulting samples were hard to separate, but the underlying structure was easy to interpret. Note that this is a standard process for generating synthetic data, which has been utilized by other authors as well~\cite{datagen}. In the actual implementation, 5000 samples per class were generated for both the training and test sets. We evaluated a total of more than 8000 possible hyperparameter configurations of the three network architectures.
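The generation process can be summarized in the following sketch, which reuses the \texttt{adaptive\_hermite\_matrix} routine from Section~\ref{sec:vpforwardprop_Hermite}; all distribution parameters here are illustrative placeholders rather than the exact values used in our experiments:
\begin{verbatim}
import numpy as np

def make_synthetic_dataset(n_per_class=5000, m=100, n=5,
                           radii=(1.0, 2.0, 3.0), tau0=0.5,
                           lam0=12.0, jitter=0.05, seed=0):
    """Samples x = Phi(tau, lam) c; the class label is the index of
    the spherical shell on which (c_1, c_2, c_3) lies."""
    rng = np.random.default_rng(seed)
    X, y = [], []
    for label, r in enumerate(radii):
        for _ in range(n_per_class):
            v = rng.normal(size=3)
            c = np.empty(n)
            c[:3] = r * v / np.linalg.norm(v)          # point on shell
            c[3:] = rng.normal(scale=0.3, size=n - 3)  # random factors
            tau = tau0 * (1 + jitter * rng.standard_normal())
            lam = lam0 * (1 + jitter * rng.standard_normal())
            X.append(adaptive_hermite_matrix(m, n, tau, lam) @ c)
            y.append(label)
    return np.array(X), np.array(y)
\end{verbatim}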
The hyperparameter search covered the number of neurons in the hidden layer, the VP dimension, the CNN kernel and pooling sizes, the learning rates, and the VP initializations. The VP penalty was initially fixed to $0.1$. The simulations showed that the VP regularization can not only increase the learning speed, but also ensure convergence of an otherwise divergent configuration. In this regard, $0.1$ was found to be a good choice. The aggregated results are presented in Fig.~\ref{fig:results} (a) and (b). There, the configurations are grouped into six categories: VPNets of dimension $n=7$ and $n=9$ in Eq.~\eqref{eq:vp}, fully connected NNs (FCNN), and CNNs with kernel sizes of 5, 15, and 25. Fig.~\ref{fig:results} (a) shows the training accuracy curves corresponding to the best hyperparameter combination in each category. In Fig.~\ref{fig:results}~(b) and (c), the best test accuracies are plotted against the number of neurons in the hidden layer and the total number of learnable network parameters, respectively, for each category. We note that the $y$-axis of Fig.~\ref{fig:results}~(b) is restricted to the interval between $95\%$ and $100\%$ for better visual interpretability. In the following, we compare the performance of VPNet with respect to different network complexities. \begin{figure*}[!t] \vspace{-2em} \subfloat[][Best training curves]{\includegraphics[width=0.33\textwidth]{./Figures/synhermite_training.pdf}} \hfil \subfloat[][Best test accuracies]{\includegraphics[width=0.33\textwidth]{./Figures/synhermite_results.pdf}} \hfil \subfloat[][Best test accuracies]{\includegraphics[width=0.33\textwidth]{./Figures/synhermite_results_p.pdf}} \caption{Evaluation on synthetic data} \label{fig:results} \end{figure*} The results demonstrate the efficiency and potential capabilities of VPNet. Fig.~\ref{fig:results}~(a) indicates its fast learning ability. In fact, VPNet may converge faster than the other network architectures. Fig.~\ref{fig:results}~(b) and (c) show that VPNet can potentially outperform FCNNs and CNNs in terms of the best accuracies on the test set. Although all architectures achieved accuracies close to 100\%, VPNet achieved this with low structural complexity, which refers not only to the number of neurons, but also to the total number of network parameters (see Fig.~\ref{fig:results}~(c)). In this regard, VPNet is superior, because with FCNNs and CNNs of the same effective receptive field, the number of parameters (i.e., the linear and kernel weights and biases) grows linearly with the sample size and the number of neurons. With VPNet, in contrast, the number of nonlinear parameters ($p=2$) is independent of the sample size and the output dimension. For the sake of clarity, we remark that the kernel size or the number of convolutional layers in a CNN does not necessarily depend on the input size. However, in order to detect the global morphologic behavior of signals (e.g., heartbeats), the CNN is expected to have a large enough effective receptive field, which requires larger kernels or multiple stacked layers (the effective receptive field grows only linearly with depth); see also Ref.~\refcite{cnnerf}. \begin{tablehere} \tbl{Evaluation on synthetic data: best test accuracies vs.
number of parameters\label{tab:results}} {\begin{tabular}{lccc} \toprule \textbf{\#} & \textbf{VPNet} & \textbf{CNN} & \textbf{FCNN} \\ \colrule 30--39 & 85.86\% & & \\ 40--49 & 99.41\% & & \\ 50--59 & 99.57\% & & \\ 60--69 & 99.64\% & 71.65\% & \\ 70--79 & 99.87\% & 84.32\% & \\ 80--89 & 99.85\% & 93.33\% & \\ 90--99 & 99.94\% & 97.71\% & \\ 100--119 & 99.97\% & 98.86\% & \\ 120--139 & 99.98\% & 99.41\% & 81.14\%\\ 140--159 & 99.97\% & 99.77\% & \\ 160--179 & 99.96\% & 99.92\% & 97.01\%\\ 180--199 & & 99.90\% & \\ 200--239 & & 99.85\% & 98.34\%\\ 240--279 & & 99.89\% & 99.47\%\\ 280--319 & & 99.86\% & 99.65\%\\ 320--359 & & & 99.68\%\\ 360--399 & & & 99.77\%\\ 400--479 & & & 99.67\%\\ 480-- & & & 99.91\%\\ \botrule \end{tabular}} \end{tablehere} In addition to Fig.~\ref{fig:results}~(c), the best test accuracies depending on the number of learnable parameters are given in Table~\ref{tab:results}. Here, the numbers of parameters are grouped into bins for easier interpretation. The results show that VPNet outperforms the CNNs and FCNNs in each bin and reaches peak performance earlier than the other two. Besides the numerical comparison, statistical hypothesis tests were also performed for each bin, where applicable. The differences between the best performing VPNets and CNNs are statistically significant according to both paired-sample $t$-tests and McNemar's tests at the 5\% significance level. \subsection{Real ECG data} We sought to prove the relevance of VPNet not only in simulations, but also on real signal-processing data. We chose a particular ECG signal-processing problem: the classification of heartbeat arrhythmias (see Ref.~\refcite{aami}). The state of the art is supervised ML by traditional approaches (see Refs.~\refcite{ECGsurvey,ECGsurvey2,ECGsurvey3} and Section~\ref{sec:relwork}), including VP-based static feature extraction \cite{ensembleECG,ratECG,ratECGclass}. Here we focused on a related subproblem where we could compare the performance of the selected network configurations. In more detail, we investigated the separation of the two largest arrhythmia classes: normal and ventricular ectopic beats (VEBs). The source of the data is the benchmark MIT-BIH Arrhythmia Database \cite{MIT-BIH}, available from PhysioNet \cite{PhysioNet}. The database is split into sets DS1 and DS2 according to Ref.~\refcite{deChazal}, for training and inference, respectively. The whole database contains more than 100~000 annotated heartbeats, but it is heavily biased towards the normal class, which usually distorts the performance evaluation. Here, we investigated two cases for data acquisition. First, a balanced subset was extracted: all VEBs and the same number of normal beats from each record. This yielded 4260 plus 4260 heartbeat signals for training (set DS1), and 3220 plus 3220 signals for testing (set DS2). This balanced subset is expected to provide an undistorted evaluation and a fair comparison of the NN architectures. The second, unbalanced subset consists of all normal beats and VEBs of the whole database, yielding around 50~000 heartbeats for both training and testing. This unbalanced subset represents a more realistic scenario and supports partial comparability to the state of the art. Note that the DS1 and DS2 heartbeats come from different patients, which means that there is no data leakage in either case. We used the preprocessing and heartbeat extraction methods discussed in Ref.~\refcite{ratECG}, but chose a window size of 100 samples ($\sim$ 0.28 s) around the R peak annotations.
\subsection{Real ECG data} We sought to demonstrate the relevance of VPNet not only in simulation, but also on real signal-processing data. We chose a particular ECG signal-processing problem: the classification of heartbeat arrhythmia (see Ref.~\refcite{aami}). The state of the art is supervised ML by traditional approaches (see Refs.~\refcite{ECGsurvey,ECGsurvey2,ECGsurvey3} and Section~\ref{sec:relwork}), including VP-based static feature extraction \cite{ensembleECG,ratECG,ratECGclass}. Here we focused on a related subproblem where we could compare the performance of the selected network configurations. In more detail, we investigated the separation of the two largest arrhythmia classes: normal and ventricular ectopic beats (VEBs). The source of the data is the benchmark MIT-BIH Arrhythmia Database \cite{MIT-BIH}, available from PhysioNet \cite{PhysioNet}. The database is split into sets DS1 and DS2 according to Ref.~\refcite{deChazal}, for training and inference, respectively. The whole database contains more than 100~000 annotated heartbeats, but it is heavily biased towards the normal class, which usually distorts the performance evaluation. Here, we investigated two cases for data acquisition. First, a balanced subset was extracted: all VEBs and the same number of normal beats from each record. This yielded 4260 plus 4260 heartbeat signals for training (set DS1), and 3220 plus 3220 signals for testing (set DS2). This balanced subset is expected to provide an undistorted evaluation and a fair comparison of the NN architectures. The second, unbalanced subset consists of all normal beats and VEBs of the whole database, yielding around 50~000 heartbeats for both training and testing. This unbalanced subset represents a more realistic scenario, and supports partial comparability to the state of the art. Note that the DS1 and DS2 heartbeats come from different patients, which means that there is no data leakage in either case. We used the preprocessing and heartbeat extraction methods discussed in Ref.~\refcite{ratECG}, but chose a window size of 100 samples ($\sim$ 0.28 s) around the R peak annotations. This window was expected to cover the whole QRS complex and potentially the PR and ST segments of each heartbeat. Example heartbeats of the two classes are displayed in Fig.~\ref{fig:heartbeats}. \begin{figurehere} \begin{center} \subfloat[][Normal]{\includegraphics[width=0.5\linewidth]{./Figures/ecg_train_n_2.pdf}} \subfloat[][VEB]{\includegraphics[width=0.5\linewidth]{./Figures/ecg_train_v_2.pdf}} \caption{Example heartbeats of the training set} \label{fig:heartbeats} \end{center} \end{figurehere} \begin{figure*}[!t] \vspace{-2em} \subfloat[][Normal beat]{\includegraphics[width=0.33\textwidth, trim=90 270 90 283, clip]{./Figures/normal1.pdf}} \hfil \subfloat[][Abnormal beat]{\includegraphics[width=0.33\textwidth, trim=90 270 90 283, clip]{./Figures/abnormal1.pdf}} \hfil \subfloat[][Abnormal beat]{\includegraphics[width=0.33\textwidth, trim=90 270 90 283, clip]{./Figures/abnormal3.pdf}} \caption{Output of a trained VP layer: for a normal beat~(a) and two abnormal beats~(b)-(c).} \label{fig:interpret} \end{figure*} To demonstrate the interpretability of the results, we depicted the response of a trained VP layer to three input QRS complexes in Fig.~\ref{fig:interpret}. It can be seen that the Hermite-VP layer in fact learned the position $\tau$ and the width $\lambda$ of the QRS complexes such that it gives an approximation (red) to the meaningful part of the original (blue) curves. In addition to the QRS complex, the input data window may include irrelevant information, such as baseline wander, noise, and parts of the P and T waves. However, this irrelevant information is discarded due to the optimization of $\tau$ and $\lambda$, and thus only the meaningful part of the input signal is approximated at the end of the training. Consequently, the VP layer is likely to be more tolerant to noise as well. In fact, the Hermite-VP representation of ECG recordings can simultaneously cope with various noise sources such as baseline wander and power-line interference~\cite{tbme_paper2}. The layer can also retain diagnostically important morphological information via the extracted coefficients. In Fig.~\ref{fig:interpret}, the red curve is equal to the linear combination of the Hermite functions, whose coefficients are the output of the VP layer. The magnitudes of these coefficients indicate the presence of the individual elementary components in the signal. For instance, Fig.~\ref{fig:interpret}~(b) shows an asymmetric QRS complex, which is reflected in a high coefficient $c_2$ that corresponds to an odd Hermite function. In contrast, Fig.~\ref{fig:interpret}~(c) plots a highly symmetric QRS complex, which resembles a Gaussian function, as indicated by the high value of $c_1$. Therefore, both the parameters $\tau,\ \lambda$ and the output $c_i$'s of the VP layer are interpretable. Note that the level of interpretability tends to decrease as we connect more and more hidden layers to the network. The reason is that the whole network does not seek to recover the parameters with which the data were constructed; rather, it searches for the parameters that maximize the separability of the classes. Since the term presented in Section~\ref{sec:vpbackprop} penalizes the model for not reconstructing the original signal, a larger value for $\alpha$ mitigates the decreased interpretability.
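To illustrate the mechanism numerically, the sketch below reconstructs a signal window from dilated and translated Hermite functions: for a fixed pair $(\tau,\lambda)$ the linear coefficients follow from an ordinary least-squares solve, which is exactly the inner projection step of VP. The parametrization $x=(t-\tau)/\lambda$ and the toy window values are assumptions for illustration; in VPNet, $\tau$ and $\lambda$ are optimized by backpropagation rather than fixed.

\begin{verbatim}
import numpy as np
from math import factorial, pi, sqrt
from scipy.special import eval_hermite

def hermite_functions(t, tau, lam, n_funcs):
    """Matrix whose columns are orthonormal Hermite functions,
    translated by tau and dilated by lam."""
    x = (t - tau) / lam
    cols = []
    for n in range(n_funcs):
        norm = 1.0 / sqrt(2.0**n * factorial(n) * sqrt(pi))
        cols.append(norm * eval_hermite(n, x) * np.exp(-x**2 / 2.0))
    return np.column_stack(cols)

def vp_project(y, t, tau, lam, n_funcs=4):
    """Inner linear step of VP: best coefficients and reconstruction
    for fixed nonlinear parameters (tau, lam)."""
    Phi = hermite_functions(t, tau, lam, n_funcs)
    c, *_ = np.linalg.lstsq(Phi, y, rcond=None)
    return c, Phi @ c

t = np.arange(100.0)                       # 100-sample window
y = np.exp(-0.5 * ((t - 50.0) / 6.0)**2)   # toy QRS-like bump
c, y_hat = vp_project(y, t, tau=50.0, lam=8.0)
\end{verbatim}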
However, the VP layer provides a fully transparent feature extractor, which directly influences the output of the network due to the least-squares penalty in the modified loss function $J_{VP}$. Therefore, a trained VP layer can be used to improve the generalization properties of DNNs by synthesizing more realistic data samples in the learned feature space~\cite{dataaug, feataug}. \begin{table*}[!b] \tbl{Performance evaluation on real data\label{tab:ecg_results}} {\begin{tabular}{ccccccc} \toprule \multicolumn{2}{c}{\multirow{2}{*}{\textbf{Case/Method}}} & \textbf{Total} & \multicolumn{2}{c}{\textbf{Normal}} & \multicolumn{2}{c}{\textbf{VEB}} \\ && \textbf{accuracy} & \textbf{$Se$} & \textbf{$+P$} & \textbf{$Se$} & \textbf{$+P$} \\ \colrule \multirow{3}{*}{Balanced} & VPNet & 96.65\% & 99.38\% & 94.23\% & 93.91\% & 99.34\%\\ & FCNN & 94.38\% & 93.79\% & 94.91\% & 94.97\% & 93.86\% \\ & CNN & 96.34\% & 97.76\% & 95.05\% & 94.91\% & 97.70\%\\ \colrule \multirow{3}{*}{Unbalanced} & VPNet & 98.45\% & 99.57\% & 98.78\% & 83.07\% & 93.37\%\\ & FCNN & 97.49\% & 98.50\% & 98.81\% & 83.70\% & 80.21\% \\ & CNN & 98.35\% & 99.39\% & 98.85\% & 84.07\% & 90.93\%\\ \colrule \multicolumn{2}{c}{\textit{State-of-the-art} \cite{ECGsurvey}} & N/A & 80--99\% & 85--99\% & 77--96\% & 63--99\% \\ \botrule \end{tabular}} \end{table*} The performance of VPNet was measured in a similar way as in the synthetic case, with more than 3500 possible hyperparameter configurations examined. The aggregated results are presented in Fig.~\ref{fig:ecg_results}~(a) and (b), for the balanced and unbalanced case, respectively. Here, the FCNN and CNN cases were restricted so that the output dimensions of the first layer were similar to the VP dimensions, and only the number of neurons in the hidden layer was varied. Note that VPNet again required a remarkably low number of network parameters. We also evaluated another, larger FCNN configuration (FCNN++), where the number of neurons in the first layer was not restricted to the VP dimension $n$, but was set equal to that of the second, hidden layer. The structure and distribution of the training and test data were more complex than in the synthetic case, which clearly made the classification task more difficult for all network architectures. Again, we conclude that VPNet can outperform FCNNs and CNNs for low-complexity networks. Note that VPNet reaches peak performance at low network complexity (at a low number of hidden neurons, i.e. at a low number of system parameters), and the performance starts to decrease early if we increase the complexity. This behaviour is slightly different for CNNs and FCNNs. A possible reason is that the first layer of VPNet acts as a model-based feature extraction, i.e. provides a low-dimensional sparse representation of the input (4 or 8 features for 100 samples). Increasing the complexity of the fully connected layers of VPNet without increasing the VP parameters or features will lead to over-parametrization and overfitting. \begin{figure*}[!t] \vspace{-2em} \hfil \subfloat[][Balanced subset]{\includegraphics[width=0.33\textwidth]{./Figures/ecg_results.pdf}} \hfil \subfloat[][Unbalanced subset]{\includegraphics[width=0.33\textwidth]{./Figures/ecg_results_full.pdf}} \hfil \caption{Evaluation on real data, best test accuracies} \label{fig:ecg_results} \end{figure*} In addition to the total accuracies, the usual performance metrics are also provided in Table~\ref{tab:ecg_results}. Namely, the sensitivity (recall) $Se$ and the positive predictivity (precision) $+P$ were evaluated for each class, as \begin{equation} Se = \frac{TP}{TP+FN} \quad\text{and}\quad +P = \frac{TP}{TP+FP}, \end{equation} where $TP$, $FP$, and $FN$ are the numbers of true positive, false positive, and false negative matches, respectively.
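For completeness, a small sketch of how these per-class metrics can be computed from label arrays; the label encoding is an arbitrary assumption.

\begin{verbatim}
import numpy as np

def se_pp(y_true, y_pred, positive):
    """Sensitivity Se = TP/(TP+FN) and positive predictivity
    +P = TP/(TP+FP) for one class treated as 'positive'."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    tp = np.sum((y_pred == positive) & (y_true == positive))
    fp = np.sum((y_pred == positive) & (y_true != positive))
    fn = np.sum((y_pred != positive) & (y_true == positive))
    return tp / (tp + fn), tp / (tp + fp)

# e.g. with labels 'N' (normal) and 'V' (VEB):
se_n, pp_n = se_pp(['N','N','V','V'], ['N','V','V','N'], positive='N')
\end{verbatim}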
Reference intervals of the state of the art are also given, according to the survey Ref.~\refcite{ECGsurvey}. We note that a direct comparison is not always possible, since most of these results refer to a 3- or 5-class classification of the database. \section{Conclusion} \label{sec:conc} We developed a novel model-driven NN which incorporates expert knowledge via variable projections. The VP layer is a generic, learnable feature extractor or filtering method that can be adjusted to several 1D signal-processing problems by choosing an application-specific function system. The proposed architecture is simple in the sense that it has only a few, interpretable parameters. Our case studies showed that VPNet can achieve similar or slightly better classification accuracy than its fully connected and CNN counterparts while using a smaller number of parameters. In our tests, the convergence of VPNet was slightly better than that of the CNN and FCNN counterparts. Moreover, the VP layer required only two parameters for learning in all cases, whereas the number of weights and biases for the FCNN and CNN grew linearly with the length of the input signals. These results show that VPNet can be applied effectively to various problems in 1D signal processing including classification, regression, and clustering, which we will investigate as part of future work. \section*{Broader Impact} We have proposed a new compact and interpretable neural network architecture that can have a broader impact mainly in two fields: machine learning and signal processing. The key idea is to create a network that combines the representation abilities of variable projections and the prediction abilities of NNs in the form of a composite model. This concept can be generalized to other machine learning algorithms. For instance, VP-SVM and other combined VP methods, such as VP-K-means and VP-C-means, can extend the potential areas of application, including classification, regression, and clustering problems. Since the nonlinear parameters of the VP layer are interpretable, they can also be used in feature-space augmentation, where new data is generated from existing data in order to improve the generalization properties of DNNs \cite{dataaug, feataug}. Signal-processing aspects of VPNet were discussed in the ECG heartbeat classification case study. Additionally, VPNet may have great potential in a wide range of applications, especially where VP has proven to be an efficient estimation method (cf. Section~\ref{sec:hermite}). Note that many already existing adaptive signal models have been reformulated as VP problems \cite{golub_pereyra2003, SNLLS2021}; however, parameterized wavelets \cite{burrus} have not yet been studied in this context. Therefore, we encourage researchers to study this class of wavelets in the framework of VPNet. Model-driven neural network solutions can have a great impact in biomedical engineering and healthcare informatics, where medical data classification alone is usually not enough, as physiological interpretation and explainability of the results are also important.
However, special care should be taken to avoid automation bias when these approaches are applied to real-world problems \cite{autbias}. These clinical decision-support systems are difficult to validate, since this requires medical expertise and vast amounts of data. The latter is naturally unbalanced in the sense that one class of signals (e.g.\ from healthy patients) is overrepresented compared to the others. In order to address these potential biases, VPNet should be tested in various scenarios that include, for instance, noisy and incomplete measurements, or unbalanced data. \nonumsection{Acknowledgments} \noindent This work has been supported by the “University SAL Labs” initiative of Silicon Austria Labs (SAL) and its Austrian partner universities for applied fundamental research for electronic based systems; and the COMET-K2 "Center for Symbiotic Mechatronics" of the Linz Center of Mechatronics (LCM), funded by the Austrian federal government and the federal state of Upper Austria; and EFOP-3.6.3-VEKOP-16-2017-00001: Talent Management in Autonomous Vehicle Control Technologies – The Project is supported by the Hungarian Government and co-financed by the European Social Fund. This paper was supported by the J\'anos Bolyai Research Scholarship of the Hungarian Academy of Sciences. \nonumsection{Data Availability} \noindent The data and code that support the findings of this study are available at Ref.~\refcite{vpnetgit}.
\section{Introduction} In recent years, integral field spectrographs (IFSs) have largely substituted long-slit spectrographs in studies designed to characterize the abundance distribution of chemical elements in external galaxies. IFSs have permitted for the first time to measure abundances throughout the entire two-dimensional extent of a galaxy (or a large part thereof) and, thus, to detect azimuthal and radial trends (Vogt et al. 2017). In the last few years, several observational works have found evidence of significant azimuthal variations in the abundance gradients of external galaxies. S{\'a}nchez et al. (2015) and S{\'a}nchez-Menguiano et al. (2016) analyzed in detail the chemical inhomogeneities of the external galaxy NGC 6754 with the Multi Unit Spectroscopic Explorer (MUSE), concluding that the azimuthal variations of the oxygen abundances are more evident in the external part of the considered galaxy. Vogt et al. (2017) studied the galaxy HCG 91c with MUSE and arrived at the conclusion that the enrichment of the interstellar medium has proceeded preferentially along spiral structures, and less efficiently across them. Azimuthal variations in the oxygen abundance have also been detected in the external galaxy M101 by Li et al. (2013). Ho et al. (2017) presented the spatial distribution of oxygen in the nearby spiral galaxy NGC 1365. This galaxy is characterized by a negative abundance gradient for oxygen along the disc, but systematic azimuthal variations of $\sim$ 0.2 dex occur over a wide range of Galactocentric radii and peak at the two spiral arms of NGC 1365. In the same work, the authors presented a simple chemical evolution model to reproduce the observations. Azimuthal variations can be explained by two physical processes: a local self-enrichment phase in the inter-arm region, followed by a mixing and dilution phase that becomes dominant on larger (kpc) scales when the spiral density waves pass through. Probing azimuthal inhomogeneities of chemical abundances has been attempted in the Milky Way system too. Balser et al. (2011), measuring H II region oxygen abundances, found that the slopes of the gradients differ by a factor of two in their three Galactic azimuth angle bins. Moreover, significant local iron abundance inhomogeneities have also been observed with Galactic Cepheids (Pedicelli et al. 2009; Genovali et al. 2014). Balser et al. (2015) underlined the importance of azimuthal metallicity structure in the Milky Way disc, making for the first time radio recombination line and continuum measurements of 21 H II regions located between Galactic azimuths $\phi$=90$^{\circ}$ and 130$^{\circ}$. The radial gradient in [O/H] is -0.082 $\pm$ 0.014 dex kpc$^{-1}$ for $\phi$=90$^{\circ}$-130$^{\circ}$, about a factor of 2 higher than the average value between $\phi$=0$^{\circ}$-60$^{\circ}$. It was suggested that this may be due to radial mixing from the Galactic Bar. Analyzing the Scutum Red-Supergiant (RSG) clusters at the end of the Galactic Bar, Davies et al. (2009) concluded that a simple one-dimensional parameterisation of the Galaxy abundance patterns is insufficient at low Galactocentric distances, as large azimuthal variations may be present. Combining these results with other data in the literature points towards large-scale ($\sim$ kpc) azimuthal variations in abundances at Galactocentric distances of 3-5 kpc.
It thus appears that the usual approximation of chemical evolution models, which assumes instantaneous mixing of metallicity in the azimuthal direction, is unsubstantiated. Azimuthal abundance gradients due to radial migration in the vicinity of spiral arms in a cosmological context have been studied in detail by Grand et al. (2012, 2014, 2016) and S{\'a}nchez-Menguiano et al. (2016). Alternatively, Khoperskov et al. (2018) investigated the formation of azimuthal metallicity variations in the discs of spiral galaxies in the absence of initial radial metallicity gradients. Using high-resolution $N$-body simulations, they modeled composite stellar discs, made of kinematically cold and hot stellar populations, and studied their response to spiral arm perturbations. They found that azimuthal variations in the mean metallicity of stars across a spiral galaxy are not necessarily a consequence of the reshaping, by radial migration, of an initial radial metallicity gradient. They indeed arise naturally also in stellar discs which initially have only a negative vertical metallicity gradient. The aim of this paper is to develop a detailed 2D galactic disc chemical evolution model, able to follow the evolution of several chemical elements as in previous 1D models, but also taking into account azimuthal surface density variations. In this paper, when we refer to the thin and thick discs we mean the low- and high-[$\alpha$/Fe] sequences in the [$\alpha$/Fe]-[Fe/H] plane. Defining the thin and thick discs morphologically, rather than chemically, identifies a mixture of stars from both the low- and high-[$\alpha$/Fe] sequences, and vice versa (Minchev et al. 2015, Martig et al. 2016). It is, therefore, very important to make this distinction and avoid confusion. We follow the chemical evolution of the thin disc component, i.e. the low-$\alpha$ population. We assume that the oldest stars of that low-$\alpha$ component are associated with ages of $\sim$ 11 Gyr, in agreement with asteroseismic age estimates (Silva Aguirre et al. 2018). Starting from the classical 1D Matteucci \& Fran{\c c}ois (1989) approach (the Galactic disc is assumed to be formed by an infall of primordial gas), we included the 2D surface density fluctuations of the Milky Way disc chemo-dynamical model by Minchev et al. (2013) (hereafter MCM13), as well as analytical spiral arm prescriptions. Our paper is organized as follows. In Section 2, we describe the framework used for the new model. In Section 2.1 the adopted nucleosynthesis prescriptions are reported. In Section 2.2 the density fluctuations from the chemo-dynamical model by MCM13 are described. In Section 2.3 we present the analytical expressions for the density perturbations due to Galactic spiral arms. In Section 3 we present our results based on the density fluctuations from the chemo-dynamical model and on the analytical spiral arm prescription. Finally, our conclusions are drawn in Section 4. \section{A 2D galactic disc chemical evolution model} The basis for the 2D chemical evolution model we develop in this section is the classical 1D Matteucci \& Fran{\c c}ois (1989) approach, in which the Galactic disc is assumed to be formed by an infall of primordial gas.
The infall rate for the thin disc (the low-$\alpha$ sequence) of a certain element $i$ at the time $t$ and Galactocentric distance $R$ is defined as: \begin{equation}\label{infall} B(R,t,i)= X_{A_i} \, b(R) \, e^{-\frac{t}{\tau_D(R)}}, \end{equation} where $X_{A_i}$ is the abundance by mass of the element $i$ in the infall gas, which here is assumed to be primordial, while the quantity $\tau_D(R)$ is the time-scale of gas accretion. The coefficient $b(R)$ is constrained by imposing a fit to the observed current total surface mass density $\Sigma_{D}$ of the thin disc as a function of the Galactocentric distance, given by: \begin{equation} \Sigma_D(R,t_G)=\Sigma_{D,0}e^{-R/R_{D}}, \label{mass} \end{equation} where $t_G$ is the present time, $\Sigma_{D,0}$ is the central total surface mass density and $R_{D}$ is the disc scale length. The fit to the $\Sigma_D(R)$ quantity using the infall rate law of eq. (\ref{infall}) is given by: \begin{equation} \sum_i \int_0^{t_G} X_{A_i} b(R) e^{-\frac{t}{\tau_D(R)}} dt = \Sigma_D (R,t_G). \end{equation} The observed total disc surface mass density in the solar neighbourhood is $\Sigma_D (8 \mbox{ kpc}, t_G)=$ 54 M$_{\odot}$ pc$^{-2}$ (see Romano et al. 2000 for a discussion of the choice of this surface density). An infall rate of gas that follows an exponential law is a fundamental assumption adopted in most of the detailed numerical chemical evolution models in which the instantaneous recycling approximation (IRA) is relaxed. An important ingredient to reproduce the observed radial abundance gradients along the Galactic disc is the inside-out formation of the disc (Spitoni \& Matteucci 2011, Cescutti et al. 2007, Mott et al. 2013). The timescale $\tau_D(R)$ for the mass accretion is assumed to increase with the Galactic radius following a linear relation given by (see Chiappini et al. 2001): \begin{equation} \tau_{D}(R) = 1.033 R(\mbox{kpc}) - 1.27 \mbox{ Gyr} \label{tau} \end{equation} for Galactocentric distances $\geq$ 4 kpc. For the star formation rate (SFR) we adopt a Kennicutt (1998) law proportional to the gas surface density: \begin{equation} \Psi(R,t) = \nu \Sigma_g^k(R,t), \label{SFR} \end{equation} where $\nu$ is the star formation efficiency (SFE) and $\Sigma_g(R,t)$ is the gas surface density at a given position and time. The exponent $k$ is fixed to 1.5 (see Kennicutt 1998). We divide the disc into concentric shells 1 kpc wide in the radial direction. Each shell is itself divided into 36 segments of width $\ang{10}$; therefore, at a fixed Galactocentric distance 36 zones are created. With this new configuration we can take into account variations of the SFR along the annular region, produced by density perturbations driven by spiral arms or bars. Therefore, an azimuthal dependence appears in eq. (\ref{SFR}), which can be rewritten as follows: \begin{equation} \Psi(R,t,\phi) = \nu \Sigma_g^k(R,t,\phi). \label{SFR2} \end{equation} In this paper we will show results related to the effects of the density fluctuations of the chemo-dynamical model of MCM13, and we will test the effects of an analytical formulation for the density perturbations created by spiral arm waves. The reference model without any azimuthal density perturbation is similar to the one by Cescutti et al. (2007), which has been shown to be quite successful in reproducing the most recent abundance gradients observed in Cepheids (Genovali et al. 2015).
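As an illustration of how these ingredients combine, the following minimal Python sketch normalizes $b(R)$ against the present-day surface density of eq. (\ref{mass}) and evaluates the exponential infall and the Kennicutt SFR on the $(R,\phi)$ grid described above; the numerical values of $\Sigma_{D,0}$, $R_D$, $\nu$, and $t_G$ are illustrative placeholders rather than the calibrated values of the model.

\begin{verbatim}
import numpy as np

t_G = 13.7                          # assumed present Galactic age (Gyr)
R   = np.arange(4.0, 19.0)          # 1-kpc-wide shells (illustrative grid)
phi = np.deg2rad(np.arange(0.0, 360.0, 10.0))   # 36 azimuthal zones

def tau_D(R):
    # inside-out accretion timescale: 1.033 R - 1.27 Gyr (R >= 4 kpc)
    return 1.033 * R - 1.27

def sigma_D_today(R, sigma_D0=531.7, R_D=3.5):
    # exponential disc profile; sigma_D0 chosen so that
    # Sigma_D(8 kpc) ~ 54 Msun/pc^2 for R_D = 3.5 kpc
    return sigma_D0 * np.exp(-R / R_D)

def b_of_R(R):
    # normalization so the time-integrated infall matches sigma_D_today
    tau = tau_D(R)
    return sigma_D_today(R) / (tau * (1.0 - np.exp(-t_G / tau)))

def infall_rate(R, t):
    # exponential infall law, summed over elements
    return b_of_R(R) * np.exp(-t / tau_D(R))

def kennicutt_sfr(sigma_gas, nu=1.0, k=1.5):
    # Kennicutt law; works equally on a 2D (R, phi) array of gas density
    return nu * sigma_gas ** k
\end{verbatim}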
\subsection{Nucleosynthesis prescriptions} In this work we present results for the azimuthal variations of the abundance gradients of oxygen and iron. As done in a number of chemical evolution models in the past (e.g. Cescutti et al. 2006, Spitoni et al. 2015, 2019, Vincenzo et al. 2019), we adopt the nucleosynthesis prescriptions by Fran{\c c}ois et al. (2004), who provided theoretical predictions of the [element/Fe]-[Fe/H] trends in the solar neighbourhood for 12 chemical elements. Fran{\c c}ois et al. (2004) selected the sets of yields required to best fit the data (details related to the observational data collection are in Fran{\c c}ois et al. 2004). In particular, for the yields of Type II SNe they found that the Woosley \& Weaver (1995) ones provide the best fit to the data: no modifications are required for the yields of iron, as computed for solar chemical composition, whereas for oxygen, the best results are given by yields computed as functions of the metallicity. The theoretical yields by Iwamoto et al. (1999) are adopted for Type Ia SNe, and the prescriptions for single low- and intermediate-mass stars are by van den Hoek \& Groenewegen (1997). Although the Fran{\c c}ois et al. (2004) prescriptions still provide reliable yields for several elements, we must be cautious about oxygen. Recent results have shown that rotation can influence the oxygen nucleosynthesis in massive stars (Meynet \& Maeder 2002) and therefore the chemical evolution (Cescutti \& Chiappini 2010), in particular at low metallicity. However, this does not affect our results, since the data considered in this project are relatively metal rich. Moreover, we are mostly interested in differential effects, rather than absolute values. \subsection{2D disc surface density fluctuations from the MCM13 model} We consider the gas density fluctuations present in the Milky Way-like simulation obtained by Martig et al. (2012) and chosen in MCM13 for their chemodynamical model. The simulated galaxy has a number of properties consistent with the Milky Way, including a central bar. MCM13 followed the disc evolution for a time period of about 11 Gyr, which is close to the age of the oldest low-$\alpha$ disc stars in the Milky Way. The classical 1D chemical evolution model is quite successful in reproducing the abundance gradients along the Galactic disc (Cescutti et al. 2007). The chemical evolution model used by MCM13 was very similar to the one adopted here; a comparison between its star formation history and that of the simulation was presented in Fig. A.1 of Minchev et al. (2014), showing good agreement. To extract the gas density variations, we binned the disc into 18 1-kpc-wide radial bins and 10$^{\circ}$-wide azimuthal bins at $|z|<$ 1 kpc. The time resolution is 37.5 Myr for 11 Gyr of evolution. All of the above is used for our new model described below. With the aim of preserving the general trend of the 1D chemical evolution model, we introduce a density contrast function $f$ related to the perturbations originated by the MCM13 model. At a fixed Galactocentric distance $R$, time $t$ and azimuthal coordinate $\phi$, the new surface mass density is: \begin{equation} \Sigma_D(R,t,\phi)=\Sigma_D(R,t) f(\phi,R,t). \end{equation} We impose that the average value of the density contrast $f$ is 1, i.e.: \begin{equation} \langle f(\phi,R,t) \rangle_{\phi}=1. \end{equation} This guarantees that, at a fixed Galactocentric distance $R$ and time $t$, the average surface mass density is the one predicted by the 1D chemical evolution model.
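In practice, the normalization condition above amounts to dividing the extracted fluctuation map by its azimuthal mean at every radius and time step; a minimal sketch, assuming the fluctuations at one time step are stored as an array of shape (radius, azimuth):

\begin{verbatim}
import numpy as np

def density_contrast(raw_fluct):
    """Rescale raw surface-density fluctuations so that the
    azimuthal mean of the contrast f is exactly 1 at each radius."""
    return raw_fluct / raw_fluct.mean(axis=1, keepdims=True)

def perturbed_sigma(sigma_D_1d, raw_fluct):
    """Apply the contrast to the unperturbed 1D surface density:
    Sigma_D(R, phi) = Sigma_D(R) * f(phi, R)."""
    f = density_contrast(raw_fluct)
    return sigma_D_1d[:, None] * f   # broadcast the 1D profile over azimuth
\end{verbatim}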
\subsection{ISM density fluctuations from an analytical spiral structure} Here we investigate the effect of an analytical spiral arm formulation on the azimuthal variations of the abundance gradients. In particular, we analyse steady wave spiral patterns. As suggested by Bertin et al. (1989) and Lin \& Shu (1966), when the number of important spiral modes of oscillation is small, the spiral structure is expected to have a highly regular grand design and to evolve in time in a quasi-stationary manner. In this work, we consider the model presented by Cox \& G{\'o}mez (2002). The expression for the time evolution of the density perturbation created by the spiral arms, referred to an inertial reference frame not corotating with the Galactic disc, in terms of the surface mass density, is: \begin{equation} \Sigma_S(R,\phi,t)= \chi(R,t_G) M(\gamma), \end{equation} where $\chi(R,t_G)$ is the present-day amplitude of the spiral density: \begin{equation} \chi(R,t_G)=\Sigma_{S,0} e^{-\frac{R-R_0}{R_{S}}}, \end{equation} while $M(\gamma)$ is the modulation function for the ``concentrated arms'' given by Cox \& G{\'o}mez (2002). The $M(\gamma)$ function can be expressed as follows: \begin{equation} M(\gamma)= \left(\frac {8}{3 \pi} \cos(\gamma)+\frac {1}{2} \cos(2\gamma) +\frac{8}{15 \pi} \cos(3\gamma) \right), \label{MGAMMA} \end{equation} \begin{equation} \gamma(R,\phi,t)= m\left[\phi +\Omega_s t -\phi_p(R_0) -\frac{\ln(R/R_0)}{\tan(\alpha)} \right]. \label{gamma} \end{equation} In eq. (\ref{gamma}), $m$ is the number of spiral arms, $\alpha$ is the pitch angle, $R_S$ is the radial scale-length of the drop-off in density amplitude of the arms, $\Sigma_{S,0}$ is the surface arm density at the fiducial radius $R_0$, $\Omega_s$ is the pattern angular velocity (with the azimuthal coordinate $\phi$ increasing counter-clockwise and a clockwise rotation), and $\phi_p(R_0)$ is the coordinate $\phi$ computed at $t$=0 Gyr and $R_0$. An important feature of such a perturbation is that its average density at a fixed Galactocentric distance $R$ and time $t$ is zero: \begin{equation} \langle \Sigma_S \rangle_{\phi}= \Sigma_{S, 0} e^{-\frac{R-R_0}{R_{S}}} \langle M(\gamma) \rangle_{\phi}=0. \end{equation} In Fig. \ref{MGAMMAF} we show the modulation function $M(\gamma)$ of the ``concentrated arms'' on the Galactic plane using the model parameters suggested by Cox \& G{\'o}mez (2002): $R_0=8$ kpc, $\alpha=\ang{15}$, $R_S=7$ kpc. The modulation function is computed at 5 Gyr, assuming the angular velocity value $\Omega_s$ = 20 km s$^{-1}$ kpc$^{-1}$ and $\phi_p(R_0)=0$. In this work we aim to investigate the effects of spiral arm density perturbations on the chemical enrichment by the ejecta from stellar populations perfectly corotating with the Galactic disc. Our purpose here is the study of regular gas density perturbations linked to simple but reliable spiral arm descriptions. To properly describe the temporal evolution of the local density perturbations, the spiral pattern speed relative to the Galactic disc motion must be computed (further details will be provided in Section 3.2, in the discussion of the results). Cox \& G{\'o}mez (2002) provided a value for the spiral arm perturbation density at 8 kpc equal to $\rho_0= \frac{14}{11}$ m$_H$ cm$^{-3}$. Our implementation requires the surface density $\Sigma_{S,0}$, which can be recovered from the $z$-direction amplitude provided by Cox \& G{\'o}mez (2002) (their eq. 1), with the following relation:
\begin{equation} \Sigma_{S,0}=2 \rho_0 \int_0^\infty \mbox{sech}^2\left(\frac{x}{H}\right)dx=2 H \rho_0, \end{equation} where $H$ is the disc scale-height. Adopting $H$=180 pc (chosen to match the scale-height of the thin stellar disc proposed by Dehnen \& Binney 1998, Model 2, and in agreement with Spitoni et al. 2008) we obtain \begin{equation} \Sigma_{S,0}=21.16 \mbox{ M}_{\odot} \mbox{ pc}^{-2}. \end{equation} It is important to underline that in our approach the time dependence of the density perturbation by the spiral arms enters only through the modulation function $M(\gamma)$, via the term $\Omega_s t$ (see eqs. \ref{MGAMMA} and \ref{gamma}). Currently, there are no analytical prescriptions for the time evolution of both the amplitude of the spiral arm perturbation and its radial profile in the Galactic evolution context (spiral arm redshift evolution). Therefore, we make the reasonable assumption that during the Galactic evolution the ratio between the amplitude of the spiral density perturbation $\chi(R,t)$ and the total surface density $\Sigma_D(R,t)$ computed at the same Galactocentric distance $R$ remains constant in time, i.e. $ \frac{d}{dt} \, \left[ \chi(R,t)/\Sigma_D(R,t) \right]$=0, assuming a coeval evolution of these two structures in time. We define the dimensionless quantity $\delta_S(R,\phi,t)$ as the following ratio: \begin{equation} \delta_S(R,\phi,t)= \frac{ \Sigma_S(R,\phi,t)+ \Sigma_D(R,t)}{\Sigma_D(R,t)}=1 + \frac{ \Sigma_S(R,\phi,t)}{\Sigma_D(R,t)}. \label{delta} \end{equation} With the assumption that the ratio $\chi(R,t)/\Sigma_D(R,t)$ is constant in time, eq. (\ref{delta}) becomes: \begin{equation} \delta_S(R,\phi,t) =1 + M(\gamma)\frac{ \chi(R,t_G)}{\Sigma_D(R,t_G)}. \label{delta2} \end{equation} If we include the contribution of the perturbation originated by the spiral arms in an SFR driven by a linear Schmidt (1959) law (i.e. $\Psi= \nu \Sigma_g(R,t)$), we have that: \begin{equation} \Psi(R,t,\phi)_{d+s} = \nu \Sigma_g(R,t) \delta_S(R,\phi,t). \label{SFR_D} \end{equation} We are aware that this is a simplification of the more complex behavior seen in N-body simulations (Quillen et al. 2011, Minchev et al. 2012b, Sellwood and Carlberg 2014) and external galaxies (Elmegreen et al. 1992; Rix \& Zaritsky 1995; Meidt et al. 2009), where multiple spiral patterns have been found. We will make use of this description in Section 3.2.2, where we will consider the simultaneous perturbation by a number of spiral patterns moving at different angular velocities.
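A direct transcription of eqs. (\ref{MGAMMA}), (\ref{gamma}), and (\ref{delta2}) is sketched below; the conversion of $\Omega_s t$ into a phase assumes $1$ km s$^{-1}$ kpc$^{-1} \approx 1.0227$ rad Gyr$^{-1}$, and the ratio $\chi/\Sigma_D$ is passed in as a free parameter.

\begin{verbatim}
import numpy as np

KMS_PER_KPC_TO_RAD_PER_GYR = 1.0227   # 1 km/s/kpc in rad/Gyr (approx.)

def modulation(R, phi, t, m=2, pitch_deg=15.0, omega_s=20.0,
               R0=8.0, phi_p=0.0):
    """Concentrated-arm modulation M(gamma) of Cox & Gomez (2002)."""
    gamma = m * (phi
                 + omega_s * KMS_PER_KPC_TO_RAD_PER_GYR * t
                 - phi_p
                 - np.log(R / R0) / np.tan(np.deg2rad(pitch_deg)))
    return ((8.0 / (3.0 * np.pi)) * np.cos(gamma)
            + 0.5 * np.cos(2.0 * gamma)
            + (8.0 / (15.0 * np.pi)) * np.cos(3.0 * gamma))

def delta_s(R, phi, t, chi_over_sigma, **spiral_kwargs):
    """Dimensionless density contrast: 1 + M(gamma) * chi/Sigma_D.
    Several patterns can be superposed by summing their M terms."""
    return 1.0 + chi_over_sigma * modulation(R, phi, t, **spiral_kwargs)
\end{verbatim}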
\begin{figure} \includegraphics[scale=0.5]{Figure_1_5gyr.png} \caption{The modulation function $M(\gamma)$ of eq. (\ref{MGAMMA}) for the concentrated arms by Cox \& G{\'o}mez (2002) with $m=2$ spiral arms, fiducial radius $R_0=8$ kpc, pitch angle $\alpha=\ang{15}$, and $\phi_p(R_0)=0$.} \label{MGAMMAF} \end{figure} \begin{figure*} \centering \includegraphics[scale=.48]{ces_log.png} \caption{The galactic disc SFR in units of M$_{\odot}$ pc$^{-2}$ Gyr$^{-1}$ computed at 0.1, 0.7, 6, and 11 Gyr after the start of disc formation, for the chemical evolution model in which we tested the effects of the density fluctuations resulting from the MCM13 model.} \label{mSFR} \end{figure*} \begin{figure*} \centering \includegraphics[scale=.48]{ces2.png} \caption{The galactic disc SFR normalized to the maximum value SFR$_{R,\ max}$ of the annular region located at the Galactocentric distance $R$, i.e. SFR($R$, $\phi$)/SFR$_{R,\ max}$, computed at 0.1, 0.7, 6, and 11 Gyr after the start of disc formation, for the chemical evolution model in which we tested the effects of the density fluctuations by the MCM13 model.} \label{1SFR} \end{figure*} As stated in the previous Section, the average of the modulation function over the azimuth $\phi$ at a fixed time $t$ and Galactocentric distance $R$ is null ($\langle M(\gamma) \rangle_{\phi}=0$). Therefore, in the presence of a linear Schmidt (1959) law, at a fixed Galactocentric distance the average over $\phi$ of the SFR defined in eq. (\ref{SFR_D}) is equal to the unperturbed SFR, as shown by the following expression: \begin{displaymath} \langle\Psi(R,t,\phi)_{d+s}\rangle_{\phi} =\Psi(R,t) \langle 1 + M(\gamma)\frac{ \chi(R)}{\Sigma_D(R,t_G)}\rangle_{\phi}= \end{displaymath} \begin{equation} =\Psi(R,t)\left(1+\langle M(\gamma) \rangle_{\phi}\frac{ \chi(R)}{\Sigma_D(R,t_G)}\right)=\Psi(R,t). \end{equation} Here, however, we do not adopt a linear Schmidt (1959) law, but the SFR proposed by Kennicutt (1998), which has the exponent $k$=1.5. Hence, the SFR in the Galactic disc in the presence of spiral arm density perturbations becomes: \begin{equation} \Psi_k(R,t,\phi)_{d+s} = \nu \Sigma_g(R,t)^k \delta_S(R,\phi,t)^{k}. \label{SFR_k} \end{equation} Roberts (1969) provided the exact shape of the steady gas distribution in spiral arms, finding an offset between the maximum of the stellar spiral arm and the maximum of the gas distribution driven by galactic shocks. In his Figure 7, it is shown that the regions of newly born luminous stars and the H II regions lie on the inner side of the observable gaseous spiral arm of H I. The presence of a small but noticeable offset between the gas and stellar spiral arms has also been found in the study of interactions between disc galaxies and perturbing companions in 3D N-body/smoothed hydrodynamical numerical simulations by Pettitt (2006). Because of the uncertainties related to the real magnitude of this offset (small offsets are predicted by Pettitt 2006), in our work we do not consider it, and the SFR is more enhanced in correspondence with the total density perturbation peak (see eq. \ref{delta2} and the modulation function in Figure 1). We are aware that this is true only near the corotation radius; however, with our simpler approach we provide an upper limit estimate for the azimuthal abundance variations generated by steady spiral arm density perturbations. In the presence of an offset, the density perturbation would be less ``concentrated'' and more smeared. Our model in the presence of analytical spiral arms must be considered as a first attempt to include spiral structure in a classical chemical evolution model.
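Continuing the sketch above, the perturbed Kennicutt law of eq. (\ref{SFR_k}) can be evaluated on one annulus, together with a numerical check that the bare modulation averages to zero over the 36 azimuthal zones; the gas surface density and the $\chi/\Sigma_D$ value are illustrative.

\begin{verbatim}
import numpy as np
# uses modulation() and delta_s() from the previous sketch

phi = np.deg2rad(np.arange(0.0, 360.0, 10.0))   # 36 azimuthal zones
d = delta_s(R=8.0, phi=phi, t=11.0, chi_over_sigma=0.3)

nu, k = 1.0, 1.5
sigma_gas = 10.0                    # Msun/pc^2, illustrative value
psi = nu * sigma_gas**k * d**k      # perturbed Kennicutt SFR

# sanity check: <M(gamma)>_phi = 0 at fixed R and t
assert abs(np.mean(modulation(8.0, phi, 11.0))) < 1e-10
\end{verbatim}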
As stated in Section 2.2, we will also present results for the azimuthal abundance variations originating from the chemo-dynamical Milky Way-like simulation, in which spiral arms and bar are present in a self-consistent way. Our analytical spiral arm model is meant to break the problem down, in order to understand the causes of the azimuthal variations. Assuming that modes add linearly, we can approximate a realistic galactic disc by adding several spiral sets with different pattern speeds, as seen in observations (e.g., Meidt et al. 2009) and simulations (e.g., Masset \& Tagger 1997, Quillen et al. 2011, Minchev et al. 2012a). \begin{figure*} \centering \includegraphics[scale=0.58]{BE.png} \includegraphics[scale=0.58]{AAE.png} \caption{Results for the chemical evolution model in which we consider the density fluctuations from the chemo-dynamical model by MCM13. {\it Upper Panel}: residuals of the ISM oxygen abundances as a function of the Galactic azimuth, computed with our chemical evolution model at 4, 8, 12, 16, and 18 kpc after subtracting the average radial gradient. {\it Lower Panel}: our mock observation mimicking the S{\'a}nchez et al. (2015) results, in which we randomly plot the residual ISM oxygen abundances predicted by our chemical evolution model at 4, 8, 12, 16, and 18 kpc, adding an uncertainty of 5$^{\circ}$ in the azimuthal component and taking into account the [O/H] errors according to S{\'a}nchez et al. (2015); the color code is identical to the line colors of the upper panel: the innermost disc regions are associated with the green points, the outermost ones with the purple points. } \label{minchev_av} \end{figure*} \begin{figure*} \centering \includegraphics[scale=0.45]{GRAD.png} \caption{Results for the chemical evolution model in which we consider the density fluctuations from the chemo-dynamical model by MCM13. The present-day Fe abundance gradient computed at different azimuthal coordinates. The shaded grey area limits correspond to the maximum and minimum iron abundance values at the different Galactocentric distances. The observational data (light blue circles) are the Cepheids collected by Genovali et al. (2014). With the empty pentagons we report the average abundance values and relative errors of Genovali et al. (2014) when divided into six radial bins. The zoomed region presents the model lines computed between 16 and 18 kpc. } \label{minchev_grad} \end{figure*} \begin{figure} \includegraphics[scale=0.47]{ev_gradient_ivan.png} \caption{Results for the chemical evolution model in which we consider the density fluctuations from the chemo-dynamical model by MCM13. Time evolution of the oxygen abundance gradient at $\phi$=0$^{\circ}$. } \label{minchev_grad_ev} \end{figure} \begin{figure} \includegraphics[scale=0.56]{cep1_large.png} \includegraphics[scale=0.40]{ivan_cep_new.png} \caption{ {\it Upper Panel}: the average Fe abundances of the Galactic Cepheids presented by Genovali et al. (2014), in bins of 15$^{\circ}$ in the azimuthal coordinate $\phi$, at different Galactocentric distances. {\it Lower left Panel}: Fe abundances as functions of the azimuthal coordinate computed at 6, 8, 10, and 12 kpc, predicted by the chemical evolution model in which we implemented the density fluctuations from the MCM13 model. {\it Lower right Panel}: residuals of the Fe abundances predicted by our model, computed after subtracting the average radial gradient.
} \label{CEP1} \end{figure} \section{Results} In this section we apply our 2D model using surface density fluctuations from the MCM13 chemo-dynamical model and from an analytical prescription. \subsection{Density fluctuations from the MCM13 chemo-dynamical model} In this section we present our results based on the new 2D chemical evolution model including the density mass fluctuations extracted from the chemo-dynamical model by MCM13. Fig. \ref{mSFR} shows the galactic disc SFR computed at 0.1, 0.7, 6, and 11 Gyr after the start of disc formation, for the chemical evolution model in which we tested the effects of the density fluctuations by MCM13, in units of M$_{\odot}$ pc$^{-2}$ Gyr$^{-1}$. We notice that at early times (i.e. the ``0.1 Gyr'' case reported in the upper left panel) the SFR is more concentrated in the inner Galactic regions; at later times the SFR in the innermost regions decreases and the outer parts become more actively star forming, because of the ``inside-out'' prescription coupled with the inclusion of the density fluctuations. At the Galactic epoch of 0.1 Gyr after the start of disc formation, regions with the same Galactocentric distance have approximately the same SFR. Already after 0.7 Gyr of Galactic evolution, azimuthal star formation inhomogeneities are not negligible. Concerning the panel with the model results at 6 Gyr, azimuthal inhomogeneities are evident; in particular, at 8 kpc the ratio between the maximum and the minimum values of the SFR is SFR$_{max}$/SFR$_{min}$=6.72. In Fig. \ref{mSFR} the bar and spiral arm features do not show up clearly, especially at early times. This is caused by the adopted inside-out prescription (eq. \ref{tau}), which leads to large differences between the SFRs computed in the inner and outer regions. In Fig. \ref{1SFR}, the galactic disc SFR($R$, $\phi$) is normalized to the maximum value SFR$_{R,\ max}$ of the annular region located at the Galactocentric distance $R$, i.e. SFR($R$, $\phi$)/SFR$_{R,\ max}$, computed at 0.1, 0.7, 6, and 11 Gyr after the start of disc formation, respectively. Here, different features related to the density perturbations originated by spiral arms and bar can be noted. In Fig. \ref{minchev_av} the main results related to the present-day azimuthal variations of the oxygen abundance are presented. The top panel shows the azimuthal distribution of the residuals of the oxygen abundances computed with our chemical evolution model at 4, 8, 12, 16, and 18 kpc after subtracting the average radial gradient (i.e. the one obtained with the reference model without any density perturbation). Throughout this paper we adopt the photospheric values of Asplund et al. (2009) as our solar reference. We see that the behavior is in excellent agreement with the observations by S{\'a}nchez et al. (2015); indeed, the data show that outer regions display larger azimuthal variations, and the amplitude of the residual variations is of the order of 0.1 dex (see Figure 7 of S{\'a}nchez et al. 2015). In our model the maximum variations are $\sim$ 0.12 dex, for the chemical evolution model computed at 18 kpc. Our results appear to show slightly less scatter. In the lower panel of Fig. \ref{minchev_av} we present our ``mock'' observations. We draw oxygen abundances of different ISM regions at different Galactocentric distances at random azimuthal coordinates $\phi$. Hence, we add an error of $\sigma_{\phi}$=5$^{\circ}$ to account for the fact that our model has a resolution of 10$^{\circ}$ in the azimuthal component $\phi$. Moreover, the average observational uncertainty associated with the oxygen abundances, $\sigma_{[O/H]}$ = 0.05 dex, as provided by S{\'a}nchez et al. (2015), has been considered. We define the ``new'' oxygen abundance including these uncertainties as follows: \begin{equation} [\mbox{O/H}]_{new} = [\mbox{O/H}]+ U([-\sigma_{[O/H]}, \sigma_{[O/H]}]), \label{Err1} \end{equation} where $U([a,b])$ denotes a uniformly distributed random number drawn from the interval $[a,b]$. Similarly, we implement the uncertainty in the azimuthal component through the following relation: \begin{equation} \phi_{new} = \phi + U([-\sigma_{\phi}, \sigma_{\phi}]). \label{Err2} \end{equation}
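A minimal sketch of this mock-observation step, drawing the perturbations of eqs. (\ref{Err1}) and (\ref{Err2}) with NumPy (the seed and the input arrays are placeholders):

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(42)
sigma_oh, sigma_phi = 0.05, 5.0      # dex and degrees, as adopted above

def mock_observe(oh, phi_deg):
    """Add uniform errors U([-sigma, sigma]) to the model abundances
    and azimuths, mimicking the observational uncertainties."""
    oh = np.asarray(oh, dtype=float)
    phi_deg = np.asarray(phi_deg, dtype=float)
    oh_new = oh + rng.uniform(-sigma_oh, sigma_oh, size=oh.shape)
    phi_new = phi_deg + rng.uniform(-sigma_phi, sigma_phi,
                                    size=phi_deg.shape)
    return oh_new, phi_new
\end{verbatim}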
The similarity between the S{\'a}nchez et al. (2015) observations and our results is clearly visible here. To summarize, the inclusion of density perturbations taken from a self-consistent dynamical model at different Galactic times leads to significant variations in the chemical abundances of the outer Galactic regions. In Fig. \ref{minchev_grad} we show results for the present-day abundance gradient (after 11 Gyr of evolution) for iron, computed for six azimuthal slices of width 10$^{\circ}$ at different azimuthal coordinates (as indicated). In the same plot, the shaded grey area indicates the maximum spread in the abundance ratio [Fe/H] among the azimuthal coordinates we considered (0$^{\circ}$, 60$^{\circ}$, 120$^{\circ}$, 180$^{\circ}$, 240$^{\circ}$, and 300$^{\circ}$). As a consequence of the results presented above, the shaded area is larger towards the external regions. We also overplot the data from Genovali et al. (2014) in order to compare them with our model predictions. We notice that the predicted gradient is slightly steeper than the observed one in the external Galactic regions. However, we notice that the model lines pass within the standard deviation of the data by Genovali et al. (2014) when divided into six radial bins. In Fig. \ref{minchev_grad_ev} we tested the effects of the chemo-dynamical fluctuations on the time evolution of the oxygen abundance gradient at a fixed azimuth ($\phi$=0$^{\circ}$). In agreement with Minchev et al. (2018), the abundance gradient flattens in time, because of the chemical evolution model assumptions. As shown by Spitoni et al. (2015) and Grisoni et al. (2018), the inclusion of radial gas flows can lead to even steeper gradients during the whole Galactic history. \begin{figure} \includegraphics[scale=0.43]{omega.png} \caption{ Spiral pattern speed $\Omega_s$ and disc angular velocity $\Omega_d$ computed by Roca-F{\`a}brega et al. (2014), indicated with light blue and violet lines, respectively. With the vertical long-dashed red line we show the position of the corotation radius, located at the Galactocentric distance $R=8.31$ kpc. The Outer and Inner Lindblad resonances extracted from the Roca-F{\`a}brega et al. (2014) simulation are also drawn, with dotted magenta and dotted purple lines, respectively.
} \label{omega} \end{figure} \begin{table} \caption{ Different spiral arm models tested with our 2D chemical evolution model for the disc, changing the number of spiral arms $m$ (second column), the pitch angle $\alpha$ (third column), and the spiral pattern speed $\Omega_{s}$ (last column).} \label{TMW} \begin{center} \begin{tabular}{c|cccc} \hline \\ Models &$m$ &$\alpha$& $\Omega_{s}$\\ & & & [km s$^{-1}$ kpc$^{-1}$]\\ \hline \\ S2A & 2 & 15$^{\circ}$ & 20 \\ S2B & 2 & 15$^{\circ}$ & 17.5\\ S2C & 2 & 15$^{\circ}$ & 15\\ S2D & 2 & 15$^{\circ}$ & 13.75\\ S2E & 2 & 15$^{\circ}$ & 12.5\\ S2F & 2 & 15$^{\circ}$ & 25\\ S2G & 2 & 7$^{\circ}$ & 20 \\ S2H & 2 & 30$^{\circ}$ & 20\\ S1A & 1 & 15$^{\circ}$ & 20 \\ S1B & 1 & 15$^{\circ}$ & 17.5\\ S1C & 1 & 15$^{\circ}$ & 15\\ S1D & 1 & 15$^{\circ}$ & 13.75\\ S1E & 1 & 15$^{\circ}$ & 12.5\\ S1F & 1 & 15$^{\circ}$ & 25\\ \hline \hline \end{tabular} \end{center} \end{table} \begin{figure} \includegraphics[scale=0.44]{R3.png} \includegraphics[scale=0.44]{R3_t.png} \caption{ Results for the chemical evolution model in which we consider the density fluctuations associated with the analytical spiral arm formulation. {\it Upper Panel}: the azimuthal distribution of the residuals of the oxygen abundances computed with our chemical evolution model at 4, 8, 12, 16, and 20 kpc, after subtracting the average radial gradient, for a model with $R_S$=7 kpc, $R_D$=3.5 kpc, $\Sigma_{S,0}$=20 M$_{\odot}$ pc$^{-2}$, $\nu$=1.1, $\Omega_s$=20 km s$^{-1}$ kpc$^{-1}$, and $m$=2 spiral arms (model S2A in Table 1). {\it Lower Panel}: the time evolution of the [O/H] abundance as a function of the azimuthal coordinate computed at 8 kpc. } \label{SA} \end{figure} \begin{figure} \includegraphics[scale=0.48]{R3_SFR_1Gyr.png} \includegraphics[scale=0.48]{R3_SFR_11Gyr.png} \caption{ {\it Upper Panel}: Galactic disc SFR resulting from model S2A after 1 Gyr of evolution (see Table 1 and text for model details). The color code indicates the SFR in units of M$_{\odot}$ pc$^{-2}$ Gyr$^{-1}$. {\it Lower Panel}: the same, but computed at 11 Gyr.} \label{SASFR} \end{figure} \begin{figure} \includegraphics[scale=0.40]{DIF_OMEGA_S.png} \caption{ Disc angular velocity $\Omega_d$ computed by Roca-F{\`a}brega et al. (2014), indicated with light blue and violet lines. The horizontal solid lines indicate the different spiral pattern speeds $\Omega_s$ adopted in our models (see text and Table 1 for model details). The vertical long-dashed lines show the positions of the corotation radii assuming different $\Omega_s$ values.} \label{DOS} \end{figure} \begin{figure*} \centering \includegraphics[scale=0.5]{grad_sub_S2.png} \caption{ The present-day oxygen abundance gradients for different azimuths, predicted by chemical models with spiral multiplicity $m = 2$ and different spiral pattern speeds $\Omega_{s}$ (see Table 1 for model details). In each panel the dashed vertical line indicates the location of the corotation radius. It is clear that allowing for multiple spiral patterns propagating through the disc at the same time will affect the entire disc, similarly to the case of the MCM13 model.} \label{S2_grad} \end{figure*} \begin{figure} \includegraphics[scale=0.5]{cor_spir_2_lab.png} \caption{ Present-day residual azimuthal variations in the oxygen abundance for the corotation regions (as indicated) of the different pattern speeds shown in Fig. \ref{S2_grad}. An increase in the effect is found as the corotation shifts to larger radii, i.e., for slower spiral patterns. Such a set of spirals with progressively slower pattern speeds as radius increases
Such a set of spirals with progressively slower patterns speeds as radius increases, can be a realistic representation of a galactic disk. } \label{CS2} \end{figure} \begin{figure} \includegraphics[scale=0.47]{R04.png} \includegraphics[scale=0.47]{R04_t.png} \caption{ As in Fig. \ref{SA} but for model S1A, with with multiplicity $m$=1 of spiral arms.} \label{S1A} \end{figure} In Fig. \ref{CEP1} we compare the average iron abundance azimuthal variation in bins of $\phi$=15$^{\circ}$ presented by Genovali et al. (2014) computed at 6, 8, 10, and 12 kpc, respectively with our 2D chemical evolution model, resulting from the MCM13 density variations. We see that the observed azimuthal variations are for limited Galactocentric distances (6-12 kpc) and with a narrow range of azimuthal coordinates. Although it is evident that the observed amplitude of azimuthal variations are larger than the ones predicted by our models, more precise Galactic Cepheid data are required to make firm conclusions. Moreover, other dynamical processes that we have not considered in this work had maybe played important roles in the evolution and in the building up of the Galactic gradients and their azimuthal variations - radial migration processes can already introduce some variations in about a Gyr (Quillen et al. 2018). \begin{figure*} \centering \includegraphics[scale=0.5]{grad_sub_S1.png} \caption{ As Fig. \ref{S2_grad}, but for an $m$=1 spiral. } \label{S1_grad} \end{figure*} \begin{figure} \includegraphics[scale=0.5]{cor_spir_1_lab.png} \caption{ As in Fig. \ref{CS2} but for models with $m=1$ multiplicity (see Table 1).} \label{CS1} \end{figure} \subsection{Density fluctuations from an analytical spiral arm formulation} In this Section we discuss the results of chemical evolution models with only analytical prescriptions for spiral arm density perturbations without including any density fluctuations from chemo-dynamical models. The primary purpose here is to test the effect of regular perturbations (i.e. spiral arms evolution described by an analytical formulation) on the chemical evolution of a Milky Way like galaxy. We underline that the results showed in the previous Section reflect more closely the complex behavior of the Milky Way. However, we are also interested to explore different spiral arm configurations which could characterize external galactic systems by varying the free parameters of the analytical expression of the spiral arms. In particular, we will show the effects on the azimuthal variations of abundance gradients for oxygen by varying: \begin{enumerate}[i)] \item the multiplicity $m$ of spiral arms; \item the spiral pattern speed, $\Omega_s$; \item the pitch angle $\alpha$. \end{enumerate} For all model results that will be presented we assume the following Cox \& Gomez (2002) prescriptions: the radial scale length of the drop-off in density amplitude of the arms fixed at the value of $R_S=7$ kpc, the pitch angle is assumed constant at $\alpha=\ang{15}$, and the surface arm density $\Sigma_0$ is 20 M$_{\odot}$ pc$^{-2}$ at the fiducial radius $R_0=8$ kpc; finally we assume $\phi_p(R_0)=0$. The disk rotational velocity $\Omega_d(R)$ has been extracted from the simulation by Roca-F{\`a}brega et al. (2014) (see their left panel of Figure 1). The exponential fit of $\Omega_d(R)$ variations as a function of the Galactocentric distance $R$ (expressed in kpc) is: \begin{equation} \Omega_d(R)=98.93 \, e^{-0.29 \, R}+ 11.11 \mbox{ [km } \mbox{s } ^{-1}\mbox{kpc}^{-1}]. 
We start by adopting the constant pattern angular velocity $\Omega_s$ = 20 km s$^{-1}$ kpc$^{-1}$, consistent with the Roca-F{\`a}brega et al. (2014) model. A similar value was first estimated from moving groups in the U-V plane by Quillen \& Minchev (2005, 18.1 $\pm$ 0.8 km s$^{-1}$ kpc$^{-1}$), and a summary of derived values for the Milky Way can be found in Bland-Hawthorn \& Gerhard (2016). In Fig. \ref{omega} we show the $\Omega_s$ and $\Omega_d(R)$ quantities, as well as the Outer and Inner Lindblad resonances, as functions of the Galactocentric distance; the corotation radius is located at 8.31 kpc. \subsubsection{Results with a single analytical spiral pattern} We begin our analysis by discussing the results obtained with model S2A (see Table 1), which has a pattern speed of $\Omega_s$ = 20 km s$^{-1}$ kpc$^{-1}$, placing the corotation resonance at the solar radius. The upper panel of Fig. \ref{SA} shows the oxygen abundance residual azimuthal variations after 11 Gyr of disc evolution for different Galactocentric distances. The average radial gradient is subtracted. As expected, larger azimuthal abundance variations are found near the corotation radius. In this region the chemical enrichment should be more efficient due to the lack of relative gas-spiral motion. The higher SFR at the corotation radius, caused by the locally higher gas overdensity, lasts for a longer time; therefore, more massive stars can be created and more metals can be ejected into the local ISM during the spiral arm passage. At 8 kpc we have $\Delta$[O/H] $\approx$ 0.05 dex. For other Galactocentric distances, away from the corotation, the variations are much smaller. In the lower panel of Fig. \ref{SA} we present the temporal evolution of the oxygen abundance azimuthal variations for model S2A as a function of the azimuthal coordinate $\phi$, computed at 8 kpc. As expected, larger inhomogeneities are present at early times, decreasing in time. As discussed in Section 2.3, we assume that during the Galactic evolution the ratio between the amplitude of the spiral density perturbation and the total surface density computed at the same radius $R$ remains constant in time. However, this analytical approach is not capable of putting constraints on the temporal evolution of the pattern speed. Galactic chemical evolution is an integral process in time: the stronger spiral-structure-induced azimuthal variations at early times are, therefore, washed out by phase mixing. Fig. \ref{SASFR} depicts the SFR on the Galactic plane after 1 Gyr of evolution (upper panel) and at the present time (lower panel), computed with model S2A. Here it is evident how the spiral arm density perturbation affects and modulates the SFR, expressed in units of M$_{\odot}$ pc$^{-2}$ Gyr$^{-1}$. The shape of the two spiral arm over-densities is clearly visible in the SFR. This is in contrast with our results using the MCM13 density fluctuations (see Fig. \ref{mSFR}), where multiple spiral density waves were present. Moreover, we can appreciate the inside-out disc formation: at later times the external regions become actively star forming. \subsubsection{The effect of different pattern speeds} In this Section we vary the spiral pattern speed, which has the effect of shifting the corotation resonance in radius. We argue that a combination of multiple spiral modes with different pattern speeds can be a realistic representation of a galactic disc. The horizontal and vertical lines in Fig.
\ref{DOS} show the different pattern speeds and the corresponding corotation radii, respectively, used in this Section: it is clear that smaller $\Omega_{s}$ values lead to a more external corotation radius. In Fig. \ref{S2_grad} we show the oxygen abundance gradients computed at different azimuths after 11 Gyr of disc evolution for models with spiral multiplicity $m$ = 2 and different spiral pattern speeds $\Omega_{s}$ (see Table 1 for model details). We notice that the more the corotation radius is shifted towards the external Galactic regions, the more the azimuthal variations of the oxygen abundance near the corotation radius are amplified. This result is reasonable in the light of our previous findings, presented above, obtained with the model assuming the chemo-dynamical fluctuations by MCM13. We recall that larger variations in the chemical abundances of the outer galactic regions have been found by observations of external galaxies (S{\'a}nchez et al. 2015). In Fig. \ref{CS2} we show the present-day azimuthal residuals of the oxygen abundances, after subtracting the average radial gradient, computed for the Galactic annular regions which include the relative corotation radius, for the following models with $m=2$ multiplicity: S2A, S2B, S2C, S2D, and S2F (see Table 1 for other parameter details). Model S2D computed at $R=$13 kpc shows $\Delta$[O/H]$\approx$ 0.32 dex. Already in regions not so far from the solar neighbourhood the variations are important: model S2C, whose corotation resides in the annular region centered at $R=$ 11 kpc, presents an oxygen abundance variation of $\Delta$[O/H] $\approx$ 0.20 dex. As discussed in Section 2.3, it is well accepted that multiple patterns can be present in galactic discs (e.g., Meidt et al. 2009), including our own Milky Way (Minchev \& Quillen 2006, Quillen et al. 2011), with slower patterns shifted to outer radii. This will have the effect of placing the corotation regions very similarly to what Fig. \ref{CS2} presents, and of having corotating arms at all radii, as found by Grand et al. (2012) and Hunt et al. (2019). Therefore, the increasing scatter in abundance with galactic radius can be explained as the effect of multiple patterns propagating at the same time. Note that radial migration will introduce additional scatter, which can in principle be accounted for. \begin{figure} \includegraphics[scale=0.5]{R1_SFR_11.png} \includegraphics[scale=0.5]{R2_SFR_11.png} \caption{ {\it Upper Panel}: the Galactic disc SFR related to model S2G, computed after 11 Gyr of Galactic evolution (see Table 1 and text for model details), with a pitch angle $\alpha=7^{\circ}$. The color code indicates the SFR in units of M$_{\odot}$ pc$^{-2}$ Gyr$^{-1}$. {\it Lower Panel}: as the upper panel, but for model S2H, where the pitch angle $\alpha$ is 30$^{\circ}$.} \label{SESFSFR} \end{figure} \begin{figure} \includegraphics[scale=0.47]{R1.png} \includegraphics[scale=0.47]{R2.png} \caption{ Effects of different pitch angles $\alpha$ on the azimuthal distribution of the residuals of the oxygen abundances computed with our chemical evolution model at 4, 8, 12, 16, and 20 kpc. In the upper panel the pitch angle is set to 7$^{\circ}$ (model S2G in Table 1), while in the lower panel $\alpha=30^{\circ}$ (model S2H in Table 1). } \label{SE_SF} \end{figure} \subsubsection{Results with an $m$=1 spiral pattern} We want to test whether the amplitude of the azimuthal chemical abundance variations depends on the number $m$ of spiral arms.
In Table 1 we label as model S1A a model identical to model S2A but with an $m$=1 spiral structure, i.e., having only one spiral arm. Such a mode arises naturally from the coupling of $m$=2 and $m$=3 modes, as found by Quillen et al. (2011) and Minchev et al. (2012a) using pure N-body and SPH simulations, and is seen in external galaxies (Zaritsky \& Rix 1997). In the upper panel of Fig. \ref{S1A} we notice that the abundance variations are larger than the ones obtained with the same model but $m$=2 (upper panel of Fig. \ref{SA}): a fluctuation of about $\Delta$[O/H]=0.1 dex is seen at the corotation radius ($\sim$ 8 kpc). The same figure also presents the time evolution of the azimuthal abundance inhomogeneities for oxygen computed at 8 kpc with the model S1A at 2, 4, 6, 8, and 11 Gyr. In Fig. \ref{S1_grad} we show the oxygen abundance gradients computed at different azimuths after 11 Gyr of disk evolution for models with spiral multiplicity $m$ = 1 and the same spiral pattern speeds $\Omega_{s}$ as in Fig. \ref{S2_grad} (see Table 1 for model details). We notice that around the corotation radii the azimuthal abundance variations are generally more evident for models with one spiral arm compared to ones with spiral multiplicity $m$ = 2. In Fig. \ref{CS1} we show the present day azimuthal residual of the oxygen abundances after subtracting the average radial gradient computed in annular regions which contain the corotation radii for models with $m=1$ multiplicity: S1A, S1B, S1C, S1D, S1F (see Table 1 for other parameter details). For the model S1D at the Galactic distance of 13 kpc we have $\Delta$[O/H]$\approx$ 0.40 dex, which is about 25\% larger than in the S2D case. As found for the model with $m=2$, the oxygen abundance variations become important in regions not so far from the solar vicinity: e.g., for model S1C, whose corotation resides at $R=$ 11 kpc, $\Delta$[O/H] $\approx$ 0.23 dex. \subsubsection{Results for different pitch angles} In this Section we consider different pitch angles $\alpha$ for the spiral arms in our Milky Way galaxy. Recent work by Quillen et al. (2018) and Laporte et al. (2018) suggests that tightly wound spiral structure should be considered, based on the modeling of phase-space structure found in the second Gaia data release (Gaia Collaboration et al. 2018). A smaller pitch angle gives rise to more tightly wound spiral structure. The upper panel of Fig. \ref{SESFSFR} depicts the present-time SFR computed with a pitch angle $\alpha=7^{\circ}$ (model S2G in Table 1), whereas the lower panel shows the case of $\alpha=30^{\circ}$ (model S2H in Table 1). For both panels the other model parameters are the same as in model S2A. The spiral pattern is clearly visible in the SFR, and for the model S2G a more tightly wound spiral structure is present. In Fig.~\ref{SE_SF} we compare the azimuthal variations for models S2G and S2H. We see that the chemical variations are identical at the corotation radius and simply azimuthally shifted for other Galactocentric distances. \section{Conclusions} In this paper we presented a new 2D chemical evolution model, able to trace azimuthal variations in the galactic disc density. We applied this model to (i) the density fluctuations arising in a disc formation simulation by Martig et al. (2012), used for the MCM13 Milky Way chemo-dynamical model, and (ii) the density perturbations originating from an analytical spiral arm formulation.
The main conclusions for the density perturbations from the Milky Way chemo-dynamical model of MCM13 can be summarized as follows: \begin{itemize} \item We found that the density fluctuations produce significant oxygen azimuthal variations in the abundance gradients, of the order of 0.1 dex. \item The azimuthal variations are more evident in the external galactic regions, in agreement with the recent observations of the galaxy NGC 6754, using MUSE data (S{\'a}nchez et al. 2015). \end{itemize} In an effort to understand the above findings, we constructed simple analytical spiral arm models, for which we varied the pattern speed, multiplicity and pitch angle, with the following main findings: \begin{itemize} \item The largest fluctuations in the azimuthal abundance gradients are found near the corotation radius, where the relative velocity with respect to the disk is close to zero. \item Larger azimuthal variations are found for corotation radii shifted to larger radii, i.e., slower pattern speeds. \item The variation is more enhanced for the model with only one spiral arm, which is expected to result from the combination of an $m$=2 and $m$=3 spiral structure. \item We found that the more significant azimuthal abundance variations seen at early times in the presence of a regular, periodic perturbation tend to quench at later times. This is expected, as galactic chemical evolution is a cumulative process, and phase mixing and radial migration tend to wipe out structure with time. \end{itemize} Combining the effect of corotation radii by assuming the simultaneous propagation of multiple spiral modes through galactic disks, we can obtain a realistic picture of the azimuthal variations induced at stellar birth found in self-consistent models, such as the MCM13. Material spiral arms propagating near the corotation at all galactic radii have been described in a number of recent numerical works with different interpretations (see Grand et al. 2012, Comparetta \& Quillen 2012, Hunt et al. 2019). In future work we will improve the new 2D chemical evolution model introduced here by taking into account the stellar radial migration of long-lived stars and the pollution of the ISM abundance that they introduce at radii and azimuths different from their birth places. We will also use this model to update the Galactic habitable zone results presented by Spitoni et al. (2014, 2017) and study the effect of spiral structure and the Galactic bar. \section*{Acknowledgement} We thank the anonymous referee for various suggestions that improved the paper. E. Spitoni and V. Silva Aguirre acknowledge support from the Independent Research Fund Denmark (Research grant 7027-00096B). V. Silva Aguirre acknowledges support from VILLUM FONDEN (Research Grant 10118). G. Cescutti acknowledges financial support from the European Union Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 664931. This work has been partially supported by the EU COST Action CA16117 (ChETEC). I. Minchev acknowledges support by the Deutsche Forschungsgemeinschaft under the grant MI 2009/1-1. F. Matteucci acknowledges research funds from the University of Trieste (FRA2016).
\section{Introduction} Supersymmetry (SUSY) provides the most natural solution to the gauge hierarchy problem in the Standard Model (SM). In supersymmetric SMs (SSMs) with $R$ parity, the gauge couplings for the $SU(3)_C$, $SU(2)_L$ and $U(1)_Y$ gauge symmetries are unified at about $2\times 10^{16}$~GeV~\cite{Ellis:1990zq}, the lightest supersymmetric particle (LSP), such as the neutralino, can be a cold dark matter candidate~\cite{Ellis:1983wd, Goldberg:1983nd}, and the electroweak precision constraints can be evaded, etc. Especially, gauge coupling unification~\cite{Ellis:1990zq} strongly suggests Grand Unified Theories (GUTs), which can elegantly explain the quantum numbers of the SM fermions and charge quantization. Thus, the SSMs are the most promising new physics beyond the SM. However, the recent LHC searches for supersymmetry~\cite{Aad:2011ib, Aad:2011qa, Chatrchyan:2011zy} and the Higgs boson~\cite{Collaboration:2012si, Chatrchyan:2012tx} have considerably shrunk the viable parameter spaces. Thus, to explore the phenomenologically inspired SSMs, we briefly review the phenomenological constraints in the following: \begin{itemize} \item The colored supersymmetric particles (sparticles) such as squarks and gluinos (at least the first two generation squarks) must have masses around 1 TeV or larger, according to the ATLAS~\cite{Aad:2011ib, Aad:2011qa} and CMS~\cite{Chatrchyan:2011zy} Collaborations at the LHC. \item The ATLAS and CMS Collaborations have reported an excess of events for the SM-like Higgs boson with mass around $126$~GeV and $124$~GeV, respectively~\cite{Collaboration:2012si, Chatrchyan:2012tx}. The corresponding global significances are respectively $2.2\sigma$ and $1.5\sigma$, and the corresponding local significances without taking into account the look-elsewhere-effect (LEE) are $3.5\sigma$ and $3.1\sigma$, respectively. The viable light Higgs boson mass range at the $95\%$ CL is from 115.5~GeV to 127~GeV~\cite{Collaboration:2012si, Chatrchyan:2012tx}. Moreover, a Higgs boson mass around 125 GeV gives very strong constraints on the viable supersymmetry parameter space, which have recently been studied extensively~\cite{Hall:2011aa, Baer:2011ab, Li:2011ab, Heinemeyer:2011aa, Arbey:2011ab, Arbey:2011aa, Carena:2011aa, Akula:2011aa, Kadastik:2011aa, Ellwanger:2011aa, Buchmueller:2011ab, Cao:2011sn, Gunion:2012zd, King:2012is, Kang:2012tn, Chang:2012gp, Aparicio:2012iw, Baer:2012uy}. Especially, the squark and/or gluino masses will in general be about a few TeV in the Minimal Supersymmetric Standard Model (MSSM) and the Next to the MSSM (NMSSM) with simple supersymmetry mediation mechanisms. \item The cold dark matter relic density is $0.112\pm 0.0056$ from the seven-year WMAP measurements~\cite{Larson:2010gs}. \item The spin-independent elastic dark matter-nucleon scattering cross-sections are smaller than about $7\times 10^{-45}~{\rm cm}^2$ for a dark matter mass around 50~GeV~\cite{Aprile:2011hi}. \item The experimental limit on the Flavor Changing Neutral Current (FCNC) process $b \rightarrow s\gamma$. The results from the Heavy Flavor Averaging Group (HFAG)~\cite{Barberio:2007cr}, in addition to the BABAR, Belle, and CLEO results, are: ${\rm BR}(b \rightarrow s\gamma) = (355 \pm 24^{+9}_{-10} \pm 3) \times 10^{-6}$. There is also a theoretical estimate in the SM~\cite{Misiak:2006zs} of ${\rm BR}(b \rightarrow s\gamma) = (3.15 \pm 0.23) \times 10^{-4}$.
The limits, where the experimental and theoretical errors are added in quadrature, are $2.86 \times 10^{-4} \leq {\rm BR}(b \rightarrow s\gamma) \leq 4.18 \times 10^{-4}$. \item The anomalous magnetic moment of the muon $(g_{\mu} - 2)/2$. The experimental value of the muon $(g_{\mu} - 2)/2$ deviates from the SM prediction by about $3.3\sigma$, {\it i.e.}, $\Delta a_{\mu} = a^{\rm exp}_{\mu} - a^{\rm SM}_{\mu} =(26.1\pm 8.0) \times 10^{-10}$~\cite{Hagiwara:2011af}. \item The experimental limit on the process $B_{s} \rightarrow \mu^+ \mu^-$. The upper bound on ${\rm BR}(B_{s} \rightarrow \mu^+ \mu^-)$ is $1.1 \times 10^{-8}$ from the CMS and LHCb collaborations~\cite{CMS-LHCb}. \item The experimental limit on the process $B_{u} \rightarrow \tau {\bar \nu}_{\tau}$ is $0.85 \leq {\rm BR}(B_{u} \rightarrow \tau {\bar \nu}_{\tau})/{\rm SM} \leq 1.65 $~\cite{Buchmueller:2009fn}. \end{itemize} In addition, from the theoretical point of view, the squark and slepton soft masses are usually family universal in string model building, for example in the heterotic $E_8\times E_8$ string theory with Calabi-Yau compactifications~\cite{Braun:2005ux, Bouchard:2005ag}, in intersecting D-brane model building~\cite{Berkooz:1996km, Ibanez:2001nd, Blumenhagen:2001te, CSU, Cvetic:2002pj, CLL, Chen:2005ab, Chen:2005mj, Blumenhagen:2005mu}, and in F-theory model building~\cite{Donagi:2008ca, Beasley:2008dc, Beasley:2008kw, Donagi:2008kj, Font:2008id, Jiang:2009zza, Blumenhagen:2008aw, Li:2009cy}, etc. Therefore, based on the above phenomenological constraints and theoretical considerations, we propose the electroweak supersymmetry around the electroweak scale: {\it the squarks and/or gluinos are around a few TeV while the sleptons, sneutrinos, bino and winos are within one TeV.} The Higgsinos (or say the Higgs bilinear $\mu$ term) can be either heavy or light. We emphasize that gluinos can be within one TeV because the squarks are heavy. Therefore, the constraints from the current ATLAS and CMS supersymmetry and Higgs searches and from the $b \rightarrow s\gamma$, $B_{s} \rightarrow \mu^+ \mu^-$, and $B_{u} \rightarrow \tau {\bar \nu}_{\tau}$ processes can be satisfied automatically due to the heavy squarks. Also, the constraints from dimension-five proton decay in supersymmetric GUTs can be relaxed as well. Moreover, the $(g_{\mu} - 2)/2$ experimental result can be explained due to the light sleptons. Also, we will assume that the dominant component of the LSP neutralino is the bino. Interestingly, the observed dark matter relic density can be realized via the LSP neutralino and light stau coannihilations, and the XENON experiment~\cite{Aprile:2011hi} will not give any constraint on such viable parameter spaces due to the heavy squarks. For simplicity, we will refer to the {\it electroweak supersymmetry around the electroweak scale} simply as {\it electroweak supersymmetry}. In this paper, we consider the simple Generalized Minimal Supergravity (GmSUGRA)~\cite{Li:2010xr, Balazs:2010ha} (for previous studies of non-universal gaugino masses in supersymmetric GUTs, see Refs.~\cite{Ellis:1985jn, Drees:1985bx, Anderson:1999uia, Chamoun:2001in, Chakrabortty:2008zk, Martin:2009ad, Bhattacharya:2009wv, Feldman:2009zc, Chamoun:2009nd, Li:2010mra, Gogoladze:2011aa, Younkin:2012ui}). We show explicitly that the electroweak supersymmetry can be realized naturally, and that gauge coupling unification can be preserved.
To be concrete, we consider two Scenarios for the gaugino mass ratios: Scenario I has $M_1 : M_2 : M_3 = 1 : (-1) : 4$ and Scenario II has $M_1 : M_2 : M_3 = \frac{5}{3} : 1 : \frac{8}{3}$, where $M_1$, $M_2$ and $M_3$ are the bino, wino, and gluino masses, respectively. We discuss two cases for the supersymmetry breaking scalar masses and trilinear soft $A$ terms: (A) the universal scalar mass $m_0$, and universal/non-universal trilinear $A$ terms; this case is similar to the mSUGRA/CMSSM. (B) The universal squark and slepton mass $m_0$, universal/non-universal trilinear $A$ terms, and especially non-universal Higgs scalar masses; this case is similar to the NUHM2. Choosing a moderate $\tan\beta = 13$, where $\tan\beta$ is the ratio of the Higgs vacuum expectation values (VEVs) in the SSMs, we scan the viable parameter spaces which satisfy all the current phenomenological constraints. Also, we present concrete benchmark points where the squarks, gluinos and Higgsinos are about a few TeV while the sleptons, bino and winos are several hundreds of GeV. For the universal trilinear soft $A$ term, we can fit all the experimental constraints very well except $(g_{\mu}-2)/2$, whose deviation from the central value is about 2.6$\sigma$. Interestingly, with non-universal trilinear soft $A$ terms, we can fit all the experimental constraints very well; in particular, the deviation of $(g_{\mu}-2)/2$ from the central value is within 1 or 2$\sigma$. Moreover, we comment on the fine-tuning problem as well as the LHC searches. \section{Electroweak Supersymmetry from the GmSUGRA} First, we explain our conventions. In the SSMs, we denote the left-handed quark doublets, right-handed up-type quarks, right-handed down-type quarks, left-handed lepton doublets, right-handed neutrinos and right-handed charged leptons as $Q_i$, $U^c_i$, $D^c_i$, $L_i$, $N^c_i$, and $E^c_i$, respectively. Also, we denote one pair of Higgs doublets as $H_u$ and $H_d$, which give masses to the up-type quarks/neutrinos and the down-type quarks/charged leptons, respectively. We consider the simple GmSUGRA where the GUT group is $SU(5)$ and the Higgs field is in the $SU(5)$ adjoint representation~\cite{Li:2010xr, Balazs:2010ha}. The gauge coupling relation and gaugino mass relation at the GUT scale are as follows~\cite{Li:2010xr, Ellis:1985jn, Li:2010mra} \begin{eqnarray} {{1}\over {\alpha_2}} - {{1}\over {\alpha_3}} ~=~k \left( {{1}\over {\alpha_1}} - {{1}\over {\alpha_3}} \right) ~,~\, \label{GCRelation} \end{eqnarray} \begin{eqnarray} {{M_2}\over {\alpha_2}} - {{M_3}\over {\alpha_3}} ~=~k \left( {{M_1}\over {\alpha_1}} - {{M_3}\over {\alpha_3}} \right) ~,~\, \label{GMRelation} \end{eqnarray} where $k$ is the index of these relations and is equal to $5/3$. Such gauge coupling and gaugino mass relations at the GUT scale can be realized in the F-theory $SU(5)$ models, where the gauge symmetry is broken down to the SM gauge symmetry by turning on the $U(1)_Y$ flux, and in the F-theory $SO(10)$ models, where the gauge symmetry is broken down to the $SU(3)_C \times SU(2)_L \times SU(2)_R \times U(1)_{B-L}$ gauge symmetry by turning on the $U(1)_{B-L}$ flux~\cite{Li:2010mra}. At the GUT scale, we assume $\alpha_1 \simeq \alpha_2 \simeq \alpha_3$ for simplicity, and then the gaugino mass relation becomes \begin{eqnarray} M_2 - M_3 ~=~\frac{5}{3} \left( M_1 - M_3 \right) ~.~\, \end{eqnarray} Thus, there are two free parameters among the gaugino masses.
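To make this relation concrete, the following minimal Python sketch (our own illustrative check; the function name and the normalization to units of $M_{1/2}$ are not part of the model) derives $M_2$ from the two free parameters $M_1$ and $M_3$ and verifies the two sets of ratios used in this paper.

\begin{verbatim}
# GmSUGRA gaugino mass relation at the GUT scale (assuming
# alpha_1 = alpha_2 = alpha_3):  M2 - M3 = (5/3) (M1 - M3),
# so M2 is fixed once the two free parameters (M1, M3) are chosen.
def M2_from(M1, M3):
    return M3 + (5.0 / 3.0) * (M1 - M3)

# Scenario I : M1 : M2 : M3 = 1 : (-1) : 4    (in units of M_1/2)
# Scenario II: M1 : M2 : M3 = 5/3 : 1 : 8/3
assert abs(M2_from(1.0, 4.0) - (-1.0)) < 1e-12
assert abs(M2_from(5.0 / 3.0, 8.0 / 3.0) - 1.0) < 1e-12
\end{verbatim}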
To realize the electroweak supersymmetry, we require that $M_3$ be larger than $M_1$ and $M_2$. In the next Section, we shall consider the following two simple Scenarios for gaugino masses at the GUT scale \begin{eqnarray} {\rm Scenario~ I:}~~~M_1 ~=~ M_{1/2}~,~~~M_2 ~=~ - M_{1/2}~,~~~ M_3 ~=~ 4 M_{1/2}~,~\, \label{Scenario-I} \end{eqnarray} \begin{eqnarray} {\rm Scenario~ II:}~~~M_1 ~=~ \frac{5}{3}M_{1/2}~,~~~M_2 ~=~ M_{1/2}~,~~~ M_3 ~=~ \frac{8}{3} M_{1/2}~,~\, \label{Scenario-II} \end{eqnarray} where $M_{1/2}$ is the normalized gaugino mass scale. Thus, the gluino mass will be much larger than the bino and wino masses at low energy. In addition, the supersymmetry breaking scalar masses at the GUT scale are~\cite{Balazs:2010ha} \begin{eqnarray} m_{\widetilde{Q}_i}^2&=&(m_0^{U})^2+\sqrt{\frac{3}{5}}\beta'_{\bf 10}\frac{1}{6}(m_0^{N})^2 ~,\\ m_{\widetilde{U}_i^c}^2&=&(m_0^{U})^2-\sqrt{\frac{3}{5}}\beta'_{\bf 10}\frac{2}{3}(m_0^{N})^2 ~,\\ m_{\widetilde{E}_i^c}^2&=&(m_0^{U})^2+\sqrt{\frac{3}{5}}\beta'_{\bf 10}(m_0^{N})^2 ~,\\ m_{\widetilde{D}_i^c}^2&=&(m_0^{U})^2+\sqrt{\frac{3}{5}} \beta'_{\bf \bar{5}}\frac{1}{3}(m_0^{N})^2 ~,\\ m_{\widetilde{L}_i}^2&=&(m_0^{U})^2-\sqrt{\frac{3}{5}}\beta'_{\bf \bar{5}}\frac{1}{2}(m_0^{N})^2 ~, \\ m_{\widetilde{H}_u}^2&=&(m_0^{U})^2+\sqrt{\frac{3}{5}}\beta'_{Hu}\frac{1}{2}(m_0^{N})^2 ~, \\ m_{\widetilde{H}_d}^2&=&(m_0^{U})^2-\sqrt{\frac{3}{5}}\beta'_{Hd}\frac{1}{2}(m_0^{N})^2 ~, \end{eqnarray} where $i$ is the generation index, $\beta'_{\bf 10}$, $\beta'_{\bf \bar{5}}$, $\beta'_{Hu}$ and $\beta'_{Hd}$ are coupling constants, and $m_0^{U}$ and $m_0^{N}$ are the scalar masses related to the universal and non-universal parts, respectively. Especially, the squark masses can be much larger than the slepton masses, since the cancellations between the two terms in the slepton masses $m_{\widetilde{E}_i^c}^2$ and $m_{\widetilde{L}_i}^2$ can be realized by slightly fine-tuning $\beta'_{\bf 10}$ and $\beta'_{\bf \bar{5}}$, respectively. Also, the supersymmetry breaking soft masses $m_{\widetilde{H}_u}^2$ and $m_{\widetilde{H}_d}^2$ can be free parameters as well. Interestingly, we can derive the scalar mass relations at the GUT scale \begin{eqnarray} {3m_{\widetilde{D}_i^c}^2+2m_{\widetilde{L}_i}^2}={4m_{\widetilde{Q}_i}^2+m_{\widetilde{U}_i^c}^2} ={6m_{\widetilde{Q}_i}^2-m_{\widetilde{E}_i^c}^2}={2m_{\widetilde{E}_i^c}^2+3m_{\widetilde{U}_i^c}^2}~.~\, \label{SMass-R} \end{eqnarray} Choosing the slepton masses as input parameters, we can parametrize the squark masses as follows \begin{eqnarray} m_{\widetilde{Q}_i}^2 &=& \frac{5}{6} (m_0^{U})^2 + \frac{1}{6} m_{\widetilde{E}_i^c}^2~,~~~\\ m_{\widetilde{U}_i^c}^2 &=& \frac{5}{3}(m_0^{U})^2 -\frac{2}{3} m_{\widetilde{E}_i^c}^2~,~~~\\ m_{\widetilde{D}_i^c}^2 &=& \frac{5}{3}(m_0^{U})^2 -\frac{2}{3} m_{\widetilde{L}_i}^2~.~\, \end{eqnarray} In short, the squark masses can be parametrized by the slepton masses and the universal scalar mass. If the slepton masses are much smaller than the universal scalar mass, we obtain $2 m_{\widetilde{Q}_i}^2 \sim m_{\widetilde{U}_i^c}^2 \sim m_{\widetilde{D}_i^c}^2$.
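As an illustration of this parametrization, the following Python sketch (with purely illustrative input values of our own choosing) computes the squark masses from the universal scalar mass and the slepton masses, and verifies that all four combinations in Eq.~(\ref{SMass-R}) reduce to $5(m_0^U)^2$.

\begin{verbatim}
# Squark masses from the universal scalar mass m0U and the slepton
# masses mE (right-handed) and mL (left-handed); all values in GeV.
def squark_masses_sq(m0U, mE, mL):
    mQ2 = (5.0 / 6.0) * m0U**2 + mE**2 / 6.0
    mU2 = (5.0 / 3.0) * m0U**2 - (2.0 / 3.0) * mE**2
    mD2 = (5.0 / 3.0) * m0U**2 - (2.0 / 3.0) * mL**2
    return mQ2, mU2, mD2

m0U, mE, mL = 2000.0, 400.0, 450.0       # illustrative numbers only
mQ2, mU2, mD2 = squark_masses_sq(m0U, mE, mL)
# Light sleptons together with a large m0U give ~2 TeV squarks:
print([round(x**0.5) for x in (mQ2, mU2, mD2)])

# GUT-scale mass relations, Eq. (SMass-R): all equal to 5 (m0U)^2.
vals = [3*mD2 + 2*mL**2, 4*mQ2 + mU2, 6*mQ2 - mE**2, 2*mE**2 + 3*mU2]
assert all(abs(v / (5.0 * m0U**2) - 1.0) < 1e-12 for v in vals)
\end{verbatim}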
Moreover, we can calculate the supersymmetry breaking trilinear soft $A$ terms $A_U$, $A_D$, and $A_E$, respectively, for the SM fermion Yukawa superpotential terms of the up-type quarks, down-type quarks, and charged leptons~\cite{Balazs:2010ha} \begin{eqnarray} A_{U} &=& A^U_0 + (2 \gamma_U + \gamma'_U) A^{N}_0~,~~~\\ A_{D} &=& A^U_0 +\frac{1}{6} \gamma_D A^{N}_0~,~~~\\ A_{E} &=& A^U_0 + \gamma_D A^{N}_0~,~ \end{eqnarray} where $\gamma_U$, $\gamma'_U$ and $\gamma_D$ are coupling constants, and $A_0^{U}$ and $A_0^{N}$ are the corresponding trilinear soft $A$ terms related to the universal and non-universal parts, respectively. Therefore, $A_U$, $A_D$ and $A_E$ can in general be free parameters in the GmSUGRA. In short, we can parametrize the generic supersymmetry breaking soft mass terms in our simple GmSUGRA as follows: two parameters in the gaugino masses, three parameters for the squark and slepton soft masses, three parameters in the trilinear soft $A$ terms, and two parameters for the Higgs soft masses. We propose the electroweak supersymmetry: {\it the squarks and/or gluinos are heavy, around a few TeV, while the sleptons, bino and winos are light, within one TeV}. The Higgsinos (or the $\mu$ term) can be either heavy or light. Thus, both the gaugino masses $M_1$ and $M_2$ and the slepton/sneutrino soft masses are smaller than one TeV. Also, there are three cases for the gaugino mass $M_3$ and the squark soft masses: (1) $M_3$ is about a few TeV while the squark soft masses are small; (2) $M_3$ is small while the squark soft masses are about a few TeV; (3) both $M_3$ and the squark soft masses are heavy. In this paper, for simplicity, we only consider the first case. The comprehensive study will be presented elsewhere. Interestingly, we can show that gauge coupling unification can be preserved in the electroweak supersymmetry even if the squarks and/or gluinos are about one or two orders of magnitude heavier than the sleptons, bino and winos. The point is that the gauge coupling relation at the GUT scale is given by Eq.~(\ref{GCRelation}). The worst case is when the Higgsinos are light while the gluinos are heavy, so we discuss it as an example. For simplicity, we assume that the masses for the sleptons, bino, winos and Higgsinos are universal, and that the masses for the squarks and gluinos are universal. To prove gauge coupling unification, we only need to calculate the one-loop beta functions for the renormalization scale running from the slepton mass scale to the squark mass scale. The one-loop beta functions $b_1$, $b_2$, and $b_3$, respectively for $U(1)_Y$, $SU(2)_L$ and $SU(3)_C$, are $b_1=27/5$, $b_2=-4/3$, $b_3=-7$. Because $b_1-b_2 = 101/15$ is larger than $b_2-b_3=17/3$, the gauge coupling relation at the GUT scale in Eq.~(\ref{GCRelation}) can be realized properly. Especially, the discrepancies among the SM gauge couplings at the GUT scale are less than a few percent~\cite{Huo:2010fi}. Let us briefly comment on the fine-tuning problem in electroweak gauge symmetry breaking in the SSMs. The radiative electroweak gauge symmetry breaking gives the minimization condition at tree level \begin{eqnarray} \frac{1}{2} M^2_Z &=& -\mu^2 + {{m_{H_d}^2-m_{H_u}^2 \tan^2\beta} \over\displaystyle {\tan^2\beta -1}}~,~\, \end{eqnarray} where $M_Z$ is the $Z$ boson mass.
For moderate and large values of $\tan\beta$, this condition can be simplified to \begin{eqnarray} \frac{1}{2} M^2_Z & \simeq & -\mu^2 - m_{H_u}^2 ~.~\, \end{eqnarray} The electroweak-scale $m_{H_u}^2$ depends on the GUT-scale supersymmetry breaking soft terms, such as the gaugino masses, scalar masses, and trilinear soft $A$ terms, etc., via the renormalization group equation (RGE) running. Thus, if the squarks/gluinos are heavy and the $A$ terms are large, the low energy $m_{H_u}^2$ will be large as well. We then need to fine-tune the large $\mu$ term to realize the correct electroweak gauge symmetry breaking. Such a fine-tuning problem does exist in electroweak supersymmetry, and one solution is to employ the idea of focus point/hyperbolic branch supersymmetry~\cite{Feng:1999mn, Feng:1999zg, Chan:1997bi}, which will be studied elsewhere. \section{Low Energy Supersymmetry Phenomenology} \begin{figure}[htb] \centering \includegraphics[scale=0.95]{tan13-cheng.eps} \caption{The viable parameter spaces in Scenario IA are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, and the blue region with Higgs boson mass larger than 127~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP. The dark khaki region, khaki region and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIA1} \end{figure} \begin{figure}[htb] \centering \includegraphics[scale=0.95]{tan13.eps} \caption{The viable parameter spaces in Scenario IB are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, the dark blue region with Higgs boson mass from 123~GeV to 124~GeV, the upper blue region with Higgs boson mass larger than 127~GeV, and the lower blue region with Higgs boson mass from 114.4~GeV to 123~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP. The yellow region, grey region and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIB1} \end{figure} We study two Scenarios for gaugino masses, as given in Eqs.~(\ref{Scenario-I}) and (\ref{Scenario-II}). For simplicity, we will consider two cases for the scalar masses and trilinear soft $A$ terms: (A) the universal scalar mass $m_0$ and universal/non-universal trilinear soft $A$ terms; this case is similar to the mSUGRA. (B) The universal squark and slepton soft mass $m_0$, universal/non-universal trilinear soft $A$ terms, and non-universal Higgs soft masses; this case is similar to the NUHM2. Therefore, we will study four kinds of Scenarios: Scenario IA, Scenario IB, Scenario IIA, and Scenario IIB. In our numerical study, we will use the {\tt SuSpect} program~\cite{Djouadi:2002ze} to calculate the supersymmetric particle spectra, and use the {\tt MicrOMEGAs} program~\cite{Belanger:2006is, Belanger:2010gh} to calculate the phenomenological constraints, the LSP neutralino relic density, and the direct detection cross-sections. We will focus on the lightest CP-even Higgs boson mass from 123~GeV to 127~GeV in the numerical results, and choose benchmark points with Higgs boson mass only between 125.0 GeV and 126.0 GeV. The current top quark mass is $m_t = 173.2\pm 0.9$~GeV~\cite{Lancaster:2011wr}.
Because the lightest CP-even Higgs boson mass is sensitive to the top quark mass, we take the upper bound $m_t=174.1$~GeV in our numerical study. We emphasize that the viable parameter spaces with Higgs boson mass larger than 127~GeV in the following discussions are still fine, since we can choose a smaller value for the top quark mass within its uncertainty. We employ the following experimental constraints: (1) The cold dark matter relic density is $0.05 \leq \Omega_{\chi_1^0} h^2 \leq 0.135$; (2) the $b \rightarrow s\gamma$ branching ratio is $2.77 \times 10^{-4} \leq Br(b \rightarrow s\gamma) \leq 4.27 \times 10^{-4}$; (3) the $3\sigma $ $(g_{\mu} - 2)/2$ constraint is $2.1 \times 10^{-10} < \Delta a_{\mu} < 40.1 \times 10^{-10}$; (4) the upper bound on ${\rm BR}(B_{s} \rightarrow \mu^+ \mu^-)$ is $1.1 \times 10^{-8}$; (5) the experimental limit on the process $B_{u} \rightarrow \tau {\bar \nu}_{\tau}$ is $0.85 \leq {\rm BR}(B_{u} \rightarrow \tau {\bar \nu}_{\tau})/{\rm SM} \leq 1.65 $; (6) the LEP lower bound on the lightest CP-even Higgs boson mass is $ 114.4$ GeV~\cite{Barate:2003sz}, which is close to the current lower bound of 115.5~GeV from the ATLAS Collaboration~\cite{Collaboration:2012si}. In our electroweak supersymmetry, the dominant component of the LSP neutralino will be the bino; thus, the constraints from the XENON100 experiment~\cite{Aprile:2011hi} can be evaded automatically due to the heavy squarks. First, let us discuss Scenario I. To scan the viable parameter spaces in the $M_{1/2}-m_0$ plane, we consider the universal trilinear soft $A$ term $A_0$, and we choose $\tan\beta =13$ and $A_0=-4000$~GeV. We present the viable parameter spaces in Scenarios IA and IB respectively in Fig.~\ref{fig-SIA1} and Fig.~\ref{fig-SIB1}. We emphasize again that the viable parameter spaces with Higgs boson mass larger than 127~GeV in all the figures are still fine, because we can choose a smaller value for the top quark mass within its uncertainty. It is easy to understand that Scenario IB has larger viable parameter spaces, since the Higgs scalar masses are hidden variables in Fig.~\ref{fig-SIB1}. Interestingly, in Scenario IA, we find a narrow viable range for $m_0$, roughly from 410~GeV to 440~GeV. This narrow $m_0$ range is obtained in the electroweak supersymmetry since the observed dark matter relic density is realized via the LSP neutralino-stau coannihilations. Moreover, we present the benchmark points in Tables~\ref{tab:SIA1} and \ref{tab:SIB1} for Scenarios IA and IB, respectively. In these benchmark points, the squarks, gluinos, and Higgsinos are heavy while the sleptons, bino and winos are light. Thus, the electroweak supersymmetry is realized. Similar results hold for all the following benchmark points in this paper. In particular, the LSP neutralino has a $99.99\%$ bino component due to the heavy Higgsinos. However, the deviations of $(g_{\mu}-2)/2$ from the central value are about 2.88$\sigma$ and 2.63$\sigma$ for the benchmark points in Tables~\ref{tab:SIA1} and \ref{tab:SIB1}, respectively.
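These deviations follow directly from the experimental value $\Delta a_{\mu} = (26.1 \pm 8.0) \times 10^{-10}$ quoted in the Introduction; a minimal Python cross-check (illustrative only) is:

\begin{verbatim}
# Deviation of a benchmark Delta a_mu from the measured central value,
# in units of the quoted 1-sigma error (all numbers in 10^-10 units).
central, sigma = 26.1, 8.0
for label, da in [("Scenario IA benchmark", 3.07),
                  ("Scenario IB benchmark", 5.06)]:
    print(label, round((central - da) / sigma, 2), "sigma")
# -> 2.88 sigma and 2.63 sigma, as quoted above
\end{verbatim}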
~~~ \begin{table}[ht] \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\widetilde{\chi}_{1}^{0}$&$114$&$\widetilde{\chi}_{1}^{\pm}$&$262$& $\widetilde{e}_{R}/\widetilde{\mu}_{R}$&$426$&$\widetilde{t}_{1}$&$1161$& $\widetilde{u}_{R}/\widetilde{c}_{R}$&$2150$&$h^0$&$125.0$\\ \hline $\widetilde{\chi}_{2}^{0}$&$262$&$\widetilde{\chi}_{2}^{\pm}$&$2166$ &$\widetilde{e}_{L}/\widetilde{\mu}_{L}$&$447$&$\widetilde{t}_{2}$&$1755$ &$\widetilde{u}_{L}/\widetilde{c}_{L}$&$2150$&$A^0/H^0$&$2132$\\ \hline $\widetilde{\chi}_{3}^{0}$&$2165$&$\widetilde{\nu}_{e/\mu}$&$440$ &$\widetilde{\tau}_{1}$&$129$&$\widetilde{b}_{1}$&$1730$ &$\widetilde{d}_{R}/\widetilde{s}_{R}$&$2152$&$H^{\pm}$&$2134$\\ \hline $\widetilde{\chi}_{4}^{0}$&$2165$&$\widetilde{\nu}_{\tau}$&$353$ &$\widetilde{\tau}_{2}$&$395$&$\widetilde{b}_{2}$&$2097$ &$\widetilde{d}_{L}/\widetilde{s}_{L}$&$2152$&$\widetilde{g}$&$2436$\\ \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IA with $\tan\beta =13$, $M_{1/2}=280$~GeV, $m_0=411$~GeV and $A_0=-4000$~GeV. In this benchmark point, we have $\Omega_{\chi_1^0} h^2=0.0942$, ${\rm BR}(b \rightarrow s\gamma)=3.22\times 10^{-4}$, $\Delta a_{\mu} = 3.07 \times 10^{-10}$, ${\rm BR}(B_{s}^{0} \rightarrow \mu^+ \mu^-) =3.15\times 10^{-9}$, and ${\rm BR}(B_u \rightarrow \tau \bar{\nu})/{\rm SM}=0.998 $. Moreover, the LSP neutralino is $99.99\%$ bino. The LSP neutralino-proton spin independent and dependent cross sections are respectively $5.1\times 10^{-12}$~pb and $3.9\times 10^{-12}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $5.2\times 10^{-12}$~pb and $2.4\times 10^{-9}$~pb.} \label{tab:SIA1} \end{table} ~~~ \begin{table}[ht] \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\widetilde{\chi}_{1}^{0}$&$164$&$\widetilde{\chi}_{1}^{\pm}$&$375$& $\widetilde{e}_{R}/\widetilde{\mu}_{R}$&$488$&$\widetilde{t}_{1}$&$2043$& $\widetilde{u}_{R}/\widetilde{c}_{R}$&$2937$&$h^0$&$125.2$\\ \hline $\widetilde{\chi}_{2}^{0}$&$375$&$\widetilde{\chi}_{2}^{\pm}$&$2598$ &$\widetilde{e}_{L}/\widetilde{\mu}_{L}$&$411$&$\widetilde{t}_{2}$&$2558$ &$\widetilde{u}_{L}/\widetilde{c}_{L}$&$2949$&$A^0/H^0$&$2792$\\ \hline $\widetilde{\chi}_{3}^{0}$&$2597$&$\widetilde{\nu}_{e/\mu}$&$403$ &$\widetilde{\tau}_{1}$&$182$&$\widetilde{b}_{1}$&$2543$ &$\widetilde{d}_{R}/\widetilde{s}_{R}$&$2952$&$H^{\pm}$&$2794$\\ \hline $\widetilde{\chi}_{4}^{0}$&$2597$&$\widetilde{\nu}_{\tau}$&$302$ &$\widetilde{\tau}_{2}$&$397$&$\widetilde{b}_{2}$&$2899$ &$\widetilde{d}_{L}/\widetilde{s}_{L}$&$2950$&$\widetilde{g}$&$3394$\\ \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IB with $\tan\beta =13$, $M_{1/2}=400$~GeV, $m_0=380$~GeV, $A_0=-4000$~GeV, $m_{H_u}=1200$~GeV, and $m_{H_d}=0.0$~GeV. In this benchmark point, we have $\Omega_{\chi_1^0} h^2=0.111$, ${\rm BR}(b \rightarrow s\gamma)=3.26\times 10^{-4}$, $\Delta a_{\mu} = 5.06 \times 10^{-10}$, ${\rm BR}(B_{s}^{0} \rightarrow \mu^+ \mu^-) =3.13\times 10^{-9}$, and ${\rm BR}(B_u \rightarrow \tau \bar{\nu})/ {\rm SM} =0.999 $. Moreover, the LSP neutralino is $99.99\%$ bino. 
The LSP neutralino-proton spin independent and dependent cross sections are respectively $3.4\times 10^{-12}$~pb and $2.2\times 10^{-10}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $3.5\times 10^{-12}$~pb and $1.5\times 10^{-9}$~pb.} \label{tab:SIB1} \end{table} ~~~ \begin{table} \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\tilde{\chi}_{1}^{0}$ & $160$ & $\tilde{\chi}_{1}^{\pm}$ & $365$ & $\tilde{e}_{R}/\tilde{\mu}_{R}$ & $268$ & $\tilde{t}_{1}$ & $1967$ & $\tilde{u}_{R}/\tilde{c}_{R}$ & $2862$ & $h^{0}$ & $125.4$\tabularnewline \hline $\tilde{\chi}_{2}^{0}$ & $365$ & $\tilde{\chi}_{2}^{\pm}$ & $2548$ & $\tilde{e}_{L}/\tilde{\mu}_{L}$ & $332$ & $\tilde{t}_{2}$ & $2475$ & $\tilde{u}_{L}/\tilde{c}_{L}$ & $2863$ & $A^{0}/H^{0}$ & $2507$\tabularnewline \hline $\tilde{\chi}_{3}^{0}$ & $2547$ & $\tilde{\nu}_{e/\mu}$ & $322$ & $\tilde{\tau}_{1}$ & $176$ & $\tilde{b}_{1}$ & $2459$ & $\tilde{d}_{R}/\tilde{s}_{R}$ & $2864$ & $H^{\pm}$ & $2508$\tabularnewline \hline $\tilde{\chi}_{4}^{0}$ & $2547$ & $\tilde{\nu}_{\tau}$ & $321$ & $\tilde{\tau}_{2}$ & $385$ & $\tilde{b}_{2}$ & $2813$ & $\tilde{d}_{L}/\tilde{s}_{L}$ & $2864$ & $\tilde{g}$ & $3311$\tabularnewline \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IA with $\tan\beta =13$, $M_{1/2}=390$~GeV, $m_{0}=225$~GeV, $A_Q=-4000$~GeV and $A_E=-400$~GeV. In this benchmark point, we have $\Omega_{\chi_{1}^{0}}h^{2}=0.1105$, ${\rm BR}(b\rightarrow s\gamma)=3.227\times10^{-4}$, $\Delta a_{\mu}=19.3\times10^{-10}$, ${\rm BR}(B_{s}^{0}\rightarrow\mu^{+}\mu^{-})=3.13\times10^{-9}$, and ${\rm BR}(B_{u}\rightarrow\tau\bar{\nu})/{\rm SM}=0.999$. Moreover, the LSP neutralino is $99.98\%$ bino. The LSP neutralino-proton spin independent and dependent cross sections are respectively $3.6\times10^{-12}$~pb and $2.2\times10^{-10}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $3.7\times10^{-12}$~pb and $1.6\times10^{-9}$~pb.} \label{tab:SIA2} \end{table} ~~~ \begin{table}[ht] \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\widetilde{\chi}_{1}^{0}$&$121.7$&$\widetilde{\chi}_{1}^{\pm}$&$279.4$& $\widetilde{e}_{R}/\widetilde{\mu}_{R}$&$269.2$&$\widetilde{t}_{1}$&$1279.2$& $\widetilde{u}_{R}/\widetilde{c}_{R}$&$2256.4$&$h^0$&$125.2$\\ \hline $\widetilde{\chi}_{2}^{0}$&$279.4$&$\widetilde{\chi}_{2}^{\pm}$&$2188.0$ &$\widetilde{e}_{L}/\widetilde{\mu}_{L}$&$270.0$&$\widetilde{t}_{2}$&$1862.4$ &$\widetilde{u}_{L}/\widetilde{c}_{L}$&$2259.5$&$A^0/H^0$&$2272$\\ \hline $\widetilde{\chi}_{3}^{0}$&$2186.9$&$\widetilde{\nu}_{e/\mu}$&$258.6$ &$\widetilde{\tau}_{1}$&$140.6$&$\widetilde{b}_{1}$&$1839.0$ &$\widetilde{d}_{R}/\widetilde{s}_{R}$&$2261.3$&$H^{\pm}$&$2274$\\ \hline $\widetilde{\chi}_{4}^{0}$&$2187.2$&$\widetilde{\nu}_{\tau}$&$252.1$ &$\widetilde{\tau}_{2}$&$340.0$&$\widetilde{b}_{2}$&$2207$ &$\widetilde{d}_{L}/\widetilde{s}_{L}$&$2260.9$&$\widetilde{g}$&$2593.7$\\ \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IB with $\tan\beta =13$, $M_{1/2}=300$~GeV, $m_0=210$~GeV, $A_Q=-4000$~GeV, $A_E=-400$~GeV, $m_{H_u}=600$~GeV and $m_{H_d}=800$~GeV. 
In this benchmark point, we have $\Omega_{\chi_1^0} h^2=0.114$, ${\rm BR}(b \rightarrow s\gamma)=3.32\times 10^{-4}$, $\Delta a_{\mu} = 26.4 \times 10^{-10}$, ${\rm BR}(B_{s}^{0} \rightarrow \mu^+ \mu^-) =3.14\times 10^{-9}$, and ${\rm BR}(B_u \rightarrow \tau \bar{\nu})/{\rm SM}=0.998 $. Moreover, the LSP neutralino is $99.99\%$ bino. The LSP neutralino-proton spin independent and dependent cross sections are respectively $5.2\times 10^{-12}$~pb and $6.28\times 10^{-11}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $5.3\times 10^{-12}$~pb and $2.46\times 10^{-9}$~pb.} \label{tab:SIB2} \end{table} ~~~ \begin{table} \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\tilde{\chi}_{1}^{0}$ & $299$ & $\tilde{\chi}_{1}^{\pm}$ & $341$ & $\tilde{e}_{R}/\tilde{\mu}_{R}$ & $537$ & $\tilde{t}_{1}$ & $1076$ & $\tilde{u}_{R}/\tilde{c}_{R}$ & $2180$ & $h^{0}$ & $125.2$\tabularnewline \hline $\tilde{\chi}_{2}^{0}$ & $341$ & $\tilde{\chi}_{2}^{\pm}$ & $2245$ & $\tilde{e}_{L}/\tilde{\mu}_{L}$ & $549$ & $\tilde{t}_{2}$ & $1747$ & $\tilde{u}_{L}/\tilde{c}_{L}$ & $2181$ & $A^{0}/H^{0}$ & $2223$\tabularnewline \hline $\tilde{\chi}_{3}^{0}$ & $2244$ & $\tilde{\nu}_{e/\mu}$ & $543$ & $\tilde{\tau}_{1}$ & $308$ & $\tilde{b}_{1}$ & $1724$ & $\tilde{d}_{R}/\tilde{s}_{R}$ & $2178$ & $H^{\pm}$ & $2225$\tabularnewline \hline $\tilde{\chi}_{4}^{0}$ & $2245$ & $\tilde{\nu}_{\tau}$ & $461$ & $\tilde{\tau}_{2}$ & $495$ & $\tilde{b}_{2}$ & $2118$ & $\tilde{d}_{L}/\tilde{s}_{L}$ & $2182$ & $\tilde{g}$ & $2453$\tabularnewline \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IIA with $\tan\beta =13$, $M_{1/2}=424$~GeV, $m_{0}=468$~GeV and $A_0=-4000$~GeV. In this benchmark point, we have $\Omega_{\chi_{1}^{0}}h^{2}=0.1110$, ${\rm BR}(b\rightarrow s\gamma)=3.16\times10^{-4}$, $\Delta a_{\mu}=5.67\times10^{-10}$, ${\rm BR}(B_{s}^{0}\rightarrow\mu^{+}\mu^{-})=3.15\times10^{-9}$, and ${\rm BR}(B_{u}\rightarrow\tau\bar{\nu})/{\rm SM}=0.998$. Moreover, the LSP neutralino is $99.97\%$ bino. 
The LSP neutralino-proton spin independent and dependent cross sections are respectively $9.7\times10^{-12}$~pb and $7.9\times10^{-12}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $9.9\times10^{-12}$~pb and $2.4\times10^{-9}$~pb.} \label{tab:SIIA1} \end{table} ~~~ \begin{table}[ht] \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\widetilde{\chi}_{1}^{0}$&$310.0$&$\widetilde{\chi}_{1}^{\pm}$&$353.0$& $\widetilde{e}_{R}/\widetilde{\mu}_{R}$&$657.0$&$\widetilde{t}_{1}$&$1120.1$& $\widetilde{u}_{R}/\widetilde{c}_{R}$&$2229.5$&$h^0$&$125.5$\\ \hline $\widetilde{\chi}_{2}^{0}$&$353.0$&$\widetilde{\chi}_{2}^{\pm}$&$2251.9$ &$\widetilde{e}_{L}/\widetilde{\mu}_{L}$&$473.8$&$\widetilde{t}_{2}$&$1818.7$ &$\widetilde{u}_{L}/\widetilde{c}_{L}$&$2257.3$&$A^0/H^0$&$2798$\\ \hline $\widetilde{\chi}_{3}^{0}$&$2250.4$&$\widetilde{\nu}_{e/\mu}$&$467.4$ &$\widetilde{\tau}_{1}$&$320.1$&$\widetilde{b}_{1}$&$1795.6$ &$\widetilde{d}_{R}/\widetilde{s}_{R}$&$2260.1$&$H^{\pm}$&$2798$\\ \hline $\widetilde{\chi}_{4}^{0}$&$2251.5$&$\widetilde{\nu}_{\tau}$&$348.4$ &$\widetilde{\tau}_{2}$&$511.0$&$\widetilde{b}_{2}$&$2195$ &$\widetilde{d}_{L}/\widetilde{s}_{L}$&$2258.6$&$\widetilde{g}$&$2539.0$\\ \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IIB with $\tan\beta =13$, $M_{1/2}=440$~GeV, $m_0=460$~GeV, $A_0=-4000$~GeV, $m_{H_u}=600$~GeV and $m_{H_d}=1800$~GeV. In this benchmark point, we have $\Omega_{\chi_1^0} h^2=0.12$, ${\rm BR}(b \rightarrow s\gamma)=3.16\times 10^{-4}$, $\Delta a_{\mu} = 5.58 \times 10^{-10}$, ${\rm BR}(B_{s}^{0} \rightarrow \mu^+ \mu^-) =3.14\times 10^{-9}$, and ${\rm BR}(B_u \rightarrow \tau \bar{\nu})/{\rm SM}=0.999 $. Moreover, the LSP neutralino is $99.99\%$ bino. The LSP neutralino-proton spin independent and dependent cross sections are respectively $9.23\times 10^{-12}$~pb and $2.92\times 10^{-11}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $9.40\times 10^{-12}$~pb and $2.41\times 10^{-9}$~pb.} \label{tab:SIIB1} \end{table} ~~~ \begin{table} \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\tilde{\chi}_{1}^{0}$ & $318$ & $\tilde{\chi}_{1}^{\pm}$ & $362$ & $\tilde{e}_{R}/\tilde{\mu}_{R}$ & $396$ & $\tilde{t}_{1}$ & $1210$ & $\tilde{u}_{R}/\tilde{c}_{R}$ & $2275$ & $h^{0}$ & $125.7$\tabularnewline \hline $\tilde{\chi}_{2}^{0}$ & $362$ & $\tilde{\chi}_{2}^{\pm}$ & $2312$ & $\tilde{e}_{L}/\tilde{\mu}_{L}$ & $416$ & $\tilde{t}_{2}$ & $1849$ & $\tilde{u}_{L}/\tilde{c}_{L}$ & $2276$ & $A^{0}/H^{0}$ & $2281$\tabularnewline \hline $\tilde{\chi}_{3}^{0}$ & $2311$ & $\tilde{\nu}_{e/\mu}$ & $408$ & $\tilde{\tau}_{1}$ & $327$ & $\tilde{b}_{1}$ & $1827$ & $\tilde{d}_{R}/\tilde{s}_{R}$ & $2272$ & $H^{\pm}$ & $2284$\tabularnewline \hline $\tilde{\chi}_{4}^{0}$ & $2312$ & $\tilde{\nu}_{\tau}$ & $405$ & $\tilde{\tau}_{2}$ & $463$ & $\tilde{b}_{2}$ & $2213$ & $\tilde{d}_{L}/\tilde{s}_{L}$ & $2277$ & $\tilde{g}$ & $2597$\tabularnewline \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IIA with $\tan\beta =13$, $M_{1/2}=452$~GeV, $m_{0}=280$~GeV, $A_Q=-4000$~GeV and $A_E=-400$~GeV. 
In this benchmark point, we have $\Omega_{\chi_{1}^{0}}h^{2}=0.1125$, ${\rm BR}(b\rightarrow s\gamma)=3.18\times10^{-4}$, $\Delta a_{\mu}=10.6\times10^{-10}$, ${\rm BR}(B_{s}^{0}\rightarrow\mu^{+}\mu^{-})=3.15\times10^{-9}$, and ${\rm BR}(B_{u}\rightarrow\tau\bar{\nu})/{\rm SM}=0.998$. Moreover, the LSP neutralino is $99.97\%$ bino. The LSP neutralino-proton spin independent and dependent cross sections are respectively $9.2\times10^{-12}$~pb and $2.0\times10^{-11}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $9.39\times10^{-12}$~pb and $2.2\times10^{-9}$~pb.} \label{tab:SIIA2} \end{table} ~~~ \begin{table}[ht] \begin{center} \begin{tabular}{|c|c||c|c||c|c||c|c||c|c||c|c|} \hline $\widetilde{\chi}_{1}^{0}$&$309.1$&$\widetilde{\chi}_{1}^{\pm}$&$351.8$& $\widetilde{e}_{R}/\widetilde{\mu}_{R}$&$449.7$&$\widetilde{t}_{1}$&$1045.5$& $\widetilde{u}_{R}/\widetilde{c}_{R}$&$2214.8$&$h^0$&$125.0$\\ \hline $\widetilde{\chi}_{2}^{0}$&$351.8$&$\widetilde{\chi}_{2}^{\pm}$&$2144.9$ &$\widetilde{e}_{L}/\widetilde{\mu}_{L}$&$376.2$&$\widetilde{t}_{2}$&$1765.9$ &$\widetilde{u}_{L}/\widetilde{c}_{L}$&$2224.8$&$A^0/H^0$&$2498$\\ \hline $\widetilde{\chi}_{3}^{0}$&$2143.3$&$\widetilde{\nu}_{e/\mu}$&$368.2$ &$\widetilde{\tau}_{1}$&$315.8$&$\widetilde{b}_{1}$&$1742.6$ &$\widetilde{d}_{R}/\widetilde{s}_{R}$&$2223.6$&$H^{\pm}$&$2499$\\ \hline $\widetilde{\chi}_{4}^{0}$&$2144.5$&$\widetilde{\nu}_{\tau}$&$352.2$ &$\widetilde{\tau}_{2}$&$457.6$&$\widetilde{b}_{2}$&$2159.4$ &$\widetilde{d}_{L}/\widetilde{s}_{L}$&$2226.1$&$\widetilde{g}$&$2533.7$\\ \hline \end{tabular} \end{center} \caption{Supersymmetric particle and Higgs boson mass spectrum (in GeV) for a benchmark point in Scenario IIB with $\tan\beta =13$, $M_{1/2}=440$~GeV, $m_0=280$~GeV, $A_Q=-4000$~GeV, $A_E=-400$~GeV, $m_{H_u}=1000$~GeV, and $m_{H_d}=1400$~GeV. In this benchmark point, we have $\Omega_{\chi_1^0} h^2=0.09$, ${\rm BR}(b \rightarrow s\gamma)=3.14\times 10^{-4}$, $\Delta a_{\mu} = 10.3 \times 10^{-10}$, ${\rm BR}(B_{s}^{0} \rightarrow \mu^+ \mu^-) =3.15\times 10^{-9}$, and ${\rm BR}(B_u \rightarrow \tau \bar{\nu})/{\rm SM}=0.999 $. Moreover, the LSP neutralino is $99.99\%$ bino. The LSP neutralino-proton spin independent and dependent cross sections are respectively $1.11\times 10^{-11}$~pb and $1.82\times 10^{-10}$~pb, and the LSP neutralino-neutron spin independent and dependent cross sections are respectively $1.14\times 10^{-11}$~pb and $3.25\times 10^{-9}$~pb.} \label{tab:SIIB2} \end{table} In order to have viable parameter spaces with better values for $(g_{\mu}-2)/2$, we need to decrease the smuon masses. Thus, we consider non-universal trilinear soft $A$ terms. We assume that $A_U=A_D\equiv A_Q$ is much larger in magnitude than $A_E$. To scan the viable parameter spaces in the $M_{1/2}-m_0$ plane, we choose $\tan\beta =13$, $A_Q=-4000$~GeV, and $A_E =-400$~GeV. We present the viable parameter space in Scenarios IA and IB respectively in Fig.~\ref{fig-SIA2} and Fig.~\ref{fig-SIB2}. Moreover, we present the benchmark points in Tables~\ref{tab:SIA2} and \ref{tab:SIB2} for Scenarios IA and IB, respectively. Similar to the above, the LSP neutralinos have $99.98\%$ and $99.99\%$ bino components in Tables~\ref{tab:SIA2} and \ref{tab:SIB2}, respectively. Especially, the deviations of $(g_{\mu}-2)/2$ from the central value are within 1$\sigma$ in both benchmark points.
\begin{figure}[htb] \centering \includegraphics[scale=1]{m0m121m14splitA13.eps} \caption{The viable parameter spaces in Scenario IA are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, the dark blue region with Higgs boson mass from 123~GeV to 124~GeV, the upper blue region with Higgs boson mass larger than 127~GeV, and the lower blue region with Higgs boson mass from 114.4~GeV to 123~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP. The dark khaki region, khaki region, and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIA2} \end{figure} \begin{figure}[htb] \centering \includegraphics[scale=0.95]{114amu.eps} \caption{The viable parameter spaces in Scenario IB are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, the dark blue region with Higgs boson mass from 123~GeV to 124~GeV, and the blue region with Higgs boson mass from 114.4~GeV to 123~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP. The yellow region, grey region and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIB2} \end{figure} Second, we discuss Scenario II. To scan the viable parameter spaces in the $M_{1/2}-m_0$ plane, we consider the universal trilinear soft $A$ term $A_0$, and we choose $\tan\beta =13$ and $A_0=-4000$~GeV. We present the viable parameter spaces in Scenarios IIA and IIB respectively in Fig.~\ref{fig-SIIA1} and Fig.~\ref{fig-SIIB1}. Moreover, we present the benchmark points in Tables~\ref{tab:SIIA1} and \ref{tab:SIIB1} for Scenarios IIA and IIB, respectively. In particular, the LSP neutralinos have $99.97\%$ and $99.99\%$ bino components, due to the heavy Higgsinos, in Tables~\ref{tab:SIIA1} and \ref{tab:SIIB1}, respectively. However, the deviations of $(g_{\mu}-2)/2$ from the central value are about 2.6$\sigma$ for both benchmark points. \begin{figure}[htb] \centering \includegraphics[scale=1]{m0m12538uniA13.eps} \caption{The viable parameter spaces in Scenario IIA are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, the dark blue region with Higgs boson mass from 123~GeV to 124~GeV, and the blue region with Higgs boson mass from 114.4~GeV to 123~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP. The dark khaki region, khaki region and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIIA1} \end{figure} \begin{figure}[htb] \centering \includegraphics[scale=0.95]{538.eps} \caption{The viable parameter spaces in Scenario IIB are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, the dark blue region with Higgs boson mass from 123~GeV to 124~GeV, the upper blue region with Higgs boson mass larger than 127~GeV, and the lower blue region with Higgs boson mass from 114.4~GeV to 123~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP.
The yellow region, grey region and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIIB1} \end{figure} Moreover, we consider the non-universal trilinear soft $A$ terms. To scan the viable parameter spaces in the $M_{1/2}-m_0$ plane, we choose $\tan\beta =13$, $A_Q=-4000$~GeV, and $A_E =-400$~GeV. We present the viable parameter spaces in Scenarios IIA and IIB respectively in Fig.~\ref{fig-SIIA2} and Fig.~\ref{fig-SIIB2}. Moreover, we present the benchmark points in Tables~\ref{tab:SIIA2} and \ref{tab:SIIB2} for Scenarios IIA and IIB, respectively. Similar to the above, the LSP neutralinos have $99.97\%$ and $99.99\%$ bino components in Tables~\ref{tab:SIIA2} and \ref{tab:SIIB2}, respectively. Especially, the deviations of $(g_{\mu}-2)/2$ from the central value are within 2$\sigma$ in both benchmark points. \begin{figure}[htb] \centering \includegraphics[scale=1]{m0m12538splitA13.eps} \caption{The viable parameter spaces in Scenario IIA are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, the dark blue region with Higgs boson mass from 123~GeV to 124~GeV, and the blue region with Higgs boson mass from 114.4~GeV to 123~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP. The dark khaki region, khaki region and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIIA2} \end{figure} \begin{figure}[htb] \centering \includegraphics[scale=0.95]{538amu.eps} \caption{The viable parameter spaces in Scenario IIB are the red region with Higgs boson mass from 124~GeV to 126~GeV, the green region with Higgs boson mass from 126~GeV to 127~GeV, the dark blue region with Higgs boson mass from 123~GeV to 124~GeV, the upper blue region with Higgs boson mass larger than 127~GeV, and the lower blue region with Higgs boson mass from 114.4~GeV to 123~GeV. The white region is excluded because there is no RGE solution or $\chi_1^0$ is not the LSP. The yellow region, grey region and light grey region are excluded by the $(g_{\mu}-2)/2$ constraint, the cold dark matter relic density, and the LEP constraints, respectively. } \label{fig-SIIB2} \end{figure} The LHC searches for electroweak supersymmetry look for trilepton plus missing transverse energy signals, which arise from the associated production and decays of the lightest chargino $\chi_1^+$ and the second neutralino $\chi_2^0$~\cite{Eckel:2011pw}. In quite a few benchmark points of our electroweak supersymmetry, only the light stau is lighter than $\chi_1^+$ and $\chi_2^0$. Thus, it is different from the previous work~\cite{Eckel:2011pw}, and definitely deserves further detailed study. \section{Conclusion} We proposed the electroweak supersymmetry around the electroweak scale: the squarks and/or gluinos are around a few TeV while the sleptons, sneutrinos, bino and winos are within one TeV. The Higgsinos can be either heavy or light. Thus, the constraints from the ATLAS and CMS supersymmetry and Higgs searches and from the $b \rightarrow s\gamma$, $B_{s} \rightarrow \mu^+ \mu^-$, and $B_{u} \rightarrow \tau {\bar \nu}_{\tau}$ processes can be satisfied automatically due to the heavy squarks. Also, the constraints from dimension-five proton decay in the supersymmetric GUTs can be relaxed as well.
In addition, the $(g_{\mu} - 2)/2$ experimental result can be explained due to the light sleptons. With bino as the dominant component of the LSP neutralino, we obtained the observed dark matter relic density via the neutralino-stau coannihilations, and the XENON experimental constraint can be evaded due to the heavy squarks as well. Considering the GmSUGRA, we showed explicitly that the electroweak supersymmetry can be realized, and the gauge coupling unification can be preserved. With two Scenarios, we presented the viable parameter spaces that satisfy all the current phenomenological constraints. Furthermore, we commented on the fine-tuning problem and LHC searches. \begin{acknowledgments} This research was supported in part by the Natural Science Foundation of China under grant numbers 10821504 and 11075194 (TC, JL, TL and CT), and by the DOE grant DE-FG03-95-Er-40917 (TL and DVN). \end{acknowledgments}
\section{Introduction} Predictability in turbulence~\cite{Bohr1998,Cencini2009} is an important aspect of certain practical problems, such as weather forecasting~\cite{Lorenz1969,Leith1971,Leith1972}. This is because, owing to its inherent multiscale dynamics, turbulence does not lose its predictability as rapidly as one might expect for a highly chaotic phenomenon. This enables one to make long-range weather predictions. Although the predictability in three-dimensional (3D) and two-dimensional (2D) turbulence has been extensively studied~\cite{Boffetta1997,Boffetta2001,Boffetta2017,Berera2018}, the study of predictability in rotating turbulence~\cite{Godeferd2015,Greenspan1968}, which is observed in many natural systems~\cite{Barnes2001,Cho2008,Aurnou2015} and is a key factor in weather prediction models~\cite{Bartello1995,Vannitsem2017}, is relatively limited~\cite{Ngan2009} and thus requires further investigation. The predictability of the future state of a chaotic system with complete knowledge of the evolution laws is severely limited by the sensitive dependence on initial conditions and hence by an imperfect knowledge of the present state. For example, the predictability in turbulence is limited by the error in the state caused by the velocity fluctuations---induced by the thermal fluctuations representing the chaotic molecular motions in the fluid---at the fastest timescales near the dissipation scale~\cite{Ruelle1979,Crisanti1993a}. In the phase space of the chaotic system, the distance $\delta(t)$ between two trajectories initially separated by an infinitesimal error $\delta_0$ increases exponentially as $\delta(t) \simeq \delta_0 \exp(\lambda t)$, with a typical growth rate $\lambda$ known as the maximum Lyapunov exponent (in the limit $t \rightarrow \infty$). Therefore, the future state of the chaotic system is expected to be predictable, within a tolerance error $\delta_{\rm max}$ of interest, up to a time, the predictability time $T$, of the order of $\lambda^{-1}$: \begin{equation*} T \sim \frac{1}{\lambda} \ln \norm{\frac{\delta_{\rm max}}{\delta_0}}. \end{equation*} In many situations, such as turbulence, the predictability time based on the maximum Lyapunov exponent becomes irrelevant, being insignificant compared to the observed predictability time~\cite{Bohr1998}. Turbulence has many degrees of freedom with different characteristic lengthscales and timescales (eddy turnover times)~\cite{Aurell1996,Lorenz1969}, which interact nonlinearly with each other: the infinitesimally small error at the Kolmogorov scale grows exponentially and propagates to the larger scales due to nonlinear interactions. Thus, the error at relatively large scales does not remain infinitesimally small, and hence the predictability time determined by the maximum Lyapunov exponent is insufficient and irrelevant for predicting the state of a turbulent system at the large scales of interest~\cite{Aurell1996a}. Lorenz~\cite{Lorenz1969} proposed, based on physical arguments stemming from the assumptions in the energy cascade picture of turbulence, that the time $\tau(k)$ it takes a non-infinitesimal error at scale $l_0 \sim 1/2k$ to cause complete uncertainty (by growing and transferring) at the larger scale $l \sim 1/k$ is proportional to the characteristic eddy turnover time at scale $l$. In simpler terms, the evolution of a non-infinitesimal error at different scales is governed by the corresponding characteristic timescales~\cite{Lorenz1969,Leith1972,Ruelle1979}.
A small perturbation (error) initially at the Kolmogorov scale $\eta \sim 1/k_d$, where $k_d$ is the dissipation wavenumber, grows and propagates to the scale of interest $L \sim 1/k_L$ of the large energy-containing eddies through an inverse cascade of error, and hence the predictability time $T_L$ at the large scale $L$ is the sum of all the characteristic times required for the small perturbation to propagate from the Kolmogorov scale $\eta$ to the large scale of interest $L$: $T_L = \sum_{p=0}^{P} \tau(2^{p} k_L )$, where $P = \log_2 (k_d/k_L)$. For turbulent flows with high Reynolds number and spectral velocity $\norm{u(k)} \sim k^{-\alpha}$, the eddy turnover timescale is $ \tau(k) = \left[k \norm{u(k)}\right]^{-1} \sim k^{\alpha -1}$, and hence the predictability time $T_L$ is estimated~\cite{Bohr1998,Ditlevsen2010} as \begin{eqnarray} T_L \sim \sum_{p=0}^{\infty} \tau(2^{p} k_L ) = k_L^{\alpha -1} \frac{1}{1-2^{\alpha -1}}. \label{eqn:Lorenz_approach} \end{eqnarray} In the case of 3D fully developed turbulence, $\alpha = 1/3$ because $\norm{u(k)} \sim k^{-1/3}$, and so $T_L \sim k_L^{-2/3} \sim \tau(k_L)$. This immediately implies that the major contribution to the predictability time in Eq.~(\ref{eqn:Lorenz_approach}) comes from the term corresponding to $p = 0$. Therefore, the predictability time in 3D turbulence, from the algebraic expression in Eq.~(\ref{eqn:Lorenz_approach}), is determined by the eddy turnover time of the largest scale of interest, and the predictability time corresponding to the maximum Lyapunov exponent becomes irrelevant. The more energy there is at small scales, the faster the error propagates to the large scales, resulting in a shorter predictability time. In 2D turbulence, $\alpha = 1$ because the energy spectrum at the small scales is $E(k) \sim k^{-3}$ and $\norm{u(k)}^2 \sim \int_{k}^{\infty} E(k^\prime) \,{\rm d}k^\prime \sim k E(k)$; this results in a diverging predictability time $T_L$. As a consequence, long-range predictability is realizable in large-scale atmospheric flow that is modeled as a 2D turbulent system. The predictability behaviour of rotating turbulence is expected to be different from that of 3D homogeneous isotropic and 2D turbulence because rotation introduces a new timescale and a characteristic lengthscale called the Zeman scale~\cite{Zeman1994} into the problem, and new phenomena emerge in the presence of rotation~\cite{Smith1999,Bartello1994,Sreenivasan2008}. The energy spectrum is $E(k) \sim k^{-2}$ for the scales in the Zeman range $(k < k_\Omega)$, where $k_\Omega$ is the Zeman wavenumber corresponding to the Zeman scale, and $E(k) \sim k^{-5/3}$ for small scales $(k > k_\Omega)$~\cite{Mininni2012,Biferale2016,Rathor2020}. The predictability at small scales is unlikely to change significantly in rotating turbulence, since there it is limited by the small-scale dynamics, as in 3D turbulence. However, because $\alpha = 1/2$ at large scales, the predictability time is expected to become $T_L \sim k_L^{-1/2} \sim \tau_{rot}(k_L)$, where $k_L < k_\Omega$ and $\tau_{rot}(k) \sim k^{-1/2}$ is the eddy turnover time at the scales ($k < k_\Omega$). Since $\tau_{rot}/\tau \sim k^{1/6}$, this immediately implies that the predictability time at large scales in the Zeman range $k < k_\Omega$ in rotating turbulence is \textit{larger} than that in 3D homogeneous isotropic turbulence at the same scales.
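To make Eq.~(\ref{eqn:Lorenz_approach}) concrete, the following minimal Python sketch (with the illustrative choices $k_L = 1$ and a truncation at $P$ octaves; not taken from any reference) evaluates the partial sums of the Lorenz series for the three spectral exponents discussed above:
\begin{verbatim}
import numpy as np

def T_L(alpha, k_L=1.0, P=60):
    # Partial sum of the Lorenz series T_L = sum_p tau(2^p k_L),
    # with the per-octave eddy turnover time tau(k) ~ k^(alpha - 1).
    p = np.arange(P + 1)
    return np.sum((2.0 ** p * k_L) ** (alpha - 1.0))

for alpha in (1.0 / 3.0, 0.5, 1.0):   # 3D, rotating (Zeman range), 2D
    print(alpha, T_L(alpha))
# alpha = 1/3 and 1/2 give convergent geometric sums dominated by p = 0,
# i.e. T_L ~ tau(k_L); alpha = 1 gives tau(k) = const, so the partial sum
# grows linearly with P -- the diverging 2D predictability time.
\end{verbatim}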
In this work, we investigate the behaviour of large-scale predictability, using our extensive shell model simulations, by measuring the growth of the error between two initially close trajectories in the state space of the shell model, and thus test the aforementioned predictions about predictability in rotating turbulence. First, we look at the large-scale predictability of the full system that takes all components (scales) into consideration, and how it relates to the Rossby number, a non-dimensional parameter that characterises rotating turbulence. We then look into the scale dependence of predictability for different Rossby numbers. Finally, we compute the finite size Lyapunov exponent as a measure of predictability and validate the dimensional predictions. \section{Simulation Details} We consider a turbulent flow, under a solid body rotation $\mb{\Omega}$, described by the Navier--Stokes equation for the velocity field $\mb{u}$ of a 3D incompressible ($\mb{\nabla}\cdot \mb{u} = 0$) flow \begin{equation} \frac{\partial \mb{u}}{\partial t} + (\mb{u}\cdot\mb{\nabla}) \mb{u} = -\frac{1}{\rho} \mb{\nabla} p + \nu \nabla^2 \mb{u} - 2 \mb{\Omega} \times \mb{u} + \mb{f}, \label{eqn:NSE} \end{equation} where $\rho$ is the fluid density and $\nu$ denotes the kinematic viscosity of the fluid. The centrifugal force, which is absorbed in the pressure field $p$, and the Coriolis force $-2 \mb{\Omega} \times \mb{u}$ are generated by the solid body rotation. Furthermore, turbulence in the flow is sustained by the external force $\mb{f}$, which pumps constant energy $\langle \mb{u}\cdot \mb{f} \rangle$ at large scales into the system. To study the predictability aspects of the aforementioned rotating turbulent flow, we numerically simulate shell models of turbulence modified appropriately to take the Coriolis force into account. Shell models are innately low-dimensional dynamical systems that resemble the spectral Navier--Stokes equation but are not actually derived from it~\cite{Frisch1995,Bohr1998,Biferale2003,Pandit2009}. The dynamical system is constructed by dividing the 3D Fourier space into discrete non-overlapping shells such that the wave vectors in shell $n$ are represented by a single wavenumber $k_n$ that lives on a one-dimensional logarithmically-spaced lattice: $k_n = k_0 \lambda^n$, where $k_0$ and the shell spacing $\lambda$ are real constants. A complex dynamical variable $u_n$, which represents the velocity difference associated with a lengthscale $\sim k_n^{-1}$ in the Navier--Stokes equation, is associated with shell $n$ in the lattice. The nonlinear interactions are chosen so as to satisfy the important symmetries of the Navier--Stokes equations and are restricted to the nearest and next-nearest neighbour shells. Because of this logarithmic construction on a one-dimensional lattice truncated with a total of $N$ shells and the restricted nonlinear interactions, extremely high Reynolds numbers that are not attainable in DNSs are achieved in shell models even with a few shells, i.e., small $N$. Shell models have been used extensively to discover and reproduce many characteristics of predictability in turbulence~\cite{Crisanti1993,Crisanti1993a,Crisanti1994,Aurell1996,Aurell1996a,Bohr1998}. However, the use of shell models in the study of rotating turbulence is scarce and relatively recent~\cite{Hattori2004,Chakraborty2010,Rathor2020,Rathor2021}.
Furthermore, shell models that are structurally isotropic are able to reproduce and predict many features of rotating turbulence, such as the dual scaling of the energy spectrum, two-dimensionalisation, and the scaling of equal-time structure functions~\cite{Hattori2004,Chakraborty2010,Rathor2020}. We, therefore, resort to shell models to study predictability in rotating turbulence and consider the following so-called GOY shell model~\cite{Gledzer1973,Ohkitani1982} modified for rotating turbulence \begin{eqnarray}\label{goy} \frac{du_n }{dt}= \mathcal{N}_n -\nu k_n^2 u_n +f_n -i\Omega u_n, \end{eqnarray} where $\mathcal{N}_n$ is the nonlinear term modeled as \begin{eqnarray} \mathcal{N}_n& = & i \left[a k_{n+1} u_{n+2} u_{n+1} + b k_{n} u_{n+1}u_{n-1} \right.\nonumber \\ && \left.+ c k_{n-1} u_{n-1} u_{n-2}\right]^{*} \nonumber \end{eqnarray} with real coefficients $a, b$ and $c$, and the superscript $^*$ denoting complex conjugation. We set the coefficient $a = 1$ for time rescaling without any loss of generality, and we determine the other interaction coefficients as $b = -1/2$ and $c = -1/2$ from the conservation of the quadratic invariant quantities, namely energy and helicity, for inviscid and unforced 3D turbulence. In Eq.~(\ref{goy}), the second term on the right is the viscous dissipative term with viscosity denoted by $\nu$, and $f_n$ is the forcing term to sustain the turbulence. We take the following forcing scheme to supply constant energy and zero helicity to the system~\cite{Ditlevsen2001}: $f_n = \epsilon (1+i) \left(\frac{u_n}{\norm{u_n}^2}\delta_{n,2} + \frac{u_n}{2\norm{u_n}^2}\delta_{n,3}\right)$, where $\epsilon$ determines the energy injection rate. The total mean energy injection rate is $\bar{\epsilon} = 1.5 \epsilon$ and, in our simulations, $\epsilon = 0.01$. Finally, the last term with rotation rate $\Omega$ acts as the Coriolis force. This term is specifically made imaginary so that it does not explicitly contribute to the kinetic energy of the system. We simulate the aforementioned GOY model of rotating turbulence with a total of $N = 30$ shells, shell spacing $\lambda = 2$, and $k_0 = 1/16$. A high Reynolds number $Re\sim 10^{10}$ is achieved by choosing the viscosity $\nu = 10^{-9}$, such that the Kolmogorov shell is $n_\eta = 24$. Because the shell model is a set of stiff ordinary differential equations, the integration is performed carefully using the RK4 scheme with a time step as small as $\delta t = 10^{-5}$. We take the initial velocity field $u_n = \sqrt{k_n} \exp(i\theta)$ for $n\le 4$ and $u_n = \sqrt{k_n} \exp(-k_n^2) \exp(i\theta)$ for $n \ge 5$, where $\theta \in [0,2\pi]$ is a random phase. We evolve the system to bring it to a statistically steady state before starting the simulations for predictability. We simultaneously and independently evolve two initially close trajectories of the reference velocity $u_n(t)$ and the perturbed velocity $u_n^\prime(t)$ in the $2N$-dimensional state space of the shell model. The initial perturbed velocity $u_n^\prime(t = 0)$ is obtained by perturbing the initial reference velocity $u_n(t = 0)$, which is in a steady state, in accordance with the following scheme~\cite{Aurell1996}: \begin{equation} u_n^\prime = \begin{cases} \exp(i\phi_n) u_n, & n \geq n_{\eta}\\ u_n, & n < n_{\eta}, \end{cases} \end{equation} where $n_{\eta}$ is the shell number corresponding to the Kolmogorov wavenumber $k_\eta$, $\phi_n \in [0, \theta_c] $ is a random number, and $\theta_c$ quantifies the correlation between the two fields.
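For concreteness, a minimal Python (NumPy) sketch of this integration---using the parameters and forcing stated above, but with an illustrative array layout and helper names that are not the author's code---reads:
\begin{verbatim}
import numpy as np

N, lam, k0 = 30, 2.0, 1.0 / 16.0
nu, eps, Omega, dt = 1e-9, 0.01, 4.0, 1e-5   # Omega = 4 gives Ro = 0.049
a, b, c = 1.0, -0.5, -0.5                    # conserve energy and helicity
kk = k0 * lam ** (np.arange(N + 4) - 1.0)    # padded: kk[j] = k_{j-1}
j = np.arange(2, N + 2)                      # padded indices of shells 1..N
k = kk[j]                                    # shell wavenumbers k_n

def rhs(u):
    v = np.zeros(N + 4, dtype=complex)       # zero-padded velocity array
    v[2:N + 2] = u
    nonlin = 1j * np.conj(a * kk[j + 1] * v[j + 2] * v[j + 1]
                          + b * kk[j] * v[j + 1] * v[j - 1]
                          + c * kk[j - 1] * v[j - 1] * v[j - 2])
    f = np.zeros(N, dtype=complex)           # constant-energy forcing
    f[1] = eps * (1 + 1j) * u[1] / np.abs(u[1]) ** 2        # shell n = 2
    f[2] = eps * (1 + 1j) * u[2] / (2 * np.abs(u[2]) ** 2)  # shell n = 3
    return nonlin - nu * k ** 2 * u + f - 1j * Omega * u

def rk4_step(u):
    k1 = rhs(u); k2 = rhs(u + 0.5 * dt * k1)
    k3 = rhs(u + 0.5 * dt * k2); k4 = rhs(u + dt * k3)
    return u + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0

theta = 2 * np.pi * np.random.rand(N)        # random initial phases
u = np.sqrt(k) * np.exp(1j * theta)
u[4:] *= np.exp(-k[4:] ** 2)                 # damp shells n >= 5
\end{verbatim}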
Initially, the energy contents $\frac{1}{2}\sum_{n=1}^{N} \norm{u_n}^2$ and $\frac{1}{2}\sum_{n=1}^{N} \norm{u_n^\prime}^2$ are equal; however, the error, measured as the (Euclidean) distance between $u_n$ and $u_n^\prime$, is non-zero. To study the predictability, we track the evolution of the error field $\delta u_n(t) = \left(u_n^\prime(t) -u_n(t)\right)/\sqrt{2}$, where the factor $1/\sqrt{2}$ is for normalization convenience. The \textit{error} between the two trajectories is measured from $\delta u_n(t)$ in terms of the energy norm, defined as \begin{equation} E_\Delta(t) = \int_{0}^{\infty} E_\Delta(k,t) \mathrm{d}k = \frac{1}{2} \sum_{n=1}^{N} \norm{\delta u_n(t)}^2, \end{equation} where $E_\Delta(k,t)$ is the error spectrum. In the shell model, the error spectrum can be defined as \begin{equation} E_\Delta(k_n,t) = \frac{\norm{\delta u_n(t)}^2}{k_n}. \end{equation} The initially correlated trajectories $u_n(t)$ and $u_n^\prime(t)$ evolve to completely decorrelate in time, such that the error $E_\Delta(t)$ grows to eventually saturate to $E(t)$, where $ E(t) = \frac{1}{2}\sum_{n=1}^{N} \norm{u_n(t)}^2 = \frac{1}{2}\sum_{n=1}^{N} \norm{u_n^\prime(t)}^2$. In our simulations, we choose a fairly small $\theta_c (= 10^{-6})$ to generate initially strongly correlated trajectories $u_n(t)$ and $u_n^\prime(t)$. We repeat our simulations with various rotation rates, turning on the Coriolis force term simultaneously for both the reference and the perturbed trajectories. A flow in a rotating frame is characterised by a non-dimensional Rossby number in addition to the Reynolds number. The Rossby number corresponding to rotation rate $\Omega$ is defined as $\mathrm{Ro}:=U/\Omega L_{0}$, where $U= (\sum_{n} \norm{u_n}^2)^{1/2}$ is the root-mean-square velocity and $L_{0}= 1/k_0$. For finite $\mathrm{Ro}$, we determine $U$ at late times, when the system has reached an (asymptotically) steady state. We report our results for moderate Rossby numbers: $\mathrm{Ro} = 0.137, 0.049, 0.025$, and $0.018$ corresponding to the rotation rates $\Omega = 1, 4, 10$, and $16$ respectively, in addition to $\mathrm{Ro} = \infty$ (i.e., no rotation). The statistics, for each $\mathrm{Ro}$, are computed over an ensemble of $5000$ different realizations of the pairs of reference and perturbed trajectories in the state space of the shell model. The characteristic timescale of the system, the large eddy turnover time $T = L_0/U$, varies in the simulations with different Ro because the timescales in the Zeman range are governed by the Coriolis force. However, the timescale of the dynamics at the Kolmogorov scale is not significantly affected for moderate Ro. Therefore, we consider the Kolmogorov timescale $t_\eta = \sqrt{\nu/\varepsilon}$, where $\varepsilon$ ($= \bar{\epsilon}$ in steady state) is the energy dissipation rate, as the characteristic timescale for all of the simulations and report our results in units of the constructed timescale $T_{\eta} = 10^4 t_\eta$. In our simulations, for $\mathrm{Ro} = \infty$, $T \simeq 6T_{\eta}$. \section{Results} \subsection{Error Dynamics} In Fig.~\ref{fig:error_energy}, we plot the time evolution of the error in terms of the relative error $E_\Delta(t)/E(t)$, defined as the ensemble average of the error $E_\Delta(t)$ normalized with the ensemble average of the energy $E(t)$ (of the reference trajectories). The solid lines show the mean relative error, and the respective shaded regions represent the fluctuations of the relative error within half a standard deviation from the mean.
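For reference, the perturbation scheme and the error diagnostics defined in this section can be sketched as follows (names are again illustrative; \texttt{N} and \texttt{k} as in the previous sketch):
\begin{verbatim}
def perturb(u, n_eta=24, theta_c=1e-6, rng=np.random.default_rng()):
    # Rotate the phases of shells n >= n_eta by random angles in [0, theta_c];
    # this preserves |u_n| and hence the initial energy content.
    up = u.copy()
    phi = rng.uniform(0.0, theta_c, size=N - n_eta + 1)
    up[n_eta - 1:] *= np.exp(1j * phi)       # 0-based array, 1-based shells
    return up

def error_energy(u, up):
    # E_Delta(t) = (1/2) sum_n |delta u_n|^2, delta u_n = (u'_n - u_n)/sqrt(2)
    du = (up - u) / np.sqrt(2.0)
    return 0.5 * np.sum(np.abs(du) ** 2)

def error_spectrum(u, up):
    # E_Delta(k_n, t) = |delta u_n|^2 / k_n
    return np.abs((up - u) / np.sqrt(2.0)) ** 2 / k

def relative_error(u, up):
    # E_Delta(t) / E(t), which saturates to ~1 at full decorrelation
    return error_energy(u, up) / (0.5 * np.sum(np.abs(u) ** 2))
\end{verbatim}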
The mean relative error displays an initial exponential growth followed by an algebraic growth before it saturates to $ E_\Delta(t)/E(t) \simeq 1$. As the error approaches saturation, the fluctuations in the relative error become enormously large. These result from the kinetic energy fluctuations, which vary on the same timescale as the saturated error and are related to the large-scale dynamics. The forcing mechanism at large scales could also contribute to these large-scale fluctuations. Moreover, we observe a decrease in the fluctuations with decreasing Ro. This is perhaps the result of the \textit{linear} Coriolis-force-dominated dynamics at large scales, which suppresses the nonlinear fluctuations. \begin{figure} \includegraphics[scale=0.98]{fig1_error_growth.pdf} \caption[Plots of the relative error growth with time.]{Plots of the relative error $E_\Delta(t)/E(t)$ versus time $t$ normalized with $T_{\eta} = 10^4 t_\eta$, where $t_\eta$ is the Kolmogorov time, for various $\mathrm{Ro}$ values (see legend). Solid lines with colors corresponding to the respective $\mathrm{Ro}$ represent the mean relative error, whereas the respective shaded region represents the fluctuations of the relative error within half a standard deviation (for clarity to the eye) from the mean. } \label{fig:error_energy} \end{figure} Figure~\ref{fig:Tp_Leith} presents the time evolution of the relative error on a semilogarithmic scale. For all Ro, we observe an initial stage of exponential error growth that lasts for a relatively short time period $\sim T_{\eta}$. For $\mathrm{Ro} = \infty$, this is followed by a short-lived linear growth stage before the error energy saturates to $ E_\Delta(t) \simeq E(t)$ in the later stage of its evolution. However, peculiar to finite Ro, the error growth intriguingly freezes after the exponential stage for a time period that depends on the Rossby number; the smaller the $\mathrm{Ro}$, the longer the time period. This stage of frozen error evolution corresponds to the plateaus at relative error $\sim 10^{-3}$ (see Fig.~\ref{fig:Tp_Leith}). Furthermore, after this frozen stage, the relative error begins to grow again, but algebraically. It is as though the dynamics of the reference and the perturbed trajectories synchronise in the state space of the shell model for an Ro-dependent time period before the trajectories desynchronise and separate again, now algebraically. The predictability time associated with a scale is defined as the time a small error (at the Kolmogorov scale) takes to grow and propagate to that scale and induce complete uncertainty there~\cite{Bohr1998}. Moreover, the predictability time for the full system is defined as the time it takes for a small initial error to grow to a predetermined threshold set on the relative error. The classical prescription due to Leith~\cite{Leith1971} for fixing the threshold is $E_\Delta(T_p^L)/E(T_p^L) = 1/4$, such that $T_p^L$ denotes the predictability time for the full system. In Fig.~\ref{fig:Tp_Leith}, the vertical dashed lines with colors corresponding to the respective $\mathrm{Ro}$ determine the predictability time $T_p^L$ by intersecting the respective relative error curve at the threshold $E_\Delta(T_p^L)/E(T_p^L) = 1/4$. Interestingly, we observe that the duration of the frozen stage forms a considerable part of the predictability time $T_p^L$ (see Fig.~\ref{fig:Tp_Leith}).
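A minimal sketch of the Leith prescription (illustrative names; the same first-crossing logic, applied shell by shell with a threshold $\gamma$, yields the scale-dependent $T_p(k)$ used in the next subsection):
\begin{verbatim}
def first_crossing(t, series, threshold):
    # First time a saturating series crosses a given threshold.
    idx = np.nonzero(series >= threshold)[0]
    return t[idx[0]] if idx.size else np.nan

# Leith predictability time for the full system:
#   T_p_L = first_crossing(t, rel_err, 0.25)
# where rel_err is the ensemble-mean relative error sampled at times t.
# Applying the same logic to E_Delta(k_n, t)/E(k_n) for each shell n, with
# a threshold gamma, gives the scale-dependent T_p(k).
\end{verbatim}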
For instance, for Ro $= 0.018$, the duration of the frozen stage $\simeq 100 T_\eta$ is about $30\%$ of the predictability time $T_p^L \simeq 337T_\eta$. In the inset of Fig.~\ref{fig:Tp_Leith}, we plot the predictability time $T_p^L$ against the Rossby number $\mathrm{Ro}$ on a loglog scale and find that $T_p^L$ increases with decreasing Ro. Surprisingly, for finite $\mathrm{Ro}$, the dependence of the predictability time $T_p^L$ on $\mathrm{Ro}$ satisfies a power law $T_p^L \sim \mathrm{Ro}^{\beta}$ with the measured value of the exponent $\beta = -0.68\ (\simeq -2/3)$, as shown by the linear best fit (see the blue dashed line in the inset of Fig.~\ref{fig:Tp_Leith}). \begin{figure} \includegraphics[scale=0.98]{fig2_predictability_time.pdf} \caption[Semilog plots of error growth and predictability in inset figure.]{Semilog plots of the mean relative error energy $E_\Delta(t)/E(t)$ versus time $t$ normalized with $T_{\eta} = 10^4 t_\eta$, where $t_\eta$ is the Kolmogorov time, for different Rossby numbers (see legend) from our simulations of the shell model. The horizontal black dashed line represents $E_\Delta(t)/E(t)=1/4$. The vertical dashed lines with colors corresponding to the respective $\mathrm{Ro}$ determine the large-scale predictability time. Inset: Loglog plot of the predictability time $T_p^L$ versus the Rossby number $\mathrm{Ro}$. The blue dashed line is the linear best fit with slope $= -0.68$.} \label{fig:Tp_Leith} \end{figure} \subsection{Scale-by-scale Predictability} \begin{figure*}[ht] \includegraphics[scale = 1]{fig3_error_spectrum.pdf} \caption{(a) Loglog plots of the ensemble averaged error spectra $E_\Delta(k, t)$ for $\mathrm{Ro} = \infty$ (i.e., no rotation) at different times $t/T_{\eta} = 0.23, 0.43, 0.5 , 0.58, 0.99, 1.97, 3.94, 7.88$, and $15.76$ (from bottom to top). The solid and the dashed lines represent the error spectra in the exponential and the linear growth stages, respectively. The thick dashed black line represents the energy spectrum $E(k) \sim k^{-5/3}$. (b) Loglog plots of the ensemble averaged error spectra compensated with $k^{5/3}$ for different $\mathrm{Ro}$ values (see legend) at a representative time $t/T_{\eta} = 16$. The dashed lines with colors corresponding to the respective $\mathrm{Ro}$ represent the energy spectra $E(k) \sim k^{-2}$ ($k < k_\Omega$) and $E(k) \sim k^{-5/3}$ ($k > k_\Omega$), where $k_\Omega$ is the respective Zeman wavenumber. (c) Loglog plots of the predictability time $T_p(k)$ for different $\mathrm{Ro}$ (see legend). The black dashed line represents the scaling $T_p(k) \sim k^{-1/3}$ and the black dotted line scales as $T_p(k) \sim k^{-0.07}$. } \label{fig:error_spectra} \end{figure*} After investigating the predictability of the full system, we turn to the scale-by-scale predictability of rotating turbulence. In Fig.~\ref{fig:error_spectra}(a), we plot the evolution of the ensemble averaged error spectrum $E_\Delta(k, t)$ for $\mathrm{Ro} = \infty$ (the non-rotating case). The solid lines represent the error spectra at different times during the exponential growth of the error. The error, initially localized at the small scales $k_n \ge k_\eta$, grows exponentially to the order of the energy at the Kolmogorov scale in a short time $t \sim 0.5 T_{\eta}$, as shown by the error spectrum represented by the black solid line in Fig.~\ref{fig:error_spectra}(a).
Then the error grows further and cascades towards the large scales---through the nonlinear interactions of the scales---as shown by the error spectra represented by the dashed lines at different times. The error spectrum saturates to the energy spectrum $E(k) \sim k^{-5/3}$ (shown by the thick dashed black line) for the wavenumbers $k > k_L$, such that the reference and the perturbed trajectories completely decorrelate at the scales $ < k_L^{-1}$. Furthermore, we observe that the error spectrum at large scales is significantly affected and decorrelated over a relatively long time due to the cascading of the error through nonlinear mechanisms: the true predictability time is much longer than the timescale of the exponential stage, rendering the Lyapunov-exponent-based predictability time irrelevant and insufficient for the scales in the inertial range. For example, the error spectrum at the large scales close to the forcing scales decorrelates over a long time $t \simeq 16 T_{\eta}$, making the dynamics at these large scales much more predictable. Figure~\ref{fig:error_spectra}(b) shows the ensemble averaged error spectra for various $\mathrm{Ro}$ values at the time $t = 16 T_\eta$, when the trajectories for $\mathrm{Ro} = \infty$ are completely decorrelated. We also plot, for reference, the energy spectra (represented by the respective colored dashed lines) $E(k) \sim k^{-2}$ ($k < k_\Omega$) and $E(k) \sim k^{-5/3}$ ($k > k_\Omega$), where $k_\Omega = \sqrt{\Omega^3/\varepsilon}$ is the respective Zeman wavenumber~\cite{Zeman1994} for the same $\mathrm{Ro}$ values. We observe that the error spectra for the different finite $\mathrm{Ro}$ have the same shape at scales smaller than the Zeman scale ($k > k_\Omega$), though they have not yet completely saturated to the respective energy spectra. However, the error spectra in the Zeman range ($k < k_\Omega$) are different, such that the time taken for the error to saturate to the energy spectra increases with decreasing $\mathrm{Ro}$. This also hints at a possible enhancement of the predictability due to the dynamics in the Zeman range. The time it takes for an error at a small scale (say, the Kolmogorov scale) to decorrelate the trajectories at a larger scale $L \sim k_L^{-1}$ is the predictability time associated with the scale $L$. We measure the predictability time at a given scale $l \sim k^{-1}$ by determining the time $T_p(k)$ such that the relative error spectrum $E_\Delta(k, T_p)/E(k) = \gamma$, where $\gamma \simeq 1$. Using the aforementioned definition, we compute an ensemble averaged predictability time $T_p(k)$ with $\gamma = 0.8$. In Fig.~\ref{fig:error_spectra}(c), the predictability time scales as $k^{-1/3}$ for $\mathrm{Ro} = \infty$, confirming the scaling reported in a DNS study of homogeneous isotropic turbulence in an Eulerian approach~\cite{Berera2018}. This is, however, in contrast with the scaling $k^{-2/3}$ inferred from the Lorenz argument~\cite{Lorenz1969} and reported in DNS studies of 2D and 3D turbulence~\cite{Boffetta2001,Boffetta2017}. For finite Ro, however, the predictability time tends to become scale independent, predominantly in the Zeman range, as Ro decreases. As we see in Fig.~\ref{fig:error_spectra}(c), the predictability time approaches a plateau, particularly in the Zeman range, as the Rossby number is decreased. This is evidenced by the close-to-\textit{zero} scaling exponent of the scaling $T_p(k) \sim k^{-0.07}$ for small $\mathrm{Ro} = 0.018$ [see the dotted line in Fig.~\ref{fig:error_spectra}(c)].
Furthermore, for small Ro, the predictability time $T_p(k)$ exhibits a transition from short to long times around the Zeman scale. \subsection{Finite Size Lyapunov Exponent} The large-scale predictability is better described in terms of the \textit{finite size Lyapunov exponent} (FSLE) $\Lambda(\delta)$, which is the average divergence rate of two trajectories initially separated by a \textit{finite} measure $\delta$ that is of the order of the velocities in the inertial range~\cite{Aurell1996a,Aurell1996,Aurell1997,Cencini2013}. In particular, $\Lambda(\delta)$ is measured in terms of the (ensemble) average of the time $T_r(\delta)$ (the $r$-folding time) it takes for a separation $\delta$, measured along the error amplitude $\delta u(t) = \sqrt{2E_\Delta(t)}$, to grow by a factor $r$ to $r\delta$: \begin{equation} \Lambda(\delta) \equiv \frac{\ln r}{\langle T_r(\delta) \rangle}, \label{eqn:fsle} \end{equation} where $r$ is typically $O(1)$ $(r > 1)$, and the averaging $\langle \cdot \rangle$ is over an ensemble of many realizations. The FSLE was conceptualized to measure chaos and predictability in extended systems with many characteristic timescales and systems whose evolution involves nonlinear interactions among their degrees of freedom, such as turbulence~\cite{Aurell1996a,Boffetta2002a}. In the limit of $\delta \rightarrow 0$, the usual maximum Lyapunov exponent is recovered from the FSLE. We compute $\Lambda(\delta)$ by fixing a sequence of separations $\delta_m = \delta_0 r^m$, where $m$ is an integer less than $M$ determined by $\delta_M = \delta u(t)$ at saturation, $r = \sqrt{2}$, and $\delta_0 \sim 10^{-4}$ is the initial separation used to construct the sequence along $\delta u(t)$. We measure the time it takes for the separation to increase from $\delta_m$ to $\delta_{m+1}$ and average it over the ensemble to obtain the FSLE $\Lambda(\delta_m)$, for each $m$, from Eq.~(\ref{eqn:fsle}). The predictability time from the initial separation $\delta_0$ to a given tolerance $\delta$ in uncertainty is then calculated as follows: \begin{equation} T_p(\delta_0, \delta) = \int_{\delta_0}^{\delta} \frac{\mathrm{d} \ln \delta^\prime}{\Lambda(\delta^\prime)}. \label{eqn:pt_fsle} \end{equation} In Fig.~\ref{fig:fsle_shellmodel}, we plot the variation of the FSLE $\Lambda(\delta)$ with $\delta$. For $\mathrm{Ro} = \infty$, we recover the standard scaling of the FSLE, $\Lambda(\delta) \sim \delta^{-2}$, for $\delta$ in the inertial range of 3D turbulence~\cite{Aurell1996a,Cencini2013}. For finite Rossby numbers, $\Lambda(\delta)$ shows a dip for small $\delta$ in the inertial range, which corresponds to the newly developed frozen stage of error evolution in rotating turbulence. This dip makes a significant contribution to the integral for the predictability time $T_p(\delta_0, \delta)$ in Eq.~(\ref{eqn:pt_fsle}) for $\delta$ in the inertial range, which results in a much longer predictability time even for small $\delta$ in the inertial range, i.e., at small scales, as observed in the non-Zeman range of wavenumbers ($k > k_\Omega$) in Fig.~\ref{fig:error_spectra}(c). Furthermore, we find that the FSLE scales as $\delta^{-1}$ for large $\delta$ of the order of the velocities in the Zeman range, for small values of Ro. From dimensional considerations and the Lorenz argument, the FSLE is $\Lambda(\delta) \sim k_L \norm{u(k_L)}$ for a separation $\delta$ of the order of $\norm{u(k_L)}$ in the Zeman range.
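A minimal sketch of this estimator (illustrative names, reusing \texttt{first\_crossing} from above; $r = \sqrt{2}$ and $\delta_0 = 10^{-4}$ as stated, with the truncation $M$ an illustrative choice):
\begin{verbatim}
r, delta0, M = np.sqrt(2.0), 1e-4, 25
deltas = delta0 * r ** np.arange(M + 1)      # thresholds delta_m

def r_folding_times(t, delta_u):
    # First-crossing times of each delta_m for one realization's error
    # amplitude delta_u(t) = sqrt(2 E_Delta(t)); successive differences
    # give the r-folding times T_r(delta_m).
    t_cross = np.array([first_crossing(t, delta_u, d) for d in deltas])
    return np.diff(t_cross)

def fsle(folding_times):
    # Ensemble average over realizations (axis 0), then Lambda = ln(r)/<T_r>.
    return np.log(r) / np.nanmean(folding_times, axis=0)

def predictability_time(lam):
    # Discretized T_p = int d(ln delta)/Lambda(delta): the sequence delta_m
    # is uniformly spaced by ln(r) along ln(delta).
    return np.nansum(np.log(r) / lam)
\end{verbatim}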
Since $\norm{u(k)} \sim k^{-1/2}$ from the energy spectrum $E(k) \sim k^{-2}$ in the Zeman range, this dimensional estimate gives $\Lambda(\delta) \sim \delta^{-1}$, and thus the scaling obtained from our shell model simulations is consistent with the aforementioned dimensional prediction. It is worth noting that the predictability time for small $\delta$ increases as Ro decreases, but this has very little effect on the large-scale predictability (see Fig.~\ref{fig:fsle_shellmodel}). \begin{figure} \includegraphics[scale=0.98]{fig4_fsle.pdf} \caption{Loglog plots of the finite size Lyapunov exponent (FSLE) $\Lambda(\delta)$ obtained from the $r$-folding time averaged over $5000$ different realizations from our simulations of the shell model. The black dashed line scales as $\delta^{-2}$ and the black dotted line as $\delta^{-1}$.} \label{fig:fsle_shellmodel} \end{figure} \section{Conclusion} In conclusion, we studied the statistical properties of predictability in rotating turbulence by simulating two initially close realizations of velocity trajectories in the state space of a GOY shell model of rotating turbulence. Because of the chaotic nature of the flow, the trajectories separate, for a short initial time, at an exponential rate dependent on the Rossby number. For finite Ro, the error growth freezes for a time period determined by the Rossby number, and then the error grows algebraically until the trajectories completely decorrelate; this behaviour is peculiar to rotating turbulence. The large-scale predictability in rotating turbulence is found to increase with increasing rotation rate, as predicted by the Lorenz argument that considers the growth of an uncertainty occurring on the nonlinear timescales. Interestingly, the predictability time for the full system is a power law in the Rossby number Ro. Furthermore, the predictability for large rotation rates tends to become scale independent, which contradicts the previously discussed prediction $T_L \sim k_L^{-1/2}$ based on the Lorenz argument. For finite Ro, the frozen stage of the error evolution is captured as a sudden dip in the FSLE plot, indicating that the time period of the frozen stage accounts for a considerable part of the predictability time. The dimensional prediction of $\Lambda(\delta) \sim \delta^{-1}$ for large $\delta$ in the Zeman range is validated for small Rossby numbers. The frozen stage in the error evolution is perhaps a manifestation of the globally imposed timescale of the Coriolis force term modifying the character of the slow and fast dynamics in the state space of the shell model. The dip and the subsequent peak in the FSLE plot are signatures of the slow and the fast dynamics in the inertial range, respectively~\cite{Mitchell2012}. We speculate that the enhancement of predictability can be attributed to the coherent dynamics in the columnar structures formed in real rotating turbulence, as predicted in theory (the Taylor--Proudman theorem) and observed in direct numerical simulations~\cite{Sharma2018,Thiele2009,Godeferd2015}. The scale independence of predictability in the Zeman range for small Rossby numbers requires closer examination: for the predictability to increase and become scale independent simultaneously, there must be a nonlocal transfer of error among the large scales, albeit at a much slower rate. In the future, we want to investigate this property of the predictability in the Zeman range and the form of the algebraic growth of the error in detail.
Finally, we think that our findings on the error dynamics and the predictability of rotating turbulence are of general interest in the study of extended dynamical systems with an imposed global timescale, such as stratified turbulence with a global timescale imposed by buoyancy. \section*{Acknowledgment} The author thanks Sagar Chakraborty, Samriddhi Sankar Ray, and Manohar Kumar Sharma for valuable discussions and suggestions. The author would also like to thank Anando Gopal Chatterjee for his help in developing an alternative DNS code for this work. The simulations were performed on the HPC2013 clusters of IIT Kanpur and using the resources provided by PARAM Sanganak under the National Supercomputing Mission, Government of India, at IIT Kanpur.
\section{Introduction} \vspace{-1mm} Deep Neural Networks (DNNs) are one of the main tools of modern machine learning. They are consistently proven to be powerful function approximators, able to model a wide variety of functional forms -- from image recognition~\cite{he2016deep,simonyan2014very}, through audio synthesis~\cite{van2016wavenet}, to human-beating policies in the ancient game of Go~\cite{silver2016mastering}. In many applications the process of training a neural network consists of receiving a dataset of input-output pairs from a ground truth function, and minimising some loss with respect to the network's parameters. This loss is usually designed to encourage the network to produce the same output, for a given input, as that from the target ground truth function. Many of the ground truth functions we care about in practice have an unknown analytic form, \emph{e.g.} because they are the result of a natural physical process, and therefore we only have the observed input-output pairs for supervision. However, there are scenarios where we do know the analytic form and so are able to compute the ground truth gradients (or higher order derivatives); alternatively, these quantities may sometimes be directly observable. A common example is when the ground truth function is itself a neural network; for instance this is the case for distillation~\cite{hinton2015distilling,rusu2015policy}, compressing neural networks~\cite{han2015deep}, and the prediction of synthetic gradients~\cite{jaderberg2016decoupled}. Additionally, if we are dealing with an environment/data-generation process (vs.\ a pre-determined set of data points), then even though we may be dealing with a black box, we can still approximate derivatives using finite differences. In this work, we consider how this additional information can be incorporated into the learning process, and what advantages it can provide in terms of data efficiency and performance. We propose Sobolev Training (ST) for neural networks as a simple and efficient technique for leveraging derivative information about the desired function in a way that can easily be incorporated into any training pipeline using modern machine learning libraries. The approach is inspired by the work of Hornik~\cite{hornik1991approximation}, which proved the universal approximation theorems for neural networks in Sobolev spaces -- metric spaces where distances between functions are defined both in terms of their differences in values and differences in the values of their derivatives. In particular, it was shown that a sigmoid network can not only approximate a function's value arbitrarily well, but that the network's derivatives with respect to its inputs can approximate the corresponding derivatives of the ground truth function arbitrarily well too. Sobolev Training exploits this property, and tries to match not only the output of the function being trained but also its derivatives. \begin{figure}[t] \centering \includegraphics[width=0.95\textwidth]{sobnets.pdf} \caption{a) Sobolev Training of order 2. Diamond nodes $m$ and $f$ indicate parameterised functions, where $m$ is trained to approximate $f$. Green nodes receive supervision. Solid lines indicate connections through which the error signals from the losses $l$, $l_1$, and $l_2$ are backpropagated to train $m$. b) Stochastic Sobolev Training of order 2. If $f$ and $m$ are multivariate functions, the gradients are Jacobian matrices.
To avoid computing these high dimensional objects, we can efficiently compute and fit their projections on a random vector $v_j$ sampled from the unit sphere. } \label{fig:models} \end{figure} There are several related works which have also exploited derivative information for function approximation. For instance, Wu et al.~\cite{wu2017exploiting} and antecedents propose a technique for Bayesian optimisation with Gaussian Processes (GPs), where it was demonstrated that the use of information about gradients and Hessians can improve the predictive power of GPs. In previous work on neural networks, derivatives of predictors have usually been used either to penalise model complexity (e.g.\ by pushing the Jacobian norm to 0~\cite{rifai2011higher}), to encode additional, hand-crafted invariances to some transformations (for instance, as in Tangentprop~\cite{simard1991tangent}), to estimate derivatives for dynamical systems~\cite{gallant1992learning}, or, very recently, to provide an additional learning signal during attention distillation~\cite{zagoruyko2016paying}\footnote{Please refer to Supplementary Materials, section 5 for details}. Similar techniques have also been used in critic-based Reinforcement Learning (RL), where a critic's derivatives are trained to match its target's derivatives~\cite{werbos1992approximate,miller1995neural,fairbank2012simple,fairbank2012value,tassa2007least} using small, sigmoid-based models. Finally, Hyv{\"a}rinen proposed Score Matching Networks~\cite{hyvarinen2009estimation}, which are based on the somewhat surprising observation that one can model unknown derivatives of a function without actual access to its values -- all that is needed is a sampling-based strategy and a specific penalty. However, such an estimator has a high variance~\cite{vincent2011connection}; thus it is not particularly useful when true derivatives are available. To the best of our knowledge and despite its simplicity, the proposal to directly match network derivatives to the true derivatives of the target function has been minimally explored for deep networks, especially modern ReLU-based models. In our method, we show that by using the additional knowledge of derivatives with Sobolev Training we are able to train better models -- models which achieve lower approximation errors and generalise to test data better -- and reduce the sample complexity of learning. The contributions of our paper are therefore threefold: (\textbf{1}): We introduce Sobolev Training -- a new paradigm for training neural networks. (\textbf{2}): We look formally at the implications of matching derivatives, extending previous results of Hornik~\cite{hornik1991approximation} and showing that modern architectures are well suited for such training regimes. (\textbf{3}): Empirical evidence demonstrating that Sobolev Training leads to improved performance and generalisation, particularly in low data regimes. Example domains are: regression on classical optimisation problems; policy distillation from RL agents trained on the Atari domain; and training deep, complex models using synthetic gradients -- we report the first successful attempt to train a large-scale ImageNet model using synthetic gradients. \vspace{-1mm} \section{Sobolev Training} \label{sec:model} \vspace{-1mm} We begin by introducing the idea of training using Sobolev spaces.
When learning a function $f$, we may have access to not only the output values $f(x_i)$ for training points $x_i$, but also the values of its $j$-th order derivatives with respect to the input, $D^j_\mathbf{x} f(x_i)$. In other words, instead of the typical training set consisting of pairs $\{(x_i, f(x_i))\}_{i=1}^N$ we have access to $(K+2)$-tuples $\{(x_i, f(x_i), D_{\mathbf{x}}^{1} f (x_i), ..., D_{\mathbf{x}}^{K} f (x_i))\}_{i=1}^N$. In this situation, the derivative information can easily be incorporated into training a neural network model of $f$ by making the derivatives of the neural network match those given by $f$. Considering a neural network model $m$ parameterised with $\theta$, one typically seeks to minimise the empirical error in relation to $f$ according to some loss function $\ell$: $$ \sum_{i=1}^N \ell (m(x_i|\theta), f(x_i)). $$ When learning in Sobolev spaces, this is replaced with: \begin{equation} \label{eq:sobolev} \sum_{i=1}^N \left [ \ell (m(x_i|\theta), f(x_i)) + \sum_{j=1}^K \ell_j \left ( D_{\mathbf{x}}^{j} m(x_i|\theta), D_{\mathbf{x}}^{j} f(x_i) \right) \right ], \end{equation} where $\ell_j$ are loss functions measuring the error on the $j$-th order derivatives. This causes the neural network to encode derivatives of the target function in its own derivatives. Such a model can still be trained using backpropagation and off-the-shelf optimisers. A potential concern is that this optimisation might be expensive when either the output dimensionality of $f$ or the order $K$ is high; however, one can reduce this cost through stochastic approximations. Specifically, if $f$ is a multivariate function, instead of a vector gradient one ends up with a full Jacobian matrix, which can be large. To avoid adding computational complexity to the training process, one can use an efficient, stochastic version of Sobolev Training: instead of computing a full Jacobian/Hessian, one just computes its projection onto a random vector (a direct application of a known estimation trick~\cite{rifai2011higher}). In practice, this means that during training we have random variables $v^j$ sampled uniformly from the unit sphere, and we match these random projections instead: \begin{equation} \label{eq:sobolevapprox} \sum_{i=1}^N \left [ \ell (m(x_i|\theta), f(x_i)) + \sum_{j=1}^K \mathbb{E}_{v^j}\left [ \ell_j \left ( \left \langle D_{\mathbf{x}}^{j} m(x_i|\theta), v^j \right \rangle , \left \langle D_{\mathbf{x}}^{j} f(x_i), v^j \right \rangle\right) \right ] \right ]. \end{equation} Figure~\ref{fig:models} illustrates the compute graphs for non-stochastic and stochastic Sobolev Training of order 2. \vspace{-1mm} \section{Theory and motivation} \label{sec:theory} \vspace{-1mm} While in the previous section we defined Sobolev Training, it is not obvious that modeling the derivatives of the target function $f$ is beneficial to function approximation, or that optimising such an objective is even feasible. In this section we motivate and explore these questions theoretically, showing that the Sobolev Training objective is a well posed one, and that incorporating derivative information has the potential to drastically reduce the sample complexity of learning. Hornik showed~\cite{hornik1991approximation} that neural networks with non-constant, bounded, continuous activation functions, with continuous derivatives up to order $K$, are universal approximators in the Sobolev spaces of order $K$, thus showing that sigmoid-networks are indeed capable of approximating elements of these spaces arbitrarily well.
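In terms of implementation, objectives~(\ref{eq:sobolev}) and~(\ref{eq:sobolevapprox}) are straightforward to realise with automatic differentiation. A minimal first-order sketch in Python/JAX is given below; all names are illustrative, this is not the code used for the experiments, and $\ell$ and $\ell_1$ are taken to be L2 losses:
\begin{verbatim}
import jax
import jax.numpy as jnp

def sobolev_loss(params, model, x, f_x, dfdx):
    # L2 loss on values plus L2 loss on input-gradients (K = 1), for a
    # scalar-output model(params, x_i) with targets f_x and dfdx.
    m = lambda xi: model(params, xi)
    value_err = jnp.mean((jax.vmap(m)(x) - f_x) ** 2)
    grad_err = jnp.mean(jnp.sum((jax.vmap(jax.grad(m))(x) - dfdx) ** 2, -1))
    return value_err + grad_err

def stochastic_sobolev_loss(params, model, x, f_x, dfdx, key):
    # Match projections <grad m, v> to <grad f, v> for random unit vectors v;
    # a normalized Gaussian sample is uniform on the unit sphere.
    m = lambda xi: model(params, xi)
    v = jax.random.normal(key, dfdx.shape)
    v = v / jnp.linalg.norm(v, axis=-1, keepdims=True)
    proj_err = jnp.mean((jnp.sum(jax.vmap(jax.grad(m))(x) * v, -1)
                         - jnp.sum(dfdx * v, -1)) ** 2)
    return jnp.mean((jax.vmap(m)(x) - f_x) ** 2) + proj_err
\end{verbatim}
For multivariate $m$, the projection $\left \langle D_{\mathbf{x}} m, v \right \rangle$ can be obtained with a single vector--Jacobian product rather than the full Jacobian, which is precisely the point of the stochastic variant.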
Hornik's result, however, concerns bounded activations with continuous derivatives, whereas nowadays we often use activation functions such as ReLU, which are neither bounded nor have continuous derivatives. The following theorem shows that for $K=1$ we can use the ReLU function (or a similar one, like the leaky ReLU) to create neural networks that are universal approximators in Sobolev spaces. We will use the standard symbol $\mathcal{C}^1(S)$ (or simply $\mathcal{C}^1$) to denote the space of functions which are continuous, differentiable, and have a continuous derivative on a space $S$~\cite{krantz2012handbook}. All proofs are given in the Supplementary Materials (SM). \begin{thm} Let $f$ be a $\mathcal{C}^1$ function on a compact set. Then, for every positive $\varepsilon$ there exists a single hidden layer neural network with a ReLU (or a leaky ReLU) activation which approximates $f$ in the Sobolev space $\mathcal{S}_1$ up to $\varepsilon$ error. \end{thm} \vspace{-0.2cm} This suggests that the Sobolev Training objective is achievable, and that we can seek to encode the values and derivatives of the target function in the values and derivatives of a ReLU neural network model. Interestingly, we can show that if we seek to encode an arbitrary function in the derivatives of the model, then this is impossible not only for neural networks but also for any arbitrary differentiable predictor on compact sets. \begin{thm} Let $f$ be a $\mathcal{C}^1$ function. Let $g$ be a continuous function satisfying $\|g - \tfrac{\partial f}{\partial x}\|_{\infty} > 0$. Then, there exists an $\eta > 0$ such that for any $\mathcal{C}^1$ function $h$ either $\|f - h\|_{\infty} \ge \eta$ or $\left\|g - \frac{\partial h}{\partial x} \right\|_{\infty} \ge \eta$. \end{thm} \vspace{-0.2cm} However, when we move to the regime of finite training data, we can encode any arbitrary function in the derivatives (as well as higher order signals if the resulting Sobolev spaces are not degenerate), as shown in the following Proposition. \begin{prop} Given any two functions $f :S \rightarrow \mathbb{R} $ and $g :S \rightarrow \mathbb{R}^d$ on $S \subseteq \mathbb{R}^d$ and a finite set $\Sigma \subset S$, there exists a neural network $h$ with a ReLU (or a leaky ReLU) activation such that $\forall x \in \Sigma: f(x) = h(x)$ and $g(x) = \tfrac{\partial h}{\partial x}(x)$ (it has 0 training loss). \end{prop} \vspace{-0.2cm} Having shown that it is possible to train neural networks to encode both the values and derivatives of a target function, we now formalise one possible way of showing that Sobolev Training has lower sample complexity than regular training. \begin{figure}[t] \centering \begin{tabular}{c|c} \includegraphics[width=0.25\textwidth,valign=t]{piecewise.png}\;\;\;\;\;& \includegraphics[width=0.65\textwidth,valign=t]{toy_reg.png} \end{tabular} \caption{\emph{Left:} From top: Example of the piece-wise linear function; Two (out of a continuum of) hypotheses consistent with 3 training points, showing that one needs two points to identify each linear segment; The only hypothesis consistent with 3 training points enriched with derivative information. \emph{Right:} Logarithm of test error (MSE) for various optimisation benchmarks with varied training set size (20, 100 and 10000 points) sampled uniformly from the problem's domain. } \label{fig:toy_reg} \label{fig:piecewise} \end{figure} Let $\mathcal{F}$ denote the family of functions parametrised by $\omega$. We define $K_{reg} = K_{reg}(\mathcal{F})$ to be a measure of the amount of data needed to learn some target function $f$.
That is, $K_{reg}$ is the smallest number for which the following holds: for every $f_\omega \in \mathcal{F}$ and every set of distinct $K_{reg}$ points $(x_1, ..., x_{K_{reg}})$, we have $\forall_{i=1,...,K_{reg}} f(x_i) = f_\omega(x_i) \Rightarrow f = f_\omega$. $K_{sob}$ is defined analogously, but the final implication is of the form $ f(x_i) = f_\omega(x_i) \wedge \frac{\partial f}{\partial x}(x_i) = \frac{\partial f_\omega}{\partial x}(x_i) \Rightarrow f = f_\omega$. Directly from the definition it follows: \begin{prop} \label{prop::} For any $\mathcal{F}$, there holds $K_{sob}(\mathcal{F}) \leq K_{reg}(\mathcal{F})$. \end{prop} \vspace{-0.2cm} For many families, the above inequality becomes strict. For example, to determine the coefficients of a polynomial of degree $n$ one needs its values at at least $n+1$ distinct points. If we know both the values and the derivatives at $k$ points, it is a well-known fact that only $\lceil \frac{n+1}{2} \rceil$ points suffice to determine all the coefficients. We present two more examples in a slightly more formal way. Let $\mathcal{F}_{\rm{G}}$ denote a family of Gaussian PDFs (parametrised by $\mu$, $\sigma$). Let $\mathbb{R}^d \supset D = D_1 \cup \ldots \cup D_n$ and let $\mathcal{F}_{\rm{PL}}$ be a family of functions from $D_1 \times ... \times D_n$ (Cartesian product of sets $D_i$) to $\mathbb{R}^n$ of the form $f(x) = [A_1x_1 + b_1, \ldots, A_nx_n + b_n]$ (linear element-wise) (Figure~\ref{fig:piecewise} Left). \begin{prop} \label{prop::examples} There holds $K_{sob}\left(\mathcal{F}_{\rm{G}}\right) < K_{reg}(\mathcal{F}_{\rm{G}})$ and $K_{sob}(\mathcal{F}_{\rm{PL}}) < K_{reg}(\mathcal{F}_{\rm{PL}})$. \end{prop}\vspace{-0.2cm} This result relates to Deep ReLU networks, as they build a hyperplanes-based model of the target function. If those hyperplanes were parametrised independently, one could expect a reduction of sample complexity by a factor of $d+1$, where $d$ is the dimension of the function domain. In practice, the parameters of the hyperplanes in such networks are not independent; furthermore, the hinge positions change, so the Proposition cannot be applied directly, but it offers an intuitive way to see why the sample complexity drops significantly for Deep ReLU networks too. \vspace{-1mm} \section{Experimental Results} \label{sec:experiments} \vspace{-1mm} We consider three domains where information about derivatives is available during training\footnote{All experiments were performed using TensorFlow~\cite{abadi2016tensorflow} and the Sonnet neural network library~\cite{sonnet}.}. \vspace{-1mm} \subsection{Artificial Data} \vspace{-1mm} First, we consider the task of regression on a set of well known low-dimensional functions used for benchmarking optimisation methods. We train two-hidden-layer neural networks with 256 hidden units per layer and ReLU activations to regress towards function values, and verify generalisation capabilities by evaluating the mean squared error on a hold-out test set. Since the task is standard regression, we choose all the losses of Sobolev Training to be L2 errors, and use a first order Sobolev method (second order derivatives of ReLU networks with a linear output layer are identically zero). The optimisation is therefore: $$ \min_\theta \tfrac{1}{N} \sum_{i=1}^N \| f(x_i) - m(x_i|\theta) \|^2_2 + \| \nabla_x f(x_i) - \nabla_x m(x_i|\theta)\|_2^2.
$$ \begin{figure}[t] \centering \begin{tabular}{c|cc|cc} Dataset &\multicolumn{2}{c}{20 training samples}&\multicolumn{2}{c}{100 training samples} \\ \includegraphics[width=0.18\textwidth]{toy4.png}& \includegraphics[width=0.18\textwidth]{20_r.png}& \includegraphics[width=0.18\textwidth]{20_sbv.png}& \includegraphics[width=0.18\textwidth]{styblinski_100_r.png}& \includegraphics[width=0.18\textwidth]{styblinski_100_sbv.png} \\ & Regular & Sobolev & Regular & Sobolev \\ \includegraphics[width=0.18\textwidth]{toy4d.png}& \includegraphics[width=0.18\textwidth]{20_r_d.png}& \includegraphics[width=0.18\textwidth]{20_sbv_d.png}& \includegraphics[width=0.18\textwidth]{styblinski_100_r_d.png}& \includegraphics[width=0.18\textwidth]{styblinski_100_sbv_d.png}\\ \end{tabular} \caption{Styblinski-Tang function (on the left) and its models using regular neural network training (left part of each plot) and Sobolev Training (right part). We also plot the vector field of the gradients of each predictor underneath the function plot.} \label{fig:toy_reg_data} \end{figure} Figure~\ref{fig:toy_reg} (right) shows the results for the optimisation benchmarks. As expected, Sobolev trained networks perform extremely well -- for six out of seven benchmark problems they significantly reduce the testing error, with the obtained errors orders of magnitude smaller than the corresponding errors of the regularly trained networks. The stark difference in approximation error is highlighted in Figure~\ref{fig:toy_reg_data}, where we show the Styblinski-Tang function and its approximations with both regular and Sobolev Training. It is clear that even in very low data regimes, the Sobolev trained networks can capture the functional shape. Looking at the results, we make two important observations. First, the effect of Sobolev Training is stronger in low-data regimes; however, it does not disappear even in the high data regime, when one has 10,000 training examples for a two-dimensional function. Second, the only case where regular regression performed better is the regression towards Ackley's function. This particular example was chosen to show that one possible weak point of our approach might be approximating functions with a very high frequency signal component in the relatively low data regime. Ackley's function is composed of exponents of high frequency cosine waves, thus creating an extremely bumpy surface; consequently, a method that tries to match the derivatives can behave badly during testing if one does not have enough data to capture this complexity. However, once we have enough training data points, Sobolev trained networks are able to approximate this function better. \vspace{-1mm} \subsection{Distillation} \vspace{-1mm} Another possible application of Sobolev Training is to perform model distillation. This technique has many applications, such as network compression~\cite{sau2016deep}, ensemble merging~\cite{hinton2015distilling}, or more recently policy distillation in reinforcement learning~\cite{rusu2015policy}. We focus here on the task of distilling a policy. We aim to distill a target policy $\pi^*(s)$ -- a trained neural network which outputs a probability distribution over actions -- into a smaller neural network $\pi(s|\theta)$, such that the two policies $\pi^*$ and $\pi$ have the same behaviour.
In practice this is often done by minimising an expected divergence measure between $\pi^*$ and $\pi$, for example, the Kullback–Leibler divergence $D_{KL}(\pi(s) \| \pi^*(s))$, over states gathered while following $\pi^*$. Since policies are multivariate functions, direct application of Sobolev Training would mean producing full Jacobian matrices with respect to $s$, which for large action spaces is computationally expensive. To avoid this issue we employ the stochastic approximation described in Section~\ref{sec:model}, thus resulting in the objective $$ \min_\theta D_{KL}(\pi(s|\theta) \| \pi^*(s)) + \alpha \mathbb{E}_{v}\left [ \| \nabla_s \langle \log \pi^*(s) , v \rangle - \nabla_s \langle \log \pi(s|\theta) , v \rangle \| \right ], $$ where the expectation is taken with respect to $v$ coming from a uniform distribution over the unit sphere, and Monte Carlo sampling is used to approximate it. As target policies $\pi^*$, we use agents playing Atari games~\cite{mnih2013playing} that have been trained with A3C~\cite{mnih2016asynchronous} on three well known games: Pong, Breakout and Space Invaders. The agent's policy is a neural network consisting of 3 layers of convolutions followed by two fully-connected layers, which we distill to a smaller network with 2 convolutional layers and a single smaller fully-connected layer (see SM for details). Distillation is treated here as a purely supervised learning problem, as our aim is not to re-evaluate known distillation techniques, but rather to show that if the aim is to minimise a given divergence measure, we can improve distillation using Sobolev Training. \begin{figure}[t] \begin{tabular}{cc} Test action prediction error & Test $D_{KL}$ \\ \includegraphics[width=0.485\textwidth]{acc.png}& \includegraphics[width=0.485\textwidth]{dkl.png}\\ \multicolumn{2}{c}{ \tikz\draw[white,fill=black!90] (0,0) circle (.5ex); Regular distillation \;\; \tikz\draw[white,fill=blue!90] (0,0) circle (.5ex); Sobolev distillation} \end{tabular} \caption{Test results of distillation of RL agents on three Atari games. Reported test action prediction error (left) is the error of the most probable action predicted between the distilled policy and target policy, and test D$_{KL}$ (right) is the Kullback–Leibler divergence between the policies. Numbers in the column titles represent the percentage of the 100K recorded states used for training (the remaining are used for testing). In all scenarios the Sobolev distilled networks are significantly more similar to the target policy.} \label{fig:distill} \end{figure} Figure~\ref{fig:distill} shows test error during training with and without Sobolev Training\footnote{Testing is performed on a held out set of episodes, thus there are no temporal or causal relations between training and testing}. The introduction of Sobolev Training leads to similar effects as in the previous section -- the network generalises much more effectively, and this is especially true in low data regimes. Note the performance gap on Pong is small due to the fact that the optimal policy is quite degenerate for this game\footnote{For the majority of the time the policy in Pong is uniform, since actions taken when the ball is far away from the player do not matter at all. Only in crucial situations does it peak so that the ball hits the paddle.}. In all remaining games one can see a significant performance increase from using our proposed method, as well as minor to no overfitting.
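As with the earlier objectives, this stochastic distillation loss is simple to implement. A minimal Python/JAX sketch for a single state $s$ and a single Monte Carlo sample of $v$ (illustrative names, with \texttt{num\_actions} an assumed constant; in practice one would batch over states and samples) is:
\begin{verbatim}
def sobolev_distill_loss(params, student_logp, teacher_logp, s, key,
                         alpha=1.0):
    # student_logp(params, s) and teacher_logp(s) return log-policies over
    # actions for a state s; v projects out the action dimension.
    kl = jnp.sum(jnp.exp(student_logp(params, s))
                 * (student_logp(params, s) - teacher_logp(s)))
    v = jax.random.normal(key, (num_actions,))
    v = v / jnp.linalg.norm(v)                       # uniform on the sphere
    g_teacher = jax.grad(lambda si: jnp.dot(teacher_logp(si), v))(s)
    g_student = jax.grad(lambda si: jnp.dot(student_logp(params, si), v))(s)
    return kl + alpha * jnp.linalg.norm(g_teacher - g_student)
\end{verbatim}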
Despite looking like a regularisation effect, we stress that Sobolev Training is not trying to find the simplest models for the data or suppress the expressivity of the model. This training method aims at matching the original function's smoothness/complexity, and so reduces overfitting by effectively extending the information content of the training set, rather than by imposing a data-independent prior as with regularisation. \vspace{-1mm} \subsection{Synthetic Gradients} \vspace{-1mm} \begin{table}[t] \caption{Various techniques for producing synthetic gradients. Green shaded nodes denote nodes that receive supervision from the corresponding object in the main network (gradient or loss value). We report accuracy on the test set $\pm$ standard deviation. Backpropagation results are given in parentheses. } \begin{tabular}{l} \toprule \hspace{1.8cm} \includegraphics[height=4cm]{sntable1.pdf}\hspace{-1.25cm} \includegraphics[height=4cm]{sntable2.pdf}\hspace{-0.25cm} \includegraphics[height=4cm]{sntable3.pdf}\hspace{-0.75cm} \includegraphics[height=4cm]{sntable4.pdf}\hspace{-0.75cm} \includegraphics[height=4cm]{sntable5.pdf}\\ \end{tabular} \begin{tabular}{L{2.0cm}C{1.8cm}C{2.1cm}C{2cm}C{2cm}C{2cm}} \midrule ~ & Noprop & Direct SG~\cite{jaderberg2016decoupled} & VFBN~\cite{vfbn} & Critic & Sobolev\\ \midrule \multicolumn{6}{l}{\bf CIFAR-10 \small{\bf with 3 synthetic gradient modules}}\\ Top 1 (94.3\%) & ~54.5\% \scriptsize{$\pm 1.15$}& 79.2\% \scriptsize{$\pm 0.01$} & 88.5\% \scriptsize{$\pm 2.70$} & 93.2\% \scriptsize{$\pm 0.02$}& 93.5\% \scriptsize{$\pm 0.01$}\\ \midrule \multicolumn{6}{l}{\bf ImageNet \small{\bf with 1 synthetic gradient module}} \\ Top 1 (75.0\%) & ~54.0\% \scriptsize{$\pm 0.29$} & - & 57.9\% \scriptsize{$\pm 2.03$} & 71.7\% \scriptsize{$\pm 0.23$} & 72.0\% \scriptsize{$\pm 0.05$} \\ Top 5 (92.3\%) & ~77.3\% \scriptsize{$\pm 0.06$} & - & 81.5\% \scriptsize{$\pm 1.20$} & 90.5\% \scriptsize{$\pm 0.15$}& 90.8\% \scriptsize{$\pm 0.01$} \\ \midrule \multicolumn{6}{l}{\bf ImageNet \small{\bf with 3 synthetic gradient modules}}\\ Top 1 (75.0\%) & 18.7\% \scriptsize{$\pm 0.18$} & - & 28.3\% \scriptsize{$\pm 5.24$} & 65.7\% \scriptsize{$\pm 0.56$} & 66.5\% \scriptsize{$\pm 0.22$}\\ Top 5 (92.3\%) & 38.0\% \scriptsize{$\pm 0.34$} & - & 52.9\% \scriptsize{$\pm 6.62$} & 86.9\% \scriptsize{$\pm 0.33$} & 87.4\% \scriptsize{$\pm 0.11$} \\ \bottomrule \end{tabular} \label{tab:unification} \label{tab:imagenet} \end{table} The previous experiments have shown how information about derivatives can boost the approximation of function values. However, the core idea of Sobolev Training is broader than that and can be employed in both directions. Namely, if one ultimately cares about approximating derivatives, then additionally approximating values can help this process too. One recent technique which requires a model of gradients is Synthetic Gradients (SG)~\cite{jaderberg2016decoupled} -- a method for training complex neural networks in a decoupled, asynchronous fashion. In this section we show how we can use Sobolev Training for SG. The principle behind SG is that instead of doing full backpropagation using the chain rule, one splits a network into two (or more) parts and approximates the partial derivatives of the loss $L$ with respect to some hidden layer activations $h$ with a trainable function $SG(h, y | \theta)$.
In other words, given that network parameters up to $h$ are denoted by $\Theta$, $$ \frac{\partial L}{\partial \Theta} = \frac{\partial L}{\partial h}\frac{\partial h}{\partial \Theta} \approx SG(h, y|\theta)\frac{\partial h}{\partial \Theta}. $$ In the original SG paper, this module is trained to minimise $ L_{SG}(\theta) = \left \| SG(h, y|\theta) - \tfrac{\partial L(p_h,y)}{\partial h} \right \|^2_2, $ where $p_h$ is the final prediction of the main network for hidden activations $h$. For the case of learning a classifier, in order to apply Sobolev Training we construct a loss predictor, composed of a class predictor $p(\cdot|\theta)$ followed by the log loss, which gets supervision from the true loss, while the gradient of the prediction gets supervision from the true gradient: $$ m(h, y | \theta) := L(p(h| \theta),y), \;\;\;\; SG(h, y | \theta) := \partial m(h, y | \theta) / \partial h, $$ $$ L_{SG}^{sob}(\theta) = \ell\left( m(h, y | \theta) , L(p_h, y) \right) + \ell_1 \left ( \tfrac{\partial m(h, y | \theta)}{\partial h} , \tfrac{\partial L(p_h,y)}{\partial h} \right ). $$ In the Sobolev Training framework, the target function is the loss of the main network, $L(p_h, y)$, which we train a model $m(h, y | \theta)$ to approximate, and in addition we ensure that the model's derivatives $\partial m(h, y | \theta)/{\partial h}$ are matched to the true derivatives $\partial L(p_h,y)/\partial h$. The model's derivatives $\partial m(h, y | \theta)/\partial h$ are used as the synthetic gradient to decouple the main network. This setting closely resembles what is known in reinforcement learning as critic methods~\cite{konda1999actor}. In particular, if we do not provide supervision on the gradient part, we end up with a loss critic. Similarly, if we do not provide supervision at the loss level, but only on the gradient component, we end up with a method that resembles VFBN~\cite{vfbn}. In light of these connections, our approach in this application setting can be seen as a generalisation and unification of several existing ones (see Table~\ref{tab:unification} for illustrations of these approaches). We perform experiments on decoupling deep convolutional neural network image classifiers using synthetic gradients produced by loss critics that are trained with Sobolev Training, and compare to regular loss critic training, and regular synthetic gradient training. We report results on CIFAR-10 for three network splits (and therefore three synthetic gradient modules) and on ImageNet with one and three network splits\footnote{N.b. the experiments presented use learning rates, annealing schedules, etc.\ optimised to maximise the backpropagation baseline, rather than the synthetic gradient decoupled result (details in the SM).}. The results are shown in Table~\ref{tab:imagenet}. With a naive SG model, we obtain 79.2\% test accuracy on CIFAR-10. Using an SG architecture which resembles a small version of the rest of the model makes learning much easier and leads to 88.5\% accuracy, while Sobolev Training achieves 93.5\% final performance. The regular critic also trains well, achieving 93.2\%, as the critic forces the lower part of the network to provide a representation which it can use to reduce the classification (and not just prediction) error. Consequently it provides a learning signal which is well aligned with the main optimisation. However, this can lead to building representations which are suboptimal for the rest of the network.
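As a concrete illustration of the loss-critic construction defined above, the following is a minimal PyTorch sketch (an illustrative sketch, not the code used for the reported experiments): the module wraps a small class predictor, exposes the modelled loss $m$ and its input-gradient as the synthetic gradient, and is trained with value plus gradient supervision. The concrete choices of squared error for $\ell$ and an $L_1$ penalty for $\ell_1$, as well as all names, are assumptions made for the example.

\begin{verbatim}
import torch
import torch.nn.functional as F

class SobolevSGModule(torch.nn.Module):
    # Loss critic: a small class predictor p(.|theta) followed by the log
    # loss; its gradient with respect to h is the synthetic gradient.
    def __init__(self, predictor):
        super().__init__()
        self.predictor = predictor

    def forward(self, h, y):
        h = h.detach().requires_grad_(True)         # decouple from main net
        m = F.cross_entropy(self.predictor(h), y)   # m(h, y | theta)
        sg, = torch.autograd.grad(m, h, create_graph=True)
        return m, sg                                # modelled loss, SG(h, y)

def sobolev_sg_loss(m, sg, true_loss, true_grad):
    # Value supervision on the loss plus gradient supervision on dm/dh.
    return (F.mse_loss(m, true_loss.detach())
            + F.l1_loss(sg, true_grad.detach()))
\end{verbatim}

During decoupled training, the main network would apply the detached synthetic gradient at $h$, e.g.\ via \texttt{h.backward(sg.detach())}, while the module itself is updated with \texttt{sobolev\_sg\_loss} whenever true losses and gradients become available.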
Adding additional gradient supervision by constructing our Sobolev SG module avoids this issue by making sure that synthetic gradients are truly aligned, and gives an additional boost to the final accuracy. For ImageNet~\cite{deng2009imagenet} experiments based on ResNet50~\cite{he2016deep}, we obtain qualitatively similar results. Due to the complexity of the model and an almost 40\% gap between no backpropagation and full backpropagation results, the difference between methods with and without loss supervision grows significantly. This suggests that, at least for ResNet-like architectures, loss supervision is a crucial component of an SG module. After splitting ResNet50 into four parts the Sobolev SG achieves 87.4\% top 5 accuracy, while the regular critic SG achieves 86.9\%, confirming our claim about suboptimal representations being enforced by gradients from a regular critic. Sobolev Training results were also much more reliable in all experiments (significantly smaller standard deviation of the results). \vspace{-1mm} \section{Discussion and Conclusion} \vspace{-1mm} In this paper we have introduced Sobolev Training for neural networks -- a simple and effective way of incorporating knowledge about derivatives of a target function into the training of a neural network function approximator. We provided theoretical justification that encoding both a target function's value as well as its derivatives within a ReLU neural network is possible, and that this results in more data efficient learning. Additionally, we showed that our proposal can be efficiently trained using stochastic approximations if computationally expensive Jacobians or Hessians are encountered. In addition to toy experiments which validate our theoretical claims, we performed experiments to highlight two very promising areas of application for such models: one being distillation/compression of models; the other being the application to various meta-optimisation techniques that build models of other models' dynamics (such as synthetic gradients, learning-to-learn, etc.). In both cases we obtain significant improvements over classical techniques, and we believe there are many other application domains in which our proposal should give a solid performance boost. In this work we focused on encoding the true derivatives of the target function in the corresponding derivatives of the neural network. Another possibility for future work is to encode information which one believes to be highly correlated with derivatives. For example, curvature~\cite{pascanu2013revisiting} is believed to be connected to uncertainty. Therefore, given a problem with known uncertainty at training points, one could use Sobolev Training to match the second order signal to the provided uncertainty signal. Finite differences can also be used to approximate gradients for black box target functions, which could help when, for example, learning a generative temporal model. Another unexplored path would be to apply Sobolev Training to \emph{internal derivatives} rather than just derivatives with respect to the inputs. \footnotesize \bibliographystyle{plain}
\section{Introduction} Much remains unclear about the relationship between neural network architecture, i.e. the number and arrangement of network nodes, and trained model performance. In practice, much of neural architecture optimisation is done empirically. Deeper and, particularly, wider neural networks have been found to be resilient to overfitting \cite{caruana2001overfitting, sagun2014explorations}, to improve model capacity to approximate continuous functions \cite{cybenko1989approximation, hornik1989multilayer, lu2017advances} and to converge consistently \cite{bosman2020loss, springer2020s}. Recent findings have shown that larger networks rely on favourably seeded subnetworks \cite{frankle2018lottery, springer2020s}. Lacking \textit{a priori} knowledge about what makes performant subnetworks special, however, such findings may be interpreted as a prescription to train overparameterised networks~\cite{springer2020s}, containing far more parameters than inherently required for optimal training. The gain in model performance comes at a significant computational cost, making deep neural networks time and energy inefficient. This provides motivation to seek new methods with which to characterise the neural architecture space.\looseness=-1 Solutions in a search space and the relationships between them can be characterised by introducing a topology using the notion of a fitness landscape \cite{malan2013survey}, with appropriate definitions of a solution's fitness and its neighbourhood. Ochoa \textit{et al.} \cite{ochoa2008study} introduced a derivative of the fitness landscape, a local optima network (LON), narrowing the focus to local optima, which comprise the LON nodes. Nodes are connected in a directed graph by edges weighted based on the probability of a perturbation from the starting node resulting in an escape to the basin of the destination node \cite{ochoa2008study, mostert2019insights}. LONs have been used to study NK-landscapes \cite{ochoa2008study}, difficulty phase transitions in combinatorial optimisation \cite{ochoa2017understanding}, and to characterise feature selection algorithms \cite{mostert2019insights}. The use of a LON may enhance analysis of the global landscape structure by aiding visual inspection and yielding a range of numerical landscape features. Combined assessment of landscape features enhances search space characterisation, search hardness estimation and optimisation. \looseness=-1 As an initial proof of concept of the application of LON analysis to the neural architecture space, the search space considered in this study comprises all possible feedforward neural network architectures with a depth of up to 3 hidden layers, each with up to 10 neurons, trained on a given data set. The fitness of all solutions is measured by taking the mean performance of the corresponding trained model on test data over 30 runs. The evaluation is repeated over 9 different data sets comprising classification and regression tasks, and the results are compared. From each resulting fitness landscape, a LON is derived and its structure investigated to establish the viability of the use of LONs for neural architecture search and optimisation. The following novel results are established:\looseness=-1 \begin{itemize} \item A high local optimum fitness standard deviation is related to low modality, which improves the relative ease of fitness optimisation.
Low standard deviation at low fitness levels, related to high modality, signals that a model is not sufficiently powerful for a given task.\looseness=-1 \item Node and edge counts are linearly correlated beyond low extremes. Higher modality involves a higher number of smaller basins, so that a higher proportion of basins are out of reach of any particular candidate solution. \item Edges are more likely to be fitness improving than deteriorating, coinciding with a general increase in local optima basin size.\looseness=-1 \item LONs differ notably between data sets in terms of node and edge count, fitness distribution, basin size distribution and global optimum incoming strength. The solution space is thus heterogeneous and dependent on the particular data set.\looseness=-1 \item Despite the diverging LON characteristics, all data sets except one produce a single global sink. The simple funnel structure suggests that simple iterative local optimisers may effectively be used to find near optimal solutions.\looseness=-1 \end{itemize} This study maintains a focus on densely connected feedforward neural networks and does not consider alternative architectural arrangements, such as recurrent or partial connections. The search space is restricted to the neural architecture, excluding the weight space and selection of activation function, while acknowledging that both have been shown to have great bearing on model performance. To control for variance in performance due to randomised weight initialisation, the mean fitness over 30 models trained per architecture is used. The range of architectures evaluated and the data sets used are relatively small in size, due to computational constraints. These selections serve to establish initial viability of local optima network analysis in the present domain by fully enumerating the space over multiple data sets.\looseness=-1 The rest of the paper is organised as follows. Section~\ref{sec:meth:fl} discusses fitness landscapes. Section~\ref{sec:lons:lon} provides the background on LONs. Section~\ref{sec:meth:nn} details the neural network configuration used in the study. Section~\ref{sec:meth:data} discusses the datasets used. Section~\ref{sec:results} presents the empirical results of the study. Finally, Section~\ref{sec:con:res} concludes the paper.\looseness=-1 \section{Fitness Landscapes} \label{sec:meth:fl} A fitness landscape is formally defined by three components, namely a set of solutions $S$, a notion of a neighbourhood $N(s)$ of each solution $s \in S$, and a fitness function $f(s)$ that assigns a fitness value to each solution \cite{malan2013survey}. The approach of assigning fitness values to candidate solutions has been adapted for optimisation problems. To analyse the interrelation between solutions, the notion of a neighbourhood is introduced based on a suitable distance measure. Thus a topology is constructed, producing a fitness landscape. A landscape is uniquely defined by the tuple $(S, N, f)$. If either the neighbourhood or the fitness function is changed, a different fitness landscape results, even if the set of solutions is maintained. Various landscape features can be extracted, such as the fitness distribution, the funnel structure, and the number and structure of optima.\looseness=-1 Intuitively, a fitness landscape corresponds to a physical landscape, where the relative `heights' of fitness values shape hills, plateaus, sinks and valleys. 
In the context of search algorithms, the fitness landscape can be formalised as a graph with solutions as its nodes and edges between neighbours \cite{malan2013survey}. The neighbourhood of a solution $s$ is the set of solutions obtained by applying a search operator, starting with $s$ \cite{moser2017identifying}. In discrete space, as is the case with neural architectures, the operator may be defined as a reconfiguration of solution parameters, such as a bit flip in a binary string \cite{ochoa2008study, ochoa2017understanding}. In continuous space, the operator may be described using a distance metric, such as Euclidean distance \cite{malan2013survey}. \looseness=-1 In order to consider neural architecture space from the fitness landscape analysis perspective, definitions of candidate solutions, their fitness and neighbourhoods, as well as what constitutes a local optimum, must be provided. These are given in the subsections below.\looseness=-1 \subsection{Candidate solutions} \label{sec:meth:fl:sol} The solution space $S$ considered in this study comprises all possible feedforward neural network architectures with a depth of up to 3 hidden layers, each containing between 1 and 10 neurons, trained on a given data set. For ease of reference, neural networks with $n$ \textit{hidden layers} are here simply referred to as having $n$ \textit{layers}, while the full network invariably contains input and output layers. Only the network architecture is evaluated, randomly initialising weights and keeping other parameters, such as activation functions and their configurations, fixed. The neural architecture can be depicted as the set of 1-, 2- and 3-tuples of numbers 1 to 10. More formally, given $A = \{1, 2, \dots, 10\}$, the solution space is parametrised as $S' = (A)\; \cup\; (A \times A)\; \cup\; (A \times A \times A)$\footnote{where $\times$ denotes the Cartesian product and $\cup$ set union}. For example, $(3) \in (A)$, $(4, 6) \in (A \times A)$ and $(5, 8, 2) \in (A \times A \times A)$.\looseness=-1 The architecture search space is combinatorial, being discrete and finite. Within a defined maximum layer depth $d$ and individual layer width $w$, $|S| = \sum_{l=1}^{d}w^l$. With $d=3, w=10$ the total amounts to $|S| = |S'| = 1110$. $S$ is the set of feedforward networks parametrised as $S'$, which can be expressed as $S = \mathcal{X}(S')$, where $\mathcal{X}$ represents the derivation of the architecture instantiations corresponding to parametrised solutions in $S'$. Denote the entries of the $n$-tuple $s' \in S'$ by $(s'_1,\dots, s'_d)$, a hidden layer containing $m$ neurons by $\bs{h}[m]$ and a directional connection between layers by $\bs{h}\rightarrow\bs{h}$. Accordingly, a solution $s \in S$ can be constructed as \begin{equation*} \begin{split} s = \mathcal{X}(s') & = \mathcal{X}((s'_1, \dots, s'_d)) \\ & = \bs{x} \rightarrow \bs{h}[s'_1] \rightarrow \dots \rightarrow \bs{h}[s'_d] \rightarrow \bs{y}. \end{split} \end{equation*} For example, if $s' = (4, 3)$, then $s = \mathcal{X}((4, 3)) = \bs{x} \rightarrow \bs{h}[4] \rightarrow \bs{h}[3] \rightarrow \bs{y}$, as depicted in \fref{fig:ffnn43}. The architecture space is fully enumerated by fitting all possible configurations within the defined bounds, training the corresponding model and evaluating its test performance.
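As a quick sanity check on these counts, the following small Python sketch (illustrative, not part of the original study) enumerates the parametrised space $S'$ and verifies $|S'| = 1110$.

\begin{verbatim}
from itertools import product

WIDTHS = range(1, 11)     # 1..10 neurons per hidden layer
MAX_DEPTH = 3             # up to 3 hidden layers

# S' = A u (A x A) u (A x A x A): every architecture as a tuple of widths
search_space = [arch
                for depth in range(1, MAX_DEPTH + 1)
                for arch in product(WIDTHS, repeat=depth)]

# |S| = sum_{l=1}^{d} w^l = 10 + 100 + 1000 = 1110
assert len(search_space) == sum(10 ** d for d in range(1, MAX_DEPTH + 1))
\end{verbatim}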
\begin{figure} \centering{\includegraphics[width=0.5\columnwidth]{FFNN43.pdf}} \caption{Model with layers containing 4 and 3 hidden neurons} \label{fig:ffnn43} \end{figure} \subsection{Fitness evaluation} \label{sec:meth:fl:fit} As the fitness value of a candidate solution, $f(s)$, we take the performance per configuration, computed as the R-squared ($R^2$) statistic for both regression and classification tasks, defined as \begin{equation} R^2 = 1 - \frac{\sum_i(y^{(i)} - \hat{y}^{(i)})^2}{\sum_i(y^{(i)} - \bar{y})^2}, \end{equation} computed over $N$ predictions, where $y^{(i)}$ are the actual values of the response variable, $\bar{y}$ is the mean of the response values, and $\hat{y}^{(i)}$ are the corresponding prediction values. $R^2$ values typically range from a maximum of 1 for perfect predictions to 0, but exceptionally large errors can yield arbitrarily large negative values. $R^2$ is selected due to its scale invariance, which aids interpretation of results. To facilitate the use of $R^2$ in classification tasks, the uniform average for multiple outputs (target classes) is used per prediction to obtain residuals. A consistent fitness measure is, therefore, used throughout the study. For each architecture, a batch of 30 models is trained and evaluated to control for variability in model performance due to random weight initialisation. The first model per batch is identically seeded, and subsequent training procedures correspondingly inherit from it. Thus the first 30 models trained under the same conditions will always produce the same set of results, while individual runs within a batch produce varying results. Controlled seeding is necessary for commensurability between solutions. The mean $R^2$ value per batch is used as the final fitness value per solution. \subsection{Neighbourhoods} \label{sec:meth:fl:nbh} Let the neighbourhood $\mathcal{N}(s)$ of a solution $s \in S$ be defined by an operation which either offsets the width of one layer by one or alternatively prunes or clones a layer, within allowed bounds. That is, the neighbourhood of solution $s$ includes all arrangements with either the number of neurons incremented or decremented in one layer, or one layer duplicated or removed, without adjusting layer-wise neuron counts. \subsubsection{Width Offsets} Width offsets increment or decrement one layer's neuron count. Let $\mathcal{W}(s)$ denote the set of all possible layer-wise neuron decrements and increments applied to a solution $s$. With $\mathcal{W'}(s')$ the set of offsets of the corresponding $n$-tuple $s' \in S'$, and $N = \{1, \dots, n\}$, width offsets are formalised by $\mathcal{W}(s) = \mathcal{W}(\mathcal{X}(s')) = \mathcal{X}(\mathcal{W'}(s'))$, and \begin{equation} \label{eq:woff} \begin{split} \mathcal{W'}(s') & = D \cup I, \text{ where} \\ D & = \{ t' \in S' \mid \exists ! i \in N \text{ s.t. } t_i' = s'_i-1, t_{j \neq i}' = s'_j \}, \\ I & = \{ t' \in S' \mid \exists ! i \in N \text{ s.t. } t_i' = s'_i+1, t_{j \neq i}' = s'_j \}, \end{split} \end{equation} where $t' \in S'$ implies that width bounds are not exceeded and the corresponding architecture must be a valid solution in $S$, and $\exists!i$ denotes \textit{`there exists a unique $i$'}. \subsubsection{Depth Offsets} Depth is offset via either cloning or pruning. Cloning involves replicating the neuron count in an existing layer and adjacently inserting an additional layer. Pruning is done by removing a selected layer, provided at least one layer remains.
Due to pruning, neighbourhoods are not necessarily symmetric, e.g. $(2, 2)$ is a neighbour of $(2, 2, 8)$ by virtue of pruning the third layer, but $(2, 2, 8)$ is not a neighbour of $(2, 2)$, since cloning can only duplicate an existing layer. Let $\mathcal{D}(s)$ denote the set of all single layer pruning and cloning permutations applied to a solution $s$, and $\mathcal{D'}(s')$ the corresponding set resulting from adding to or removing an entry from the $n$-tuple $s'$. The depth offset operation is formalised as $\mathcal{D}(s) = \mathcal{D}(\mathcal{X}(s')) = \mathcal{X}(\mathcal{D'}(s'))$, where \begin{equation} \label{eq:doff} \begin{split} \mathcal{D'}(s') & = \{t' \in S' \mid \exists ! i \in N \text{ s.t. } t' = s'_{1,\dots,i,i,\dots,n} \} \\ & \cup \{t' \in S' \mid \exists ! i \in N \text{ s.t. } t' = s'_{1,\dots,i-1,i+1,\dots,n} \}. \end{split} \end{equation} The resulting set of $n$-tuples in $S'$ must correspond to valid solutions in $S$. Using \eref{eq:woff} and \eref{eq:doff}, the neighbourhood of a solution $s$ is defined as \begin{equation} \label{eq:nbh} \begin{split} \mathcal{N}(s) & = \mathcal{W}(s) \cup \mathcal{D}(s) \\ & = \mathcal{X}(\mathcal{W'}(s')) \cup \mathcal{X}(\mathcal{D'}(s')). \end{split} \end{equation} \fref{fig:ns} shows an example of the neighbourhood of the solution $s' = (4, 3)$ (illustrated in \fref{fig:ffnn43}), differentiating $\mathcal{W}(s)$ and $\mathcal{D}(s)$. The neighbourhood operation produces 5879 edges per landscape, and the average number of neighbours per solution\footnote{every edge adds a neighbour to both solutions or nodes it connects, thus twice the edge count is used when computing the average: $5879(2) / 1110$} is around 10.6. \begin{figure*} \makebox[\textwidth][c]{\includegraphics[width=0.95\textwidth]{NsAlt.pdf}} \caption[Neighbourhood of solution $s$ with (4, 3) architecture]{$\mathcal{N}(s)$, where $s' = (4, 3)$} \label{fig:ns} \end{figure*} \subsection{Local Optima} \label{sec:lons:lop} A local optimum (LO) is a solution with a fitness value that matches or exceeds that of all solutions in its neighbourhood. Each local optimum $i$ is surrounded by a basin of attraction, which is the set of solutions from which $i$ will be reached by following a local search procedure. Formally, $b_i = \{ s \in S | h(s) = i \}$, where $h(s)$ denotes the local optimum reached by the local search procedure started from $s$, and $|b_i|$ is the cardinality of the basin. A best-improvement hill climbing local search is used in this study to locate local optima, defined in Algorithm~\ref{alg:best}. \begin{algorithm} \caption{Maximising best-improvement hill climbing} \label{alg:best} \begin{algorithmic} \STATE {\bfseries Input:} initial solution $x$ \REPEAT \STATE $x' \leftarrow \arg\max_{y\in\mathcal{N}(x)}f(y)$ \IF{$f(x') > f(x)$} \STATE $x \leftarrow x'$ \ENDIF \UNTIL{$x$ is a local optimum} \end{algorithmic} \end{algorithm} \section{Local Optima Networks} \label{sec:lons:lon} This study aimed to construct and investigate LONs for the neural architecture space. For each data set considered, the architecture space was fully enumerated, and the hill climbing terminus starting with each solution was determined, exhaustively populating LON \textit{nodes}. To draw \textit{edges}, the neighbourhood operation defined in \eref{eq:nbh} was selected as a basis, applied with perturbation strength $D = 2$, which is consistent with related literature \cite{ochoa2017understanding, mostert2019insights}, and is also empirically found to be sufficient for escape, particularly from suboptimal solutions.
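A compact Python sketch of the neighbourhood operator of \eref{eq:nbh} and the best-improvement hill climbing of Algorithm~\ref{alg:best} may help make these definitions concrete; function names and the assumption of a memoised \texttt{fitness} callable are illustrative, not taken from the study's code.

\begin{verbatim}
def neighbours(arch, w_max=10, d_max=3):
    # N(s) = W(s) u D(s): width offsets, plus layer cloning and pruning.
    out = set()
    for i in range(len(arch)):
        for delta in (-1, 1):                  # width offsets W(s)
            w = arch[i] + delta
            if 1 <= w <= w_max:
                out.add(arch[:i] + (w,) + arch[i + 1:])
        if len(arch) < d_max:                  # clone layer i adjacently
            out.add(arch[:i + 1] + (arch[i],) + arch[i + 1:])
        if len(arch) > 1:                      # prune layer i
            out.add(arch[:i] + arch[i + 1:])
    return out

def hill_climb(s, fitness):
    # Best-improvement hill climbing (Algorithm 1), maximising fitness.
    while True:
        best = max(neighbours(s), key=fitness)
        if fitness(best) > fitness(s):
            s = best
        else:
            return s                           # s is a local optimum
\end{verbatim}

For example, \texttt{neighbours((2, 2, 8))} contains \texttt{(2, 2)} via pruning, while \texttt{neighbours((2, 2))} contains \texttt{(2, 2, 2)} but not \texttt{(2, 2, 8)}, reproducing the asymmetry noted above.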
An edge in the LON thus represents the relative likelihood of moving into a neighbouring basin of attraction from a local optimum after a controlled perturbation of up to two random moves. The value of $D$ has a marked effect on the connectedness of the LON graph, since more random moves allow for farther reaching escape routes. Directed LON edges are weighted by the number of distinct paths resulting in a move between the basins of the two local optima they connect. When calculating edge weights, the path counts between nodes are not scaled to a proportion of all distinct escapes from the source node. Thus weights do not represent probabilities, but rather the relative likelihood of escape. \subsection{Funnels} \label{sec:lons:funn} In a monotonic LON (MLON) \cite{ochoa2017understanding}, non-deterioration is required for an edge to be drawn between nodes. MLONs yield the notion of funnels, which are the set of monotonic sequences leading to a particular local optimum. A funnel is a basin at the local optima level \cite{thomson2020inferring}, with a sink node as its terminus. MLON sinks have out-degree zero, and sources have in-degree zero. \section{Neural Network Configuration} \label{sec:meth:nn} In the present context, the particular hyper-parameter values selected are less important than their consistency, which ensures commensurability of solutions between data sets. Apart from output activation and loss, all parameters were consistent between classification and regression tasks. All configuration settings used in the experiments are presented in \tref{tbl:nn}. \begin{table}[t] \caption{Neural Network Parameters} \label{tbl:nn} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \begin{tabular}{lll} \toprule \textbf{Parameter} & \textbf{Classification} & \textbf{Regression} \\ \midrule Weight init. & \textit{He uniform var.} & \textit{He uniform var.} \\ Hid. layer act. & \textit{ReLU} & \textit{ReLU} \\ Output act. & \textit{Softmax} & \textit{Linear} \\ Loss & \textit{S.c. cross-entropy} & \textit{MSE} \\ Max epochs & 100 & 100 \\ Early stop $\Delta$ & $<0.0001 \times 10$ & $<0.0001 \times 10$ \\ Optimiser & \textit{Adam} (0.01 l.r.) & \textit{Adam} (0.01 l.r.) \\ Train-val-test & 0.7 / 0.15 / 0.15 & 0.7 / 0.15 / 0.15 \\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \section{Data} \label{sec:meth:data} Models were trained on 9 different data sets: 6 classification tasks (\tref{tbl:cls}) and 3 regression tasks (\tref{tbl:reg}). While this presents a small sample, the intention was to analyse the fitness landscapes produced for a range of relatively common data sets. Classification sets included commonly used tabular and image recognition data. While these are relatively small data sets, the two categories differ significantly in kind and difficulty. All three regression sets are tabular, but have diverging input features and pattern counts. With a limited model and data set size, each model was trained using a single CPU. Over an average of 50.86 epochs, the time required per model was an average of 30.77 seconds, ranging from 1.71 seconds for Iris to over 112 seconds for MNIST. The cost of fully enumerating the solution space is, however, non-negligible. For each data set, each of the 1110 architectures was trained 30 times, totalling 33300 training procedures per set. To train the full complement, a computing cluster\footnote{\url{https://www.chpc.ac.za/index.php/resources/lengau-cluster}} was used to maximise the number of simultaneous training procedures.
This consideration limits the number and size of the sets that can feasibly be included. \begin{table}[t] \caption{Classification Data Sets} \label{tbl:cls} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \begin{tabular}{llccc} \toprule \textbf{Name} & \textbf{Type} & \textbf{Patterns} & \textbf{Inputs} & \textbf{Cls} \\ \midrule Iris\cite{fisher1936use} & Tab & 150 & 4 & 3 \\ Wine\cite{forina1988parvus} & Tab & 178 & 13 & 3 \\ MNIST\cite{lecun1998gradient} & Img & 70000 & $28\times28$ & 10 \\ F. MNIST\cite{xiao2017fashion} & Img & 70000 & $28\times28$ & 10 \\ CIFAR-10\cite{krizhevsky2017imagenet} & Img & 60000 & $32\times32$ & 10 \\ CIFAR-100\cite{krizhevsky2017imagenet} & Img & 60000 & $32\times32$ & 100 \\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \begin{table}[t] \caption{Regression Data Sets} \label{tbl:reg} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \begin{tabular}{llcc} \toprule \textbf{Name} & \textbf{Type} & \textbf{Patterns} & \textbf{Inputs} \\ \midrule Wine Quality\cite{cortez2009modeling} & Tabular & 4898 & 11 \\ Insurance\cite{choi2017med} & Tabular & 1338 & 6 \\ Housing\cite{belsley2005regression} & Tabular & 506 & 13 \\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} Image data arrays were flattened by concatenating the rows into a single input vector. Colour values were first converted to greyscale before being flattened. Numeric input variables were $z$-score standardised. Binary input variables were encoded as $-1$ and $1$, and multi-class inputs were one-hot encoded. Regression labels were left unscaled. \section{Results}\label{sec:results} Salient results relating to the extracted fitness landscapes are presented in \sref{sec:res:fl}, and to the local optima networks in \sref{sec:res:lon}. Data set names are abbreviated. \subsection{Fitness Landscapes} \label{sec:res:fl} A bird's eye view of the fitness values produced by the trained neural networks, as represented by the box plots in \fref{fig:fitdisp}, reveals a wide range of outcomes across the different data sets. Only \textit{iris} and \textit{wine} have solutions that achieved an $R^2$ score of 1. Fitness values varied significantly between data sets in terms of mean and median statistics, range and distribution. For all data sets, the bulk of the fitness values are bounded by 0 and 1, but all sets exhibited solutions with negative fitness, representing a complete failure to fit the response data. Negative outliers were much more extreme for the regression tasks, in particular \textit{insur} and \textit{housing}. \textit{Softmax}, used for the classification output, produces a probability distribution with values between 0 and 1 across all classes, summing to 1, which effectively bounds the possible numeric divergence; regression outputs, by contrast, are unbounded and their targets are of arbitrary scale, as in this study. \begin{figure}[ht] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=1.02\columnwidth]{fitcomp_all_alt.pdf}} \caption[Solution fitness distribution]{Solution fitness distribution across the evaluated data sets. Boxes cover the interquartile range (IQR), whiskers extend to 1.5 times the IQR and circles depict outliers beyond that point.
Medians are coloured red and means, blue.} \label{fig:fitdisp} \end{center} \vskip -0.2in \end{figure} \subsection{Local Optima Networks} \label{sec:res:lon} A LON was extracted for each data set, with local optima (LO), found using best-improvement hill climbing, as the nodes, and edges drawn between LO if any controlled perturbation to one local optimum (source) reached a solution within the basin of another local optimum (target), as described in \sref{sec:lons:lon}. \tref{tbl:lonfeat} provides a comparison of basic features describing the resulting networks, further illustrating the variation between fitness landscapes. \begin{table}[t] \caption{LON features, using abbreviations \textbf{GO} = Global Optima, \textbf{LO} = Local Optima, \textbf{Edg} = Edges, \textbf{Fnl} = Funnels. Task types are indicated by \textit{cls} = classification and \textit{reg} = regression. Global optimum values are rounded to 5 decimals.} \label{tbl:lonfeat} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \begin{tabular}{lclccc} \toprule \textbf{Data Set} & \textbf{Task} & \textbf{GO} & \textbf{LO} & \textbf{Edg} & \textbf{Fnl} \\ \midrule iris & cls & 1 (0.86255) & 35 & 619 & 1 \\ wine & cls & 1 (0.96343) & 25 & 287 & 1 \\ mnist & cls & 1 (0.8544) & 4 & 12 & 1 \\ fmnist & cls & 1 (0.60385) & 11 & 56 & 1 \\ cifar10 & cls & 1 (0.00552) & 90 & 2542 & 2 \\ cifar100 & cls & 1 (0.00055) & 101 & 3134 & 1 \\ winequal & reg & 1 (0.30812) & 26 & 341 & 1 \\ insur & reg & 1 (0.88873) & 45 & 974 & 1 \\ housing & reg & 1 (0.90936) & 28 & 366 & 1 \\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \subsubsection{Local Optima Fitness Distribution} A comparison of LO fitnesses is displayed in \fref{fig:lops}, demonstrating an array of model capacities relative to the provided data sets. The distributions of LO fitness values also bear resemblance to the distributions of the solution space as a whole (\fref{fig:fitdisp}). The choice of $R^2$ as the fitness metric resulted in all solution fitnesses being globally unique. Consequently, all data sets produced a unique global optimum (\tref{tbl:lonfeat}). \begin{figure}[t] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=\columnwidth]{lopcomp_alt.pdf}} \caption{Local optima fitness comparison between data sets. The globally optimum architecture is indicated above each fitness stack.} \label{fig:lops} \end{center} \vskip -0.2in \end{figure} \subsubsection{Network Modality} \label{sec:res:nodeedge} The number of local optima varied from as few as 4 for \textit{mnist} to as many as 101 for \textit{cifar100} -- more than $9\%$ of the search space. As can be expected, the edge count correlated with the number of nodes, as demonstrated in \fref{fig:nodeedgeA}. While the maximum number of potential directional connections between nodes grows quadratically with the number of nodes, totalling $n(n-1)$ for $n$ nodes, it was observed that for the selected data sets, the edge count increased in a linear fashion for networks with more than 20 nodes, approximated by $35n-600$. A possible explanation is that higher modality implies a higher number of smaller basins, so that on average a higher proportion of basins are out of reach from any particular solution. \begin{figure}[t] \vskip 0.2in \begin{center} \centerline{\includegraphics[width=0.9\columnwidth]{nodeedgefit_alt.pdf}} \caption{Relation between LON nodes and edges, coloured according to global optimum value.
Additional plots show the maximum possible number of edges, $n(n-1)$, and the linear approximation, $35n-600$, where $n$ is the node count.} \label{fig:nodeedgeA} \end{center} \vskip -0.2in \end{figure} A lower global optimum value reduced the fitness range. This is correlated with an increase in modality when comparing the \textit{cifar} and \textit{mnist} data sets, but is not consistent, as illustrated in \fref{fig:nodeedgeA}. A better indicator may be the fitness standard deviation (SD) between local optima. \fref{fig:sdnodeedge} plots the number of LON nodes against the fitness SD per data set. At the extremes, \textit{cifar100} had the lowest SD and the highest modality, while \textit{mnist} expressed the inverse. The placement of the intermediate cases made the overall trajectory resemble exponential decay, with the node and edge counts decreasing rapidly as the SD increases near the lower end of its range. Yet there was a wide spread between data sets with similar SD, for example \textit{insur} and \textit{wine}. For the data sets considered, the inverse relation between SD and node count was thus found to be clearest at the extremes. \begin{figure}[t] \begin{center} \centerline{\includegraphics[width=0.9\columnwidth]{stdnodes_alt.pdf}} \caption{Relation between fitness SD and node count} \label{fig:sdnodeedge} \end{center} \vskip -0.2in \end{figure} Since the LON edges are directed, the breakdown of improving and deteriorating connections can be compared, that is, whether following an edge results in a move to a local optimum with a better or worse fitness. On average, improving moves were found to be around 1.8 times more common. \subsubsection{Local Optima Basins} A predominantly improving edge set makes sense when considering the effect of relative basin sizes between solutions. Fitter solutions can generally be expected to have bigger basins, which are easier to reach but harder to escape from. \fref{fig:lopfitbas} provides an overview of the relative LO basin sizes across fitness values per data set. Overall, relative basin size can be observed to scale up as fitness increases. The most extreme example is \textit{fmnist}, which has an absolutely dominant basin at its global optimum. \begin{figure}[t] \vskip 0.1in \begin{center} \centerline{\includegraphics[width=\columnwidth]{lopfitbas_alt.pdf}} \caption{Local optima basin sizes across relative fitness levels} \label{fig:lopfitbas} \end{center} \vskip -0.2in \end{figure} Sizeable suboptimal basins also exist, sometimes exceeding the basin size at the global optimum. \fref{fig:irisgraphA} plots the 2-dimensional LON graph for \textit{iris}, with self loops removed to simplify the layout. The size of each vertex is determined by the incoming strength of LON edges, demonstrating the correspondence between basin size and likelihood of being reached by a perturbation. For the same reason that larger basins at higher fitness levels make improving moves more likely, the basins of suboptimal solutions may provide obstacles to optimisation over the fitness landscape, even forming suboptimal sinks. Despite the prominence of the largest suboptimal local optimum in the \textit{iris} LON, it is not a sink, and is outward connected to the comparatively modest global optimum. \begin{figure}[t] \vskip 0.1in \begin{center} \centerline{\includegraphics[width=0.9\columnwidth]{graph1lon_alt.jpg}} \caption{2-dimensional LON plot for \textit{iris} with nodes scaled by incoming strength, the sum of the weights of the incoming edges to a node.
Improving edges are coloured green and deteriorating edges are dark grey, with thickness corresponding to edge weight. The global sink is coloured red and all other local optima are grey.} \label{fig:irisgraphA} \end{center} \vskip -0.2in \end{figure} \subsubsection{Funnels} Remarkably, the only data set producing a suboptimal funnel was \textit{cifar10}, depicted in \fref{fig:cifar10graphB}. The density of the graph is a result of the high modality pointed out in \sref{sec:res:nodeedge}. Despite the range of fitness landscapes that have been populated, each therefore contains a simple funnel structure and is suitable for local search optimisation. This is a significant indication of the potential effectiveness of using LONs for architecture optimisation. \begin{figure}[t] \begin{center} \centerline{\includegraphics[width=0.9\columnwidth]{graph7mlon3D_alt.jpg}} \caption{3-dimensional MLON plot for \textit{cifar10} with nodes scaled by basin size and deteriorating edges removed, orientated such that the positive z-axis, corresponding to increasing fitness, points up.} \label{fig:cifar10graphB} \end{center} \vskip -0.2in \end{figure} \subsubsection{Local Optimisation} As an indication of fitness landscape searchability, the performance of an iterated local search (ILS) procedure, described in Algorithm~\ref{alg:ils}, was evaluated. The perturbation is defined as the same operation used to draw LON edges (\sref{sec:lons:lon}), with perturbation strength $k=2$ and stopping criterion $t=20$. Aggregating the results of 100 runs per data set, ILS discovered a global top 5 solution after an average of only 50.67 evaluations (the average of the median evaluations needed per data set). \fref{fig:prog} provides a comparison of first discoveries of near optimal solutions. For almost all data sets, the median first encounter with a top 5 solution is within 50 evaluations, while the comparative hardness of \textit{cifar10} is clear. Each run only reached a fraction of all solutions before exhausting escape possibilities. Despite the limited reach, 65\% of all runs produced the global optimum, after an average of 92.56 evaluations, which is less than a tenth of the solution space.
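A direct Python rendering of Algorithm~\ref{alg:ils}, reusing the \texttt{neighbours} and \texttt{hill\_climb} sketches given earlier, may clarify the procedure; this is again an illustrative sketch with assumed names, using $k=2$ and $t=20$ as in the experiments.

\begin{verbatim}
import random

def ils(fitness, initial, k=2, t=20):
    # Iterated local search: hill climb, then repeatedly perturb with k
    # random neighbourhood moves and re-climb, until t stale iterations.
    s = hill_climb(initial, fitness)
    stale = 0
    while stale < t:
        x = s
        for _ in range(k):                  # Perturb(s, k)
            x = random.choice(tuple(neighbours(x)))
        s_new = hill_climb(x, fitness)
        if fitness(s_new) >= fitness(s):    # accept non-deteriorating moves
            s = s_new
            stale = 0
        stale += 1
    return s
\end{verbatim}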
\begin{figure}[t] \vskip 0.1in \begin{center} \centerline{\includegraphics[width=\columnwidth]{prog_ils_top5_stats.pdf}} \caption{First discovery of a global top 5 solution over 100 ILS procedures per data set.} \label{fig:prog} \end{center} \vskip -0.2in \end{figure} \begin{algorithm} \caption{Iterated local search (ILS)} \label{alg:ils} \begin{algorithmic} \STATE Let $S$ be the search space; $f$ the fitness function \STATE Let $k$ be a perturbation strength; $t$ a stopping threshold \STATE Select initial random solution $s_0 \in S$ \STATE $s \leftarrow$ \textsc{HillClimb}($s_0$) \STATE $i \leftarrow 0$ \REPEAT \STATE $x \leftarrow$ \textsc{Perturb}($s, k$) \STATE $s' \leftarrow$ \textsc{HillClimb}($x$) \IF{$f(s') \geq f(s)$ } \STATE $s \leftarrow s'$ \STATE $i \leftarrow 0$ \ENDIF \STATE $i \leftarrow i + 1$ \UNTIL{$i \geq t$} \vspace{0.1cm} \PROCEDURE {Perturb}{s, k} \STATE Let \textsc{Op} be a perturbation operation drawing a random neighbour from $\mathcal{N}(s)$ \FOR{$j \leftarrow 1,\dots,k$ } \STATE $s' \leftarrow \textsc{Op}(s)$ \STATE $s \leftarrow s'$ \ENDFOR \STATE \textbf{return} $s$ \ENDPROCEDURE \end{algorithmic} \end{algorithm} \section{Conclusion} \label{sec:con:res} This study presented an initial evaluation of the feasibility of using local optima network (LON) analysis to characterise and optimise over the feedforward neural network (FFNN) architecture space. The solution space was defined in such a way that full enumeration using multiple data sets was feasible. One of the main findings is the relatively simple global funnel structures of the resulting monotonic LONs (MLONs). The LONs produced for the data sets considered in this study exhibited diverging characteristics in terms of their modality, fitness distribution, basin size distribution and global optimum incoming strength. All but one MLON, however, had a single global funnel, with only two funnels in the remaining local optima network. The simple funnel structure supports the use of relatively simple local optimisers like iterated local search as opposed to significantly more computationally expensive population-based methods. A high standard deviation between local optimum fitness values was found to be related to low modality. Low modality in turn improves the relative ease of fitness optimisation. When encountered at low fitness levels, a very low standard deviation may indicate that the model is not sufficiently powerful for the given task, as is the case with the CIFAR-10 and CIFAR-100 data sets.\looseness=-1 One line of extension for future work is to investigate larger FFNN architecture spaces more representative of real-world tasks. Larger models are likely to improve overall fitness, which was found to correspond to simpler LONs and improved sampling performance. Another extension is to study LONs produced using different architectural arrangements, like recurrent or convolutional neural networks. Feedforward neural networks have insufficient representational capacity for many real-world problems, making basic network augmentation commonplace in modern machine learning. This study focused exclusively on the architecture space, but weight initialisation is also known to play an important role in model performance \cite{he2015delving, zhou2019deconstructing}. To investigate the weight initialisation space, some form of discretisation is required.
A promising option, considering the relatively low number of possible configurations, is to study combinations of initial weight signs, keeping the magnitude constant \cite{zhou2019deconstructing}. This line of investigation may contribute towards understanding what makes lottery tickets successful \cite{frankle2018lottery}. \section*{Acknowledgement} The authors would like to thank the Centre for High Performance Computing (CHPC) (\url{http://chpc.ac.za}) for the use of their cluster to obtain the data for this study. \bibliographystyle{IEEEtran}
\section{Introduction} The planned high-luminosity electron-ion collider (EIC) is designed to perform ``imaging'' of the proton (and of heavy ions) with unprecedented accuracy~\cite{EIC}. It will provide detailed multi-dimensional parton distributions and insight into the light-front wave function (LFwf) of the proton via high-energy $\gamma^{(*)} - p$ scattering. The purpose of this paper is to expose the color charge correlations obtained from the LFwf of the proton.\\ The concept of color charge density fluctuations in the transverse impact parameter plane emerges naturally in high-energy (small-$x$) scattering. The projectile charge traverses without recoil the (color) field produced coherently by all ``valence'' charges in the target, and its propagator is given by a path ordered exponential of that field, cf.\ sec.~\ref{sec:DipScattAmpl} below. For scattering of a (virtual) photon from a proton target, this regime of coherent eikonal scattering may set in at $x\simle 0.1$ where the longitudinal coherence length $\sim 1/(x M_p)$ of the process in the rest frame of the proton begins to exceed its radius. Nuclear targets, on the other hand, require $x \simle 0.1/A^{1/3}$, where $A$ denotes the atomic mass number. The scale separation in soft coherent fields sourced by random, ``frozen'' valence charges was introduced by McLerran and Venugopalan~(MV) in ref.~\cite{MV}. Their model, devised for a very large nucleus, describes Gaussian fluctuations of {\em classical} color charge densities at vanishing momentum transfer: $\langle \rho^a(\vec q_1) \, \rho^b(\vec q_2)\rangle_{\text{MV}} \sim \mu^2\, \delta(\vec q_1 + \vec q_2)$. However, when the density of valence charges in the target is not very large, one would rather take the two-dimensional color charge density as an operator acting on the LFwf of the target~\cite{DMV}. We shall see that in the regime of moderate $x\sim 0.1$ color charge fluctuations in the proton are not Gaussian, and depend on impact parameter and on the transverse distance scale at which they are probed.\\ After analyzing color charge correlations in the proton we proceed to specify initial conditions for small-$x$ Balitsky-Kovchegov (BK) evolution~\cite{BK} of the dipole scattering amplitude. Detailed fits of BK evolution with running coupling corrections to the $\gamma^* - p$ cross section measured at HERA have been performed by Albacete {\it et al.} in ref.~\cite{Albacete:2009fh}. More recent fits improve the accuracy of the theory by employing a collinearly improved BK evolution equation (ref.~\cite{Ducloue:2019jmy} and references therein). However, such fits of small-$x$ QCD evolution to HERA DIS data typically impose simplified, {\it ad-hoc} initial conditions for the dipole scattering amplitude on the proton, starting at $x=10^{-2}$. We attempt to construct initial conditions based on the LFwf of the proton so that one may take advantage of ``proton imaging'' performed at a future EIC~\cite{EIC}. We use a model LFwf to show that interesting, non-trivial transverse momentum and impact parameter dependent color charge correlations in the proton should be expected.
Furthermore, these initial conditions include a non-zero $C$-odd ``Odderon'' contribution to the dipole scattering amplitude which may be evolved to smaller $x$~\cite{Kovchegov:2003dm} in order to address high-energy exclusive processes involving $C$-odd exchanges; or some spin-dependent Transverse Momentum Dependent (TMD) distributions such as the (dipole) gluon Sivers function of a transversely polarized proton~\cite{Yao:2018vcg}. \\ Our final objective is to compute the Weizs\"acker-Williams (forward) gluon distributions, in particular the distribution of linearly polarized gluons, at next-to-leading (fourth) order in $A^+$ (sec.~\ref{sec:WW}). At this order the conventional and linearly polarized distributions no longer coincide, and they involve the correlator of four color charge density operators in the proton. This is an independent correlation function which cannot be reduced to products of quadratic color charge correlators like in an effective theory of Gaussian color charge fluctuations. The WW gluon distribution is a TMD; its general operator definition has been provided in refs.~\cite{WWoperator}. The WW gluon TMDs appear in a variety of processes such as production of a dijet or heavy quark pair in hadronic collisions~\cite{WWhadronic} or DIS at moderate~\cite{WW-DISmoderate} or high energies~\cite{Dominguez:2011br,Dumitru:2015gaa,WW-dijet-smallx}; photoproduction of three jets~\cite{Altinoluk:2020qet}; photon pair~\cite{WWphoton}, quarkonium~\cite{WW-quarkonium}, quarkonium pair~\cite{WW-quarkonium-pair}, or quarkonium plus dilepton~\cite{WW-quarkonium-dilepton} production in hadronic collisions. These gluon distributions also determine the fluctuations of the divergence of the Chern-Simons current at the initial time of a relativistic heavy-ion collision~\cite{WW-CS}. \section{Setup} \label{sec:LFwf} The light cone state of an unpolarized on-shell proton with four-momentum $P^\mu = (P^+, P^-,\vec{P}_\perp)$ is written as~\cite{Lepage:1980fj} \begin{eqnarray} |P\rangle &=& \frac{1}{\sqrt{6}} \int \frac{{\rm d} x_1{\rm d} x_2 {\rm d} x_3} {\sqrt{x_1 x_2 x_3}} \delta(1-x_1-x_2-x_3) \int \frac{{\rm d}^2 k_1 {\rm d}^2 k_2 {\rm d}^2 k_3}{(16\pi^3)^3}\, 16\pi^3 \delta(\vec{k}_1+\vec{k}_2+\vec{k}_3)\nonumber\\ &\times& \psi(x_1, \vec k_1; x_2, \vec k_2; x_3, \vec k_3) \sum_{i_1, i_2, i_3}\epsilon_{i_1 i_2 i_3} |p_1,i_1; \, p_2,i_2; \, p_3,i_3\rangle~. \label{eq:def_|P>} \label{eq:valence-proton} \end{eqnarray} The $n$-parton Fock space amplitudes are universal and process independent. They encode the non-perturbative structure of hadrons. Here, we have restricted ourselves to the valence quark Fock state, assuming that the process probes parton momentum fractions of order $x\sim 0.1$, and moderately high transverse momenta. In this regime, the above should be a reasonable first approximation. The three on-shell quark momenta are specified by their lightcone momentum components $p_i^+ = x_i P^+$ and their transverse components $\vec{p}_{i} = x_i \vec{P}_\perp + \vec{k}_i$. The colors of the quarks are denoted by $i_{1,2,3}$. We omit helicity quantum numbers (and flavor indices) as they play no role in our analysis. $\psi$ is symmetric under exchange of any two of the quarks, and is normalized according to \begin{equation} \label{eq:Norm_psi3} \int {{\rm d} x_1{\rm d} x_2 {\rm d} x_3}\, \delta(1-x_1-x_2-x_3) \int \frac{{{\rm d}^2 k_1 {\rm d}^2 k_2 {\rm d}^2 k_3}}{(16\pi^3)^3}\, (16\pi^3)\,\delta(\vec{k}_1+\vec{k}_2+\vec{k}_3)\, |\psi|^2 = 1~.
\end{equation} This corresponds to the proton state normalization \begin{eqnarray} \langle K | P\rangle &=& 16\pi^3 \, P^+ \delta(P^+ - K^+) \, \delta(\vec{P}_\perp - \vec{K}_\perp) \label{eq:ProtonNorm1} ~. \end{eqnarray} Below, we neglect plus momentum transfer so that $\xi = (K^+ - P^+)/P^+ \to 0$. This approximation is valid at high energies.\\ For numerical estimates we employ a model wave function $\psi(x_1, \vec k_1; x_2, \vec k_2; x_3, \vec k_3)$ described in appendix~\ref{app:BrodskySchlumpf}. \section{Dipole scattering amplitude} \label{sec:DipScattAmpl} The $S$-matrix for scattering of a quark - antiquark dipole off the fields in the target proton can be expressed as (see, e.g.\ ref.~\cite{Mueller:2001fv}) \begin{equation} \label{eq:S_dipole_b} {\cal S} (\vec r,\vec b) = \frac{1}{N_c}\,{\rm tr} \,\left< U\left(\vec b + \frac{\vec r}{2}\right)\, U^\dagger\left( \vec b - \frac{\vec r}{2}\right)\right> \, . \end{equation} Following the standard convention in the small-$x$ literature we define the scattering amplitude \begin{equation} \label{eq:T_dipole_b} {\cal T} (\vec r,\vec b) = 1-{\cal S} (\vec r,\vec b)~, \end{equation} without a factor of $i$. When integrated over impact parameters $\vec b$, eq.~(\ref{eq:T_dipole_b}) is related to the so-called dipole gluon distribution~\cite{Dominguez:2011wm}. Here, $U$ ($U^\dagger$) are (anti-)path ordered Wilson lines representing the eikonal scattering of the dipole of size $\vec r$ at impact parameter $\vec b$: \begin{equation} \label{eq:WilsonLines} U(\vec x_T) = {\cal P} e^{ig \int dx^- A^{+a}(x^-,\vec x_T)\, t^a} ~~~~~,~~~~~ U^\dagger(\vec x_T) = \overline{\cal P} e^{-ig \int dx^- A^{+a}(x^-,\vec x_T)\, t^a} ~. \end{equation} ${\cal S} (\vec r,\vec b)$ and ${\cal T} (\vec r,\vec b)$ are invariant under the simultaneous ${\cal P} \leftrightarrow \overline{\cal P}$, $\vec r \to - \vec r$, $gA^+\to -gA^+$. We now expand ${\cal T} (\vec r,\vec b)$ to third order in $gA^+$, neglecting exchanges of more than three gluons, and write it in terms of correlators of the field {\em integrated} over the longitudinal coordinate: \begin{eqnarray} A^{+a}(\vec x_T) &=& \int {\rm d} x^- A^{+a}(\vec x_T, x^-)~, \nonumber\\ A^{+a}(\vec x_T) \, A^{+b}(\vec y_T) + A^{+b}(\vec y_T) \, A^{+a}(\vec x_T) &=& {\cal P} \int {\rm d} x^- \int {\rm d} y^- A^{+a}(\vec x_T, x^-) A^{+b}(\vec y_T, y^-) \nonumber\\ & & + \overline{\cal P} \int {\rm d} x^- \int {\rm d} y^- A^{+a}(\vec x_T, x^-) A^{+b}(\vec y_T, y^-)~. \end{eqnarray} This field is related to the 2d color charge density through \begin{equation} - \nabla_\perp^2 A^{+a}(\vec x_T) = \rho^a(\vec x_T)~, \end{equation} allowing us to express the dipole scattering amplitude in terms of color charge density correlators. Some of the diagrams that contribute to the two- and three-gluon exchange amplitudes are shown in fig.~\ref{fig:diag-rhorho}. The general relation of correlators of Wilson lines at small $x$ to Generalized Parton Distributions has been elucidated in ref.~\cite{Altinoluk:2019wyu}, to all twists. 
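For orientation, we note the first terms of this expansion explicitly; schematically, suppressing the transverse coordinates and using the integrated fields defined above (this is the standard series of the path ordered exponential, displayed here only for clarity),
\begin{equation*}
U \;=\; 1 \,+\, ig\, t^a A^{+a} \,+\, \frac{(ig)^2}{2!}\, {\cal P}\, t^a t^b A^{+a} A^{+b} \,+\, \frac{(ig)^3}{3!}\, {\cal P}\, t^a t^b t^c A^{+a} A^{+b} A^{+c} \,+\, {\cal O}\big((gA^+)^4\big)~,
\end{equation*}
with the analogous anti-path ordered series for $U^\dagger$. Inserting these series into eq.~(\ref{eq:S_dipole_b}) and collecting the terms of second and third order in $gA^+$ yields the two and three gluon exchange amplitudes given below.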
\\ \begin{figure}[htb] \centering \includegraphics[width=0.45\textwidth]{rho-rho-OneBody.pdf} \hspace*{1.5cm} \includegraphics[width=0.45\textwidth]{rho-rho-ThreeBody.pdf} \caption{Left: one of the diagrams for the correlator $\langle \rho^a(\vec q_1)\, \rho^b(\vec q_2)\rangle$ (once Coulomb propagators are amputated); this contribution dominates at large relative gluon momenta but small total momentum transfer $\vec K_T=-\vec q_1 - \vec q_2$.\\ Right: one of the diagrams for the correlator $\langle \rho^a(\vec q_1)\, \rho^b(\vec q_2)\, \rho^c(\vec q_3)\rangle$; this contribution dominates when the three gluons share a large momentum transfer, $\vec K_T/3 \simeq -\vec q_1 \simeq - \vec q_2 \simeq- \vec q_3$.} \label{fig:diag-rhorho} \end{figure} $C$-even two gluon exchange corresponds to the scattering amplitude~\cite{DMV} \begin{eqnarray} {\cal T}_{gg}(\vec r,\vec b) &=& - \frac{g^4}{2} C_F\,\int\limits_{K_T, q} \frac{e^{-i\vec b \cdot \vec K_T}}{(\vec q -\frac{1}{2}\vec K_T)^2 \, (\vec q+ \frac{1}{2} \vec K_T)^2}\,\left( \cos\left(\vec r \cdot {\vec q}\right) - \cos\left(\frac{{\vec r}\cdot \vec K_T}{2}\right)\right) \, G_2\left({\vec q}-\frac{1}{2}\vec K_T,-{\vec q}-\frac{1}{2}\vec K_T\right) ~. \label{eq:Pomeron} \end{eqnarray} (We use the shorthand notation $\int_q = \int{\rm d}^2q/(2\pi)^2$.) Here, we introduced the color charge correlator \begin{equation} \left< \rho^a(\vec q_1) \, \rho^b(\vec q_2)\right> \equiv {\rm tr} \,t^a t^b\, g^2\, G_2(\vec q_1,\vec q_2)~, \end{equation} see appendix~\ref{sec:rho_Correlators} for details. It is symmetric under a simultaneous sign flip of both arguments and so ${\cal T}_{gg}(\vec r,\vec b)$ is real. The integral in eq.~(\ref{eq:Pomeron}) is free of infrared divergences since $G_2$ satisfies a Ward identity and vanishes when either one of the gluon momenta goes to zero~\cite{Bartels:1999aw,Ewerz:2001fb}: $G_2\left({\vec q}-\frac{1}{2}\vec K_T,-{\vec q}-\frac{1}{2}\vec K_T\right) \sim ({\vec q}\pm\frac{1}{2}\vec K_T)^2$ as ${\vec q}\to\pm \frac{1}{2}\vec K_T$. In fig.~\ref{fig:G2_bq} we show a numerical estimate for $G_2$ as a function of impact parameter $b$ or relative momentum $\vec q_{12} = \vec q_1 - \vec q_2 = 2\vec q_1 + \vec K_T$: \begin{equation} \widetilde G_2(\vec q_{12},\vec b) = \int_{K_T} e^{-i\vec b\cdot \vec K_T}\, G_2\left(\frac{\vec q_{12}-\vec K_T}{2}, -\frac{\vec q_{12}+\vec K_T}{2} \right)~. \end{equation} We also average over the relative directions of $\vec q_{12}$ and $\vec b$. For numerical estimates we used the model wave function by Brodsky and Schlumpf~\cite{Brodsky:1994fz} described briefly in appendix~\ref{app:BrodskySchlumpf}. \begin{figure}[htb] \centering \includegraphics[width=0.48\textwidth]{G2_bdep_av.pdf} \includegraphics[width=0.48\textwidth]{G2_qdep_av.pdf} \caption{The quadratic color charge density correlator $\widetilde G_2(\vec q_{12},b)$ in the proton as a function of impact parameter and relative transverse momentum of the two gluon probes.} \label{fig:G2_bq} \end{figure} $G_2$ measures charge correlations seen by two gluon probes of the same color. There is a color charge anti-correlation (``repulsion'') at small relative momentum of the gluon probes in the center of the proton which turns into a positive correlation (``attraction'') towards the periphery, or at high relative momentum. The integral of $\widetilde G_2$ over the 2d impact parameter plane at vanishing relative momentum is zero: \begin{equation} \int {\rm d}^2b\,\, \widetilde G_2(\vec q_{12}=0,\vec b) = 0~. 
\end{equation} A similar relation holds for the cubic charge correlators discussed below. \\ At third order in $A^{+a}$ we have the following scattering amplitude for $C$-odd three gluon exchange~\cite{DMV}: \begin{eqnarray} {\cal T}_{ggg}(\vec r,\vec b) &=& \frac{5}{18}\, g^6 \int\limits_{q_1, q_2, q_3} \frac{1}{q_1^2}\frac{1}{q_2^2}\frac{1}{q_3^2}\, e^{-i \vec b \cdot \vec K_T}\, G_3^-(\vec q_1,\vec q_2,\vec q_3)\,\left[ \sin\left(\vec r\cdot \vec q_1 + \frac{1}{2} \vec r\cdot \vec K_T\right) - \frac{1}{3}\sin\left(\frac{{1}}{2}\vec r\cdot \vec K_T\right)\right]~. \label{eq:Odderon-operator} \end{eqnarray} Here, $\vec K_T \equiv - (\vec q_1 + \vec q_2 + \vec q_3)$. We denote the $C$-odd part of the correlator of three color charges as \begin{equation} \left< \rho^a(\vec q_1) \, \rho^b(\vec q_2)\, \rho^c(\vec q_3)\right>_{C=-} \equiv \frac{1}{4} d^{abc}\, g^3\, G_3^-(\vec q_1,\vec q_2,\vec q_3) \end{equation} This correlator, too, is symmetric under a simultaneous sign flip of all three gluon momenta and so ${\cal T}_{ggg}(\vec r,\vec b)$ is imaginary. Also, it vanishes quadratically in any of the transverse momentum arguments so that ${\cal T}_{ggg}(\vec r,\vec b)$ is free of infrared divergences. \begin{figure}[htb] \centering \includegraphics[width=0.48\textwidth]{G3-_bdep_av.pdf} \includegraphics[width=0.48\textwidth]{G3-_qdep_av.pdf} \caption{The $C$-odd part of the cubic color charge density correlator $\widetilde G_3^-$ in the proton as a function of impact parameter and relative transverse momentum.} \label{fig:G3-_bq} \end{figure} The fact that $G_3^-$ does not vanish shows that color charge fluctuations in the proton state~(\ref{eq:valence-proton}) are not Gaussian. A numerical estimate of $\widetilde G_3^-$ is shown in fig.~\ref{fig:G3-_bq}. At small relative momentum we observe a positive correlation at the center of the proton; $\widetilde G_3^-(b)$ diverges logarithmically at $b\to0$ due to contributions from large momentum transfer $-t=K_T^2$. This turns into an anti-correlation around $b\approx 1$~GeV$^{-1}$, and then vanishes for large impact parameters. At high relative momentum the correlator is large and positive at small $b$. For generic impact parameters and momenta $\widetilde G_2$ and $\widetilde G_3^-$ are of similar numerical magnitude.\\ For completeness, we finally show the $C$-even part of the correlator of three color charges, \begin{equation} \left< \rho^a(\vec q_1) \, \rho^b(\vec q_2)\, \rho^c(\vec q_3)\right>_{C=+} \equiv \frac{i}{4} f^{abc}\, g^3\, G_3^+(\vec q_1,\vec q_2,\vec q_3)~, \end{equation} even though it does not contribute to the dipole scattering amplitude. \begin{figure}[htb] \centering \includegraphics[width=0.48\textwidth]{G3+_bdep_av.pdf} \includegraphics[width=0.48\textwidth]{G3+_qdep_av.pdf} \caption{The $C$-even part of the cubic color charge density correlator $\widetilde G_3^+$ in the proton as a function of impact parameter and relative transverse momentum.} \label{fig:G3+_bq} \end{figure} This correlator is negative near the center, and for small relative momenta, then turns into a positive correlation at large momenta.\\ All three color charge correlators decay with increasing impact parameter, just as expected intuitively. Observing the correlations at small $b$ involves large momentum transfer to the proton to zoom in on its center. 
The regime where the exchanged gluons share a large momentum transfer $-t = K_T^2$ is dominated by $n$-body diagrams such as the one shown in fig.~\ref{fig:diag-rhorho}(right), where the static gluons attach to as many sources as possible\footnote{This was first noted by Donnachie and Landshoff who argued that three gluon exchange should dominate over two-gluon exchange in elastic proton-proton scattering at high energy and large $-t$ ($\ll s$)~\cite{Donnachie:1979yu}.}~\cite{Dumitru:2019qec}. This leads to the greatest overlap of the wave functions of incoming and scattered proton.\\ We now show the behavior of the dipole scattering amplitude ${\cal T}(\vec b, \vec r)$. For all figures we assumed a fixed $\alpha_s = 0.35$~\cite{Dumitru:2019qec,Low:1975sv} and we align the impact parameter and dipole vectors. However, the scattering amplitude does depend on the relative orientation of $\vec b$ and $\vec r$~\footnote{This would give rise to azimuthal correlations in double parton scattering in hadronic collisions~\cite{Hagiwara:2017ofm}.}. \begin{figure}[htb] \centering \includegraphics[width=0.45\textwidth]{P_b__for_fixed_r.pdf} \includegraphics[width=0.45\textwidth]{P_r__for_fixed_b.pdf} \caption{The two gluon exchange amplitude ${\cal T}_{gg}(\vec b, \vec r)$.} \label{fig:Tgg} \end{figure} The two gluon exchange amplitude ${\cal T}_{gg}(\vec b, \vec r)$ is shown in fig.~\ref{fig:Tgg}. It displays the expected roughly exponential falloff at large impact parameters. The amplitude is significantly smaller than 1 even at the center of the proton, albeit not by several orders of magnitude, e.g.\ ${\cal T}_{gg} \simeq 0.1$ at $b=1$~GeV$^{-1}$ and $r=2$~GeV$^{-1}$. Matching this to ${\cal T}_{gg} = \frac{1}{4} r^2 Q_s^2(b)$ would correspond to a saturation momentum of about $Q_s(b) \approx 0.3$~GeV at $b=1$~GeV$^{-1}$ and $x\sim0.1$. For comparison, we recall $Q_s \approx 0.4-0.5$~GeV at $x=0.01$, on average over impact parameters, extracted from systematic fits of BK evolution with running coupling corrections to HERA data for $F_2$~\cite{Albacete:2009fh}. As expected, ${\cal T}_{gg}(\vec r)$ at fixed $b$ first increases with the size of the dipole; the slope is less steep at larger impact parameters where the target is more ``dilute''. The scattering amplitude eventually reaches a maximum value for $r_{\text{max}} \simge 5$~GeV$^{-1}$ beyond which it decreases again as the projectile dipole ``misses'' the target\footnote{This behavior also emerges as a consequence of impact parameter dependent small-$x$ BK evolution, even when the dipole amplitude at the initial $x_0$ increases monotonically with $r$~\cite{GolecBiernat:2003ym}.}. However, this behavior occurs in a regime of large dipoles where the analysis of the scattering amplitude (and of $\gamma^{(*)} \to q\overline{q}$) in perturbation theory is not valid. \begin{figure}[htb] \centering \includegraphics[width=0.45\textwidth]{O_b__for_fixed_r.pdf} \includegraphics[width=0.45\textwidth]{O_r__for_fixed_b.pdf} \caption{The $C$-odd three gluon exchange amplitude Im~${\cal T}_{ggg}(\vec b, \vec r)$.} \label{fig:Tggg} \end{figure} The $C$-odd three gluon exchange amplitude (``Odderon''\footnote{We should mention that we restrict to the Odderon associated with (relatively large) transverse momentum transfer $\vec K_T$. For nearly forward scattering another Odderon exchange associated with a spin flip of the proton may appear~\cite{spinOdderon}. }) $-i{\cal T}_{ggg}(\vec b, \vec r)$ is shown in fig.~\ref{fig:Tggg}. 
This amplitude changes sign under $\vec b \to - \vec b$ (negative parity) and vanishes at $b=0$. Its magnitude is maximal at $b\sim 0.5 - 1.2$~GeV$^{-1}$, approximately where the gradient of the two-gluon exchange amplitude is greatest~\cite{KovSievert}. For impact parameters $b\simle 3$~GeV$^{-1}$ and small dipoles, $r \simle 4$~GeV$^{-1}$, we find that ${\cal T}_{ggg}$ is smaller than ${\cal T}_{gg}$ by at least one order of magnitude\footnote{The magnitude of Im~${\cal T}_{ggg}$ obtained from the present LFwf is one order of magnitude smaller than the one used as the initial condition for small-$x$ evolution in ref.~\cite{Yao:2018vcg}, where the authors compute the dipole gluon Sivers function in a transversely polarized proton.}. This is not because color charge fluctuations in the proton are nearly Gaussian, as the magnitudes of $G_2$ and $G_3^-$ (shown above) are similar. Rather, it appears to originate mostly from the parity odd nature of ${\cal T}_{ggg}$ which gives rise to large cancellations in the integral in eq.~(\ref{eq:Odderon-operator}). As a consequence, semi-hard processes requiring $C$-odd three gluon exchange have small cross-sections~\cite{Dumitru:2019qec}. Alternatively, one may search for the perturbative Odderon via charge asymmetries in diffractive electroproduction of a $\pi^+\, \pi^-$ pair~\cite{Hagler:2002nh}. \section{Weizs\"acker-Williams gluon distributions} \label{sec:WW} In this section we relate the color charge correlators to the (forward) WW gluon distribution. It is given, at small-$x$, by the correlator of two light-cone gauge fields~\cite{Dominguez:2011wm,WW} \begin{equation} \label{eq:WW-Gij} xG^{ij}_{\text{WW}}(x,\vec q) = \frac{1}{2}\delta^{ij}\, xG^{(1)}(x,\vec q) + \frac{1}{2}\left(2\frac{q^i q^j}{q^2}-\delta^{ij}\right)\, xh_\perp^{(1)}(x,\vec q) = \frac{1}{4\pi^3} \left< A^{ia}(\vec q)\, A^{ja}(-\vec q)\right>~. \end{equation} The trace of $xG^{ij}_{\text{WW}}$ defines the conventional WW gluon distribution $xG^{(1)}(x,\vec q)$ while the traceless part corresponds to the distribution of linearly polarized gluons $xh_\perp^{(1)}(x,\vec q)$. Both are integrated over impact parameters since we consider the forward limit. In the non-forward case the general decomposition of the WW GTMD involves additional independent functions on the r.h.s.\ of eq.~(\ref{eq:WW-Gij}), see e.g.\ ref.~\cite{Boussarie:2018zwg}. The field in light-cone gauge is obtained from $A^+$ by a gauge transformation, \begin{equation} \label{eq:cov--LC} A^i(\vec x_T) = \frac{i}{g}\, U^\dagger(\vec x_T)\, \partial^i U(\vec x_T)~, \end{equation} such that in this gauge $A^+(\vec x_T)=0$. At linear order in $\rho$, $A^i(\vec q)\sim q^i\, \rho(\vec q)$ is longitudinal so that $xG^{(1)}(x,\vec q) = xh_\perp^{(1)}(x,\vec q)$, corresponding to maximal polarization: \begin{equation} \label{eq:xG-xh-highq} xG^{(1)}(x,\vec q) = xh_\perp^{(1)}(x,\vec q) = \frac{N_c^2-1}{8\pi^3 q^2} \, g^2\, G_2(\vec q, -\vec q)~. \end{equation} Beyond leading order in $\rho$ (or $A^+$) the L.C.\ gauge field is no longer purely longitudinal and one finds that $xG^{(1)}(x,\vec q) > xh_\perp^{(1)}(x,\vec q)$. See refs.~\cite{Metz:2011wb,Dominguez:2011br} for computations of these distributions to all orders in $A^+$, in the Gaussian MV model of classical color charges. Resummed WW gluon distributions for Gaussian color charge fluctuations with a more general two-point correlator have been derived in ref.~\cite{Dumitru:2016jku}; also see appendix~\ref{sec:WW_Orho4}. 
Here, we express the correction to $xG^{(1)}(x,\vec q)$ and $xh_\perp^{(1)}(x,\vec q)$ at fourth order in $A^+$ in terms of the quartic color charge correlator: \begin{eqnarray} \Delta xG^{(1)}(x,\vec q) = - \Delta xh_\perp^{(1)}(x,\vec q) &=& \frac{g^2}{16\pi^3} f^{abe} f^{cde} \int_{k,p} \frac{1}{k^2}\frac{1}{p^2}\frac{1}{(\vec q-\vec k)^2}\frac{1}{(\vec q + \vec p)^2} \left(\frac{\vec k\cdot \vec q \,\, \vec p\cdot \vec q}{q^2} - \vec k\cdot \vec p\right) \nonumber\\ & & \left< \rho^a(\vec q-\vec k)\, \rho^b(\vec k)\, \rho^c(-\vec q-\vec p)\, \rho^d(\vec p) \right>~. \end{eqnarray} The explicit expression for $f^{abe} f^{cde} \left< \rho^a(\vec q-\vec k)\, \rho^b(\vec k)\, \rho^c(-\vec q-\vec p)\, \rho^d(\vec p) \right>$ in terms of the proton LFwf is given in eq.~(\ref{eq:dxG_dxh_fg}) of appendix~\ref{sec:WW_Orho4}. Hence, at this order there is a splitting of $xG^{(1)}$ and $xh_\perp^{(1)}$ which are no longer equal. \begin{figure}[htb] \centering \includegraphics[width=0.45\textwidth]{xG_xh_plots.pdf} \includegraphics[width=0.45\textwidth]{ratio_xh_to_xG.pdf} \caption{The conventional and linearly polarized WW gluon distributions in the proton (at $x\sim 0.1$) to order $(A^+)^4$.} \label{fig:WW} \end{figure} Fig.~\ref{fig:WW} shows numerical results for the two WW gluon distributions in the proton. For $q \simge 0.5$~GeV the higher twist correction is very small and the ``polarization'' is nearly maximal. This confirms that a measurement of $xh_\perp^{(1)}(x,q)$ at an EIC appears promising, for example via dijet azimuthal asymmetries~\cite{WW-dijet-smallx}. The higher twist correction overwhelms the leading contribution below $q \sim 0.2$~GeV where a resummation to all orders in $A^+$ would be required. For the Gaussian MV model of classical color charge fluctuations this has been done in refs.~\cite{Metz:2011wb,Dominguez:2011br} (and its evolution to small $x$ in refs.~\cite{Dumitru:2015gaa,Marquet:2016cgx}) but here higher order correlators are independent functions and a resummation appears difficult. \section{Summary and Discussion} \label{sec:Summary} In this paper we have computed 2d color charge density correlations in the proton at moderate $x\sim0.1$. The correlators of two, three and four color charge density operators $\rho^a$ have been related explicitly to the light-front wave function of the proton. These correlators exhibit interesting dependence on the relative momenta of the probes, and on impact parameter. The two-point correlator $G_2(\vec q_1, \vec q_2) \sim \langle\rho^a(\vec q_1)\, \rho^a(\vec q_2)\rangle$, for example, is positive at large relative momentum $\vec q_{12} = \vec q_1 - \vec q_2$, indicating ``attraction'' of like charges. It turns negative (``repulsion'') at smaller relative momentum, for central impact parameters. The correlation function satisfies a sum rule such that at $q_{12} =0$ its integral over the impact parameter plane vanishes: $\int {\rm d}^2b \, \widetilde G_2(\vec b, q_{12}=0) = 0$. We note that $\widetilde G_2(\vec b, \vec q_{12})$ is a {\em two-body} Generalized Parton Distribution (GPD) which depends not only on impact parameter but also on the relative transverse momentum (or distance) of the two gluon probes\footnote{For the proton wave function considered here, there is no dependence on $x$. 
We refer to ref.~\cite{Diehl:2003ny} for a review on GPDs.}: \begin{eqnarray} \label{eq:G2(b)_2-body} \widetilde G_2(\vec b, \vec q_{12}) &=& \int_{K_T} e^{-i \vec b \cdot \vec K_T} \int {\rm d} x_1 {\rm d} x_2 {\rm d} x_3 \, \delta(1-x_1-x_2-x_3) \int \frac{{\rm d}^2 p_1 {\rm d}^2 p_2 {\rm d}^2 p_3}{(16\pi^3)^2} \, \delta(\vec{p}_1+\vec{p}_2+\vec{p}_3) \nonumber\\ & &~~~~ \left[\psi^*(\vec p_1 +(1-x_1) \vec K_T, \vec p_2 -x_2 \vec K_T, \vec p_3 -x_3 \vec K_T) \right.\nonumber\\ & & ~~~~\left. -\psi^*(\vec p_1 -\frac{\vec q_{12}-\vec K_T}{2} -x_1\vec K_T, \vec p_2 + \frac{\vec q_{12}+\vec K_T}{2} -x_2 \vec K_T, \vec p_3 -x_3 \vec K_T) \right] \psi(\vec p_1, \vec p_2, \vec p_3)~. \end{eqnarray} $\psi$ denotes the amplitude of the three-quark Fock state of the proton. The first, one-body term is dominant for large $b$ and $q_{12}$ while the second, two-body contribution dominates for small $b$ and $q_{12}$. To illustrate the importance of $n$-body contributions to the color charge correlators, in fig.~\ref{fig:G2-G3-qDensity} we compare $\widetilde G_2(\vec b, q_{12}=0)$ and $\widetilde G_3^-(\vec b, q_{12}=q_{23}=0)$ to the 1-body quark density\footnote{The quark density is given by three times the first term in eq.~(\ref{eq:G2(b)_2-body}).} in impact parameter space, i.e.\ to the proton ``thickness function'' $T_p(b)$. Even at vanishing relative momenta these coincide only at rather large $b$. The color charge correlators $\langle\rho^a(\vec q_1)\, \rho^b(\vec q_2)\rangle$ and $\langle\rho^a(\vec q_1)\, \rho^b(\vec q_2) \, \rho^c(\vec q_3)\rangle_{C=-}$ can be probed in exclusive production of various charmonium states in (virtual) photon -- proton scattering~\cite{Dumitru:2019qec,Mantysaari:2016ykx} or via charge asymmetries in pion pair production~\cite{Hagler:2002nh}. \begin{figure}[htb] \centering \includegraphics[width=0.45\textwidth]{bG2_bG3-_bQD_bdep_av.pdf} \caption{Quadratic and $C$-odd cubic color charge correlators, and the 1-body quark density, as functions of impact parameter. } \label{fig:G2-G3-qDensity} \end{figure} \\~~\\ Another main result of the paper is that color charge fluctuations in the proton are far from Gaussian. The magnitudes of the $C$-even and $C$-odd components of the cubic correlator $\langle \rho^a\rho^b\rho^c \rangle / g^3$ are comparable to that of the two-point correlator $\langle \rho^a\rho^b \rangle / g^2$. In particular, $C$-odd correlations of cubic fluctuations near the center of the proton are large and positive, for sufficiently small relative momenta of the gluon probes. \\~~\\ Sub-femto-scale color charge correlations in the proton determine the dipole scattering amplitude. Relating them to the proton LFwf, which could in principle be determined via ``imaging'' of the proton at a future electron-ion collider, could help constrain and improve initial conditions for small-$x$ evolution. In particular, our analysis provides initial conditions which account for the above-mentioned non-trivial structure of two- and three-point correlators as functions of the transverse momentum ($\vec q_{12}$) or distance scale ($\vec r$), impact parameter $\vec b$, and their relative angular orientation. Hence, they may be useful for checking the consistency of BK evolution with the impact parameter dependence of the dipole $S$-matrix extracted from data at small $x$~\cite{S(b)-small-x}. 
The scattering amplitude derived here also includes a non-zero $C$-odd ``Odderon'' contribution to the dipole scattering amplitude which may be evolved to smaller $x$~\cite{Kovchegov:2003dm} to predict cross sections for exclusive processes involving $C$-odd exchanges, or the dipole gluon Sivers function of a transversely polarized proton~\cite{Yao:2018vcg}. Somewhat surprisingly, perhaps, our numerical analysis indicates that the $C$-odd amplitude for three gluon exchange ${\cal T}_{ggg}(\vec r, \vec b)$ is much smaller in magnitude than the $C$-even amplitude ${\cal T}_{gg}(\vec r, \vec b)$ for two gluon exchange. As already mentioned, this is not because color charge fluctuations in the proton are nearly Gaussian. Neither is it due to the additional power of $\alpha_s$ in ${\cal T}_{ggg}(\vec r, \vec b)$ which is compensated by other numerical factors. Rather, it is mainly a consequence of the fact that this amplitude is odd under parity. This leads to large cancellations in the three gluon exchange diagram (for central impact parameters) when their transverse momenta are reversed. ${\cal T}_{ggg}(\vec r, \vec b)$ must vanish, also, for large impact parameters or large dipoles as the density of color charge in the periphery of the proton is low. Consequently, we expect that cross sections for semi-hard exclusive processes involving $C$-odd three gluon exchange are small and require high luminosity.\\~~\\ We have also computed the conventional and linearly polarized Weizs\"acker-Williams gluon TMDs $xG^{(1)}(x,q)$ and $xh_\perp^{(1)}(x,q)$ in the proton at moderately low $x\sim0.1$. At leading twist (order $(A^+)^2$) the field in light-cone gauge is purely longitudinal and there is maximal polarization, $xG^{(1)}(x,q)=xh_\perp^{(1)}(x,q)$. The first power correction introduces a transverse part to $A^{ia}$ so that these gluon distributions are no longer equal. The correction to $xG^{(1)}(x,q)$ and $xh_\perp^{(1)}(x,q)$ involves a correlator of four $A^+$ in the proton. This is an independent function when color charge fluctuations are not Gaussian, and we have related it explicitly to overlap integrals of the LFwf of the proton. Numerically, we find that for $q\simge0.5$~GeV the higher twist correction is small and ``polarization'' is close to maximal. Hence, a measurement of $xh_\perp^{(1)}(x,q)$ at an EIC appears promising.\\ Throughout the paper we have approximated the proton in terms of its valence quark Fock state. It will be important to include the $\left|qqqg\right>$ Fock state, too, where the gluon is not necessarily soft. This may affect color charge correlations which probe high parton transverse momenta, and should improve the matching to small-$x$ BK evolution. Work in that direction is in progress. \section*{Acknowledgements} We thank Y.~Hatta and L.~Motyka for useful comments. Figs~\ref{fig:diag-rhorho}, \ref{fig:WW_O4} have been prepared with Jaxodraw~\cite{jaxo}. A.D.\ acknowledges support by the DOE Office of Nuclear Physics through Grant No.\ DE-FG02-09ER41620; and from The City University of New York through the PSC-CUNY Research grant 62098-00~50. V.S. acknowledges support by the DOE Office of Nuclear Physics through Grant No. DE-SC0020081. V.S. thanks the ExtreMe Matter Institute EMMI (GSI Helmholtzzentrum f\"ur Schwerionenforschung, Darmstadt, Germany) for partial support and hospitality. T.S.\ is supported by the Polish National Science Center (NCN) grants No.\ 2017/27/B/ST2/02755 and 2019/32/C/ST2/00202.
\section{Distributed Synchronous SGD (DSSGD) with Sparsification} \label{apdx:dsgd} Here, we describe the distributed synchronous version of the \ac{SGD} optimization algorithm, which is the main workhorse behind most distributed training~\cite{aji_sparse}. \cref{algo:dsgd} presents the specifics of the algorithm with sparsification. The algorithm executes in parallel on each worker and starts by sampling a batch-sized subset from the full training dataset. Each worker then performs a forward pass to obtain a loss value, followed by a backward pass to calculate the gradient vector used to update the model parameters. At this point, and before proceeding with the model update, the workers have to synchronize by aggregating (or averaging) their gradient vectors and using the aggregated gradient for the update. The gradient is sparsified using a chosen compressor (e.g., $\topk$, DGC, \scheme\!, etc.) and a target sparsification ratio. For example, to invoke the \scheme\! compressor, one would call the function \emph{Sparsify} of \cref{algo:algo1}, which takes as input the gradient $\mathbf \gr$ and the target sparsification ratio $\delta$. The aggregation can be accomplished either by means of a parameter server, which holds a global copy of the model parameters, receives the gradients from the workers, and updates its local model, so that the workers can pull the up-to-date model parameters at any time~\cite{Dean2012}, or in a peer-to-peer fashion by means of collective operations such as All-Reduce or All-Gather, which require no extra parameter server for the aggregation~\cite{horovod}. The peer-to-peer collective communication methods are widely adopted by most frameworks \cite{pytorch, horovod}, known to scale well in practice \cite{Goyal2017}, and hence adopted in this work. \subsection{Discussion on \scheme\! Algorithm} We highlight a few technical aspects of the \scheme\! algorithm presented in \cref{algo:algo1}. \smartparagraph{Scalability concerns:} The compression algorithm has no scalability issues since it executes locally and does not rely on inter-node communication. Also, the compressor depends only on the size of the gradient vector, leading to the same compression time on all workers regardless of the number of training workers that run in parallel. \smartparagraph{Algorithm's dependence on the training iteration:} The gradient sparsity changes over iterations, as shown in~\cref{fig:fitteddistributions}~and~\cref{fig:fitteddistributionsEC}. The proposed algorithm leverages extreme value theory to handle sparsity variations by adapting the number of stages at each iteration. This enables adaptive fitting of the gradient at each iteration via the sparsity-inducing distribution, allowing the estimation of an approximate threshold that obtains the top $k$ elements of the gradient vector. \smartparagraph{Sparsity and compressibility of the gradients:} Our algorithm relies on a principled statistical approach, which makes it robust to various sparsity levels of the gradient, provided that the compressibility property holds; if it does not, most sparsifiers would be equally ineffective. Moreover, the compressibility property is the reason why $\topk$ is commonly used in the literature.
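To make the interplay between \cref{algo:dsgd} and the compressor concrete, the following minimal NumPy sketch (our own illustration; the function names, the exact top-$k$ threshold selection, and the toy Laplace-distributed gradients are ours and not part of the actual implementation) mimics one sparsified aggregation step:
\begin{verbatim}
import numpy as np

# One sparsified DSSGD step. The compressor below picks entries by the
# exact top-k threshold; the proposed scheme replaces this exact (and
# expensive) selection with a fast statistical estimate of the threshold.

def sparsify(grad, delta):
    d = grad.size
    k = max(1, int(delta * d))
    eta = np.partition(np.abs(grad), d - k)[d - k]  # k-th largest magnitude
    idx = np.nonzero(np.abs(grad) >= eta)[0]
    return grad[idx], idx                           # (values, indices)

def aggregate(sparse_grads, d):
    # stand-in for the All-Gather collective: densify and average
    agg = np.zeros(d)
    for values, idx in sparse_grads:
        agg[idx] += values
    return agg / len(sparse_grads)

rng = np.random.default_rng(0)
d, delta, lam = 10**5, 0.01, 0.1
x = np.zeros(d)                                        # model parameters
grads = [rng.laplace(scale=1e-3, size=d) for _ in range(4)]  # 4 workers
update = aggregate([sparsify(g, delta) for g in grads], d)
x -= lam * update                                      # model update
\end{verbatim}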
Motivated by this compressibility property, in this work we seek an approximate, fast threshold estimation method that exploits common prior information about the gradients, while preserving the convergence guarantee of $\topk$, albeit with a different rate depending on the accuracy of the estimated threshold. \begin{algorithm}[!t] \caption{Sparsified Distributed Synchronous SGD} \label{algo:dsgd} \tcc{Worker $n$} \tcc{Initialization} \KwIn{$D$: Local Dataset} \KwIn{$B$: Minibatch size per node} \KwIn{$N$: The total number of workers} \KwIn{$\lambda$: The learning rate} \KwIn{$\delta$: The target sparsification ratio} \KwIn{$x$: Model parameters $x=(x[0],x[1], ...,x[d])$} \tcc{loop till end of training} \For{i = 0, 1, ...} { \tcc{Calculate stochastic gradient} $\gin[i]$ = 0\\ \For{j = 1, ..., B} { Sample data point $d$ from $D$\\ Calculate $\nabla f(x;d)$\\ $\gin[i] = \gin[i] + \frac{1}{B} \nabla f(x;d)$\\ } \tcc{Aggregate workers' gradients} Collective-Comm: $\Gin[i] = \frac{1}{N} \sum_{n=1}^{N} \text{Sparsify}(\gin[i], \delta)$\\ \tcc{Update model parameters} $\xin[i+1] = \xin[i] - \lambda\, \Gin[i]$\\ } \end{algorithm} \section{Gradient Features and Distribution} \subsection{Validation of Gradient Compressibility} \label{apdx:statmethods} The compressibility of the gradient vector allows efficient compression of the gradients through sparsification techniques, e.g., $\topk$ and thresholding-based compression \cite{Elzanaty19,grace}. Here, we empirically investigate the compressibility of the gradients according to \cref{def:compressable}. In order to verify the vector compressibility, we consider the gradients generated while training ResNet20. The absolute values of the gradients are sorted in descending order to obtain the vector $\tilde{\mathbf \gr}$ with $\d=269722$. In \cref{Fig:compressgradients}, the elements of the gradient vector $\tilde{\mathbf \gr}$, i.e., $\tilde{\gr}_{j}$, are reported vs their index, for three iterations in the beginning, middle, and end of the training.\footnote{Note that in \cref{Fig:compressgradients}, we focus only on the elements from $1$ to $10^5$, as for larger indices the amplitudes of the vector elements are sufficiently small.} As a benchmark, we report a power-law decay example with decay exponent $p>0.5$, i.e., $p=0.7$. It can be noticed that the gradients follow a power-law decay with decay exponent $p>0.7>0.5$; hence, they are compressible from \eqref{eq:powerlaw}. In \cref{fig:bestkapprox}, we report the sparsification error for the best $\k$ approximation, e.g., the $\topk$, as a function of $\k$. We also report an example of the power-law decay model with decay exponent $p-0.5=0.2$. We can see that the best $\k$ approximation error decays faster than the benchmark. Hence, the vector can be considered compressible, according to \eqref{eq:bestkapprox}. We also validate this behavior for various models and datasets, not reported here for conciseness. Therefore, the gradient vectors can be considered compressible in the sense of \cref{def:compressable}.
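A minimal numerical version of this check (our own sketch, not the code used for the figures; the synthetic vector with true decay exponent $0.7$ is chosen only for illustration) estimates the decay exponent by a log-log fit of the sorted magnitudes:
\begin{verbatim}
import numpy as np

# Estimate the decay exponent p of g_(j) ~ c j^{-p} from the sorted
# magnitudes; p > 0.5 marks the vector as compressible in the sense above.

def decay_exponent(grad, lo=10, hi=10**5):
    g_sorted = np.sort(np.abs(grad))[::-1]
    idx = np.arange(lo, min(hi, g_sorted.size))
    slope, _ = np.polyfit(np.log(idx + 1.0),
                          np.log(g_sorted[idx] + 1e-30), 1)
    return -slope

rng = np.random.default_rng(0)
d = 269722  # gradient dimension quoted above for ResNet20
grad = rng.permutation(np.arange(1, d + 1, dtype=float) ** -0.7)
grad *= rng.choice([-1.0, 1.0], size=d)  # random signs
print(decay_exponent(grad))              # ~0.7 > 0.5: compressible
\end{verbatim}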
\begin{figure*}[!t] \centering \begin{subfigure}[h]{0.48\textwidth} \includegraphics[width=1\textwidth]{Figures/modeling/GradientcompressibilitySigmaSA.pdf} \caption{\footnotesize The sorted magnitude of the gradients vs their indexes, and the fitted curve via power law in \eqref{eq:powerlaw}.} \label{Fig:compressgradients} \end{subfigure} \hfill \begin{subfigure}[h]{0.48\textwidth} \includegraphics[width=1\textwidth]{Figures/modeling/GradientcompressibilitySigmainputSA.pdf} \caption{\footnotesize The approximation error for the $\topk$ vs the number of non-zero elements, $\k$.} \label{fig:bestkapprox} \end{subfigure} \caption{The compressibility property of the gradients.} \label{fig:compressability} \end{figure*} \subsection{Validation of Gradient Distributions} \label{apdx:graddist} \label{sec:empvalid} In this part, we discuss the distribution of the gradients generated while training several neural networks. Since the gradient vectors are compressible, \acp{SPD} can approximate the gradient distribution. This feature, i.e., Property \ref{property:sparspromotdist}, is numerically validated as follows. First, we consider the gradients from training ResNet-20 with \ac{SGD}. The generated gradient vector at iteration $i$ is compressed using $\topk$ with $\delta=0.001$, and the distributed \ac{SGD} is employed as described in \cref{apdx:dsgd}. We investigate two cases: i) memoryless compression, where the \acf{EC} mechanism is not applied, as shown in \cref{fig:fitteddistributions}; and ii) memory-based compression, where an \ac{EC} mechanism is deployed by adding the sparsification error from the previous iteration to the gradient vector before the $\topk$ compression, i.e., $\mathbf \gr_{\{i\}}=\mathbf \gr_{\{i\}}+ \left[\mathbf \gr_{\{i-1\}} -\Tk[\mathbf \gr_{\{i-1\}}]\right]$. For both cases, we collect the uncompressed gradients from the master worker, as the gradients of different workers have similar distributions. The gradient vectors are then normalized by their $\ell_{2}$ norm to easily visualize and compare the evolution of the gradient distributions over various iterations. Then, the collected gradients are fitted by the three proposed \acp{SPD}, i.e., the double exponential, double gamma, and double \ac{GPD} distributions. The parameters of the distributions are estimated as indicated in Corollary~\ref{corollary:Laplacethreshold}, Corollary~\ref{corollary:gammathreshold}, and Corollary~\ref{corollary:Gparetothreshold}. For the training with the \ac{EC} mechanism in \cref{fig:fitteddistributionsEC}, it becomes more challenging to fit the gradients, especially at later iterations, as can be seen in \cref{fig:PDF2EC}. This behavior arises due to the addition of the sparsification error from the previous iteration to the gradients, which changes the resulting gradient distribution. More precisely, the gradient distribution is the convolution between the \ac{PDF} of the error from the last iteration and the \ac{PDF} of the current gradient vector before the \ac{EC}. Therefore, the distribution of the gradients changes significantly over the iterations. As a result, single-stage fitting suffers more when the \ac{EC} mechanism is used, particularly for fitting the tail, as in \cref{fig:CDF1EC} and \cref{fig:CDF2EC}. We also validate that the gradients generated from the other networks in \cref{tab:models} can be well approximated by \acp{r.v.} distributed according to one of the \acp{SPD}; these results are not reported here for conciseness.
In general, there is a slight variation in the fitting accuracy among the three \acp{SPD} for various networks and datasets, due to the nature of the gradients. For example, the double exponential distribution cannot capture well gradients whose empirical distribution decays fast. In contrast, the double gamma and double \ac{GPD} distributions have an additional shape parameter that can approximate the behavior of sparser vectors with $\alpha<1$. Nevertheless, the double exponential distribution behaves well when the distribution of the absolute values of the gradients decays as fast as the exponential distribution. \begin{figure*}[!ht] \centering \begin{subfigure}[h]{0.48\textwidth} \centering \includegraphics[width=1\textwidth]{Figures/modeling/FittingPDFECSA.pdf} \caption{} \label{fig:PDF1EC} \end{subfigure} \hfill \begin{subfigure}[h]{0.48\textwidth} \centering \includegraphics[width=1\textwidth]{Figures/modeling/FittingCDFECSA.pdf} \caption{} \label{fig:CDF1EC} \end{subfigure} \\ \begin{subfigure}[h]{0.48\textwidth} \centering \includegraphics[width=1\textwidth]{Figures/modeling/FittingPDF2ECSA.pdf} \caption{} \label{fig:PDF2EC} \end{subfigure} \hfill \begin{subfigure}[h]{0.48\textwidth} \centering \includegraphics[width=1\textwidth]{Figures/modeling/FittingCDF2ECSA.pdf} \caption{} \label{fig:CDF2EC} \end{subfigure} \caption{Gradient fitting using the three \acp{SPD} for the gradient vector along with the empirical distribution generated from training ResNet-20 on CIFAR10 dataset using $\topk$ compressor \textbf{with \ac{EC} mechanism}, for the $100^{\text{th}}$ [(a) PDF, (b) CDF] and $10000^{\text{th}}$ [(c) PDF, (d) CDF] iterations.} \label{fig:fitteddistributionsEC} \end{figure*} \subsection{Analysis of Double Gamma and Double Generalized Pareto Distributed Gradients} \label{apdx:threshodcalculation} \subsubsection{Threshold Calculation for Gamma Distributed Gradients} \label{apdx:gammathreshold} \begin{customcorollary}{1.2}\label{corollary:gammathreshold} For gradients that can be well fitted by a double gamma distribution with shape parameter $\alpha \leq 1$, the absolute value of the gradient is gamma distributed \cite{Bond:01}, i.e., $ |\G| {\sim} \operatorname{gamma}(\alpha,\b)$. The sparsifying threshold can be derived as \begin{align} \label{eq.gammathreshold} \eta(\delta) &=\hat{\b} \, P^{-1}(\hat{\alpha},1-\delta) \\ & \simeq -\hat{\b}\, {\left[\log(\delta)+ \log({\Gamma(\hat{\alpha}))}\right]}, \end{align} where ${ P(\alpha,x)\triangleq \frac{1}{\Gamma(\alpha)}\int _{0}^{x}t^{\alpha-1}\,\mathrm {e} ^{-t}\,{\rm {d}}t\,}$ is the regularized lower incomplete gamma function, and $P^{-1}(\alpha,p)\triangleq \{x: P(\alpha,x)=p\}$ is the inverse of the regularized lower incomplete gamma function \cite{AbraSte:65}, \begin{align} \ahat &\triangleq \frac{3 - s+ \sqrt{(s - 3)^2 + 24\,s}}{12\,s}, &\bhat \triangleq \frac{\widehat{\mu}}{\ahat}, \end{align} with $s \triangleq \log(\widehat{\mu})-\widehat{\mu}_{\log}$, $\widehat{\mu}_{\log} \triangleq \frac{1}{\d}\sum_{i=1}^{\d} \log\left({|\gr|}_{i}\right)$, and $\widehat{\mu}$ and $\widehat{\sigma}^2$ being the sample mean and variance of the absolute gradient vector $|\mathbf \gr|$, respectively. \end{customcorollary} \begin{proof} The gradients are modeled by a double gamma distribution with $\alpha \leq 1$, with \ac{PDF} defined in \cite{Bond:01} as \begin{align} f_{\G}(\gr;\alpha,\b) & =\frac{1}{2} \frac{|\gr|^{\alpha-1} e^{-|\gr|/\beta}}{\b^\alpha\Gamma(\alpha)}, &&\text{for}\quad -\infty < \gr < \infty.
\end{align} Hence, the absolute value of the gradient is modeled by a gamma distribution with \ac{PDF} \begin{align} \label{eq:gammapdf} f_{|\G|}(\gr;\alpha,\b) & = \frac{\gr^{\alpha-1} e^{-\gr/\beta}}{\b^\alpha\Gamma(\alpha)}, &&\text{for}\quad 0 \leq \gr < \infty. \end{align} The \ac{CDF} of the gamma \ac{r.v.} can be written from \eqref{eq:gammapdf} as \begin{align} F_{|\G|}(\gr;\alpha,\b) &=\int_{0}^{\gr}\frac{t^{\alpha-1} e^{-t/\beta}}{\b^\alpha\Gamma(\alpha)} \mathrm{d}t\\ &=\int_{0}^{\gr/\b}\frac{ z^{\alpha-1} e^{-z}}{\Gamma(\alpha)} \mathrm{d}z \triangleq P(\alpha, \gr/\b ) , \end{align} where $P(\alpha, x)$ is the regularized lower incomplete gamma function \cite{AbraSte:65}. The threshold in \eqref{eq.gammathreshold} follows from the inverse of the \ac{CDF} at $1-\delta$, as illustrated in \eqref{eq:thresholdabs}, and by substituting the parameters of the gamma distribution with their estimates ${\ahat}$ and $\bhat$. Nevertheless, calculating the threshold involves the inverse of the incomplete gamma function, which can be computationally heavy. In the following, we provide a closed-form approximation for the threshold. First, we would like to find a closed-form approximation for the inverse lower incomplete gamma function at $1-\delta$, i.e., $x \triangleq P^{-1}(\ahat,1-\delta)$. Starting from the bound on $P(\ahat, x)$ in \cite{Olver:97}, we have \begin{align} P(\ahat, x)&= 1-\delta \geq 1- \frac{x^{\ahat-1}\,e^{-x}}{\Gamma(\ahat)} \qquad \text{for\,\,}\ahat\leq 1, x> 0. \end{align} After some manipulations, we get \begin{align} x &\leq -\log\left(\delta \Gamma(\ahat)\right) -(1-\ahat)\log(x), &&\text{for\,\,}{\ahat}\leq 1, x> 0,\\ x&\leq - \log\left(\delta\right) -\log\left(\Gamma({\ahat})\right), &&\text{for\,\,}{\ahat}\leq 1, x\geq 1.\label{eq:inversep} \end{align} Finally, by substituting $P^{-1}(\ahat,1-\delta)$ with \eqref{eq:inversep} in \eqref{eq.gammathreshold}, we get \begin{align} \eta &\leq - {\bhat}\, \left[\log(\delta)+ \log({\Gamma({\ahat}))}\right], &&\text{for\,\,}{\ahat}\leq 1, x\geq 1, \end{align} with equality if $\ahat=1$. For $0<x<1$ or $\ahat>1$, the bound does not hold; however, it provides a good approximation for the threshold when $\ahat$ is close to one. To estimate the parameters, let us start from the \ac{PDF} of the gamma distribution, defined as \begin{align} f_{|\G|}(\gr;\a,\b) & = \frac{\gr^{\a-1} e^{-\gr/\b}}{\b^\a\Gamma(\a)} \quad \text{ for } x > 0, \quad \a,\b>0, \end{align} where $\alpha$ and $\beta$ are the shape and scale parameters, respectively. The shape parameter can be estimated from the absolute gradient vector $|\mathbf \gr|$ using \ac{MLE} as the solution of \begin{align}\label{eq.alphaMLE} \Psi(\alpha)-\log(\alpha)+\log(\widehat{\mu})- \widehat{\mu}_{\log}=0, \end{align} where $\Psi(x)\triangleq \frac{\d \log\Gamma(x)}{\d x}$ is the digamma function, $\widehat{\mu} \triangleq \frac{1}{\d}\sum_{i=1}^{\d} {|\gr|}_{i}$ is the sample mean, and $\widehat{\mu}_{\log} \triangleq \frac{1}{\d}\sum_{i=1}^{\d} \log\left({|\gr|}_{i}\right)$ \cite{Papoulis:02}. On the other hand, the scale parameter can be estimated as $\bhat=\widehat{\mu}/\ahat$. Nevertheless, the shape parameter estimation in \eqref{eq.alphaMLE} involves solving a non-linear equation with a special function. Hence, it increases the computational complexity of the scheme, leading to higher time overhead for the compression.
In order to reduce the complexity, we propose to employ a simpler closed-form approximation for the shape parameter, i.e., \begin{align} &\ahat = \frac{3 - s+ \sqrt{(s - 3)^2 + 24\,s}}{12\,s}, & \bhat=\frac{\widehat{\mu}}{\ahat}, \end{align} where $s \triangleq \log(\widehat{\mu})-\widehat{\mu}_{\log}$ \cite{Minka:02}. \end{proof} \subsubsection{Threshold Calculation for Generalized Pareto Distributed Gradients} \label{apdx:GPDthreshold} \begin{customcorollary}{1.3}\label{corollary:Gparetothreshold} For gradients distributed as double generalized Pareto \acp{r.v.}, the absolute value of the gradients is modeled as a \ac{GPD} distributed \ac{r.v.}, $|\G|\sim \operatorname{GP}(\a,\b,a)$, where $0 < \a < 1/2$, $\b$, and $a=0$ are the shape, scale, and location parameters, respectively. The sparsifying threshold that achieves a compression ratio $\delta$ is \begin{align}\label{eq:thresholddgpd} \eta &= \frac{\bhat}{\ahat} \left(e^{-\ahat \log(\delta)}-1 \right), \end{align} where \begin{align}\label{eq:gpdestimatesappdx} \ahat&\triangleq \frac{1}{2}\, \left(1-\frac{\hat{\mu}^2}{\hat{\sigma}^2} \right), &&\bhat \triangleq \frac{1}{2}\, \hat{\mu} \left(\frac{\hat{\mu}^2}{\hat{\sigma}^2} +1 \right), \end{align} with $\hat{\mu}$ and $\hat{\sigma}^2$ being the sample mean and variance of the absolute gradient vector, $|\mathbf \gr|$, respectively. \end{customcorollary} \begin{proof} The gradients can be well fitted by a double \ac{GPD} with \ac{PDF} indicated in \cite{ArmDunLee:13} as\footnote{The double \ac{GPD} resembles the Laplacian distribution for $\a \rightarrow 0$. Similarly, the \ac{GPD} becomes the exponential distribution for $\a=0$.} \begin{multline} f_{\G}(\gr)= \frac{1}{2 \b} \left(1+ \a \,\frac{|\gr|}{\b} \right)^{-(\frac{1}{\a}+1)}, \\ 0 < \a < \frac{1}{2},\,- \infty <\gr < \infty~. \end{multline} Hence, the absolute values of the gradients can be modeled as \ac{GPD} distributed \acp{r.v.} with \ac{PDF} \begin{align} f_{|\G|}(\gr)= \frac{1}{\b} \left(1+ \a\, \frac{\gr}{\b} \right)^{-(1/\a+1)}, && 0 < \a \leq \frac{1}{2},\, \gr \geq 0, \end{align} and the corresponding \ac{CDF} can be written from \cite{HosWal:87} as \begin{align}\label{eq:cdfgpd} F_{|\G|}(\gr)= 1- \left(1+ \a\, \frac{\gr}{\b} \right)^{-1/\a}. \end{align} The inverse \ac{CDF} can be written from \eqref{eq:cdfgpd} as \begin{align} \label{eq:inversecdfgpd} F^{-1}_{|\G|}(p)= \frac{\b}{\a} \left(e^{-\a \log(1-p)}-1 \right). \end{align} From \eqref{eq:thresholdabs} and \eqref{eq:inversecdfgpd}, and by substituting the distribution parameters with their estimates, provided below, the threshold in \eqref{eq:thresholddgpd} follows. Unfortunately, there are no closed-form \ac{ML} estimators for the parameters of \ac{GPD} distributions. Hence, the \ac{ML} estimates have to be computed through complex numerical optimization. Alternatively, the parameters can be estimated in closed form through the \ac{MM} method under some conditions on the shape parameter \cite{HosWal:87}. More precisely, for the considered range of the shape parameter, i.e., $-0.5<\alpha<0.5$, the first and second moments exist, and they can be written as \begin{align} \label{eq:meanssgdp} \mu &= \frac{\b}{1-\a}, && S^2= \frac{\b^2}{(1-\a)^2 (1-2\,\a)}, \end{align} where $\mu$ and $S^2$ are the mean and variance, respectively.
Therefore, from \eqref{eq:meanssgdp} through the \ac{MM} method, the parameters can be estimated as \begin{align}\label{eq:gpdestimatesapndx} \ahat&= \frac{1}{2}\, \left[1-\frac{\hat{\mu}^2}{\hat{\sigma}^2} \right], &&\bhat= \frac{1}{2}\, \hat{\mu} \left[\frac{\hat{\mu}^2}{\hat{\sigma}^2} +1 \right], \end{align} where $\hat{\mu}$ and $\hat{\sigma}^2$ are the sample mean and variance of the absolute gradient vector, $|\mathbf \gr|$, respectively. \end{proof} \subsubsection{Proof of \cref{lemma:PoT}} \label{apdx:prooflemmaPoT} The distribution of the \ac{PoT} absolute gradients for the $m$th stage can be approximated by a \ac{GPD} from Theorem 4.1 in \cite{Coles:01} with \ac{CDF} \begin{align} F_{{{|\bar{\G}_{m}|}}}(\gr)=& 1- \left(1+ \a_{m}\, \frac{\gr-\eta_{m-1}}{\b_{m}} \right)^{-1/\a_{m}}, \nonumber \\ &\gr \geq \eta_{m-1}, -1/2< \a_{m} < 1/2, \label{eq:mscdfgpd} \end{align} where the first and second moments of the \ac{r.v.} ${{{|\bar{\G}_{m}|}}}$ are finite and the \ac{PDF} is smooth for $-1/2 <\a_{m} < 1/2$ \cite{HosWal:87}. The inverse \ac{CDF} can be written from \eqref{eq:mscdfgpd} as \begin{align} \label{eq:msinversecdfgpd} F^{-1}_{{{|\bar{\G}_{m}|}}}(p)= \frac{\b_{m}}{\a_{m}} \left(e^{-\a_{m} \log(1-p)}-1 \right) +\eta_{m-1}. \end{align} The threshold in \eqref{eq:msthresholddgpd} follows from \eqref{eq:thresholdabs} and \eqref{eq:msinversecdfgpd}, and by substituting the distribution parameters with their estimates, derived from \eqref{eq:meanssgdp} as \begin{align}\label{eq:gpdestimatesapndxPoT} \ahat_{m}&= \frac{1}{2}\, \left[1-\frac{\bar{\mu}^2}{\bar{\sigma}^2} \right], &&\bhat_{m}= \frac{1}{2}\, \bar{\mu} \left[\frac{\bar{\mu}^2}{\bar{\sigma}^2} +1 \right], \end{align} where the sample mean $\bar{\mu}$ and the variance $\bar{\sigma}^2$ are computed from the absolute values of the \ac{PoT} gradients shifted by the threshold, i.e., $|\tilde{\mathbf \gr}_{m}|-\eta_{m-1}$. \subsubsection{Proof of Corollary~\ref{corollary:expPoT}} \label{apdx:proofcorollaryexpPoT} The \ac{CCDF} of the exceedance \ac{r.v.} can be written as \begin{align} &\mathbb{P}\left\{{{{|\bar{\G}_{m}|}}} \geq \gr\right\} \nonumber \\ &=\mathbb{P}\Big\{{|\G_{m}|} \geq \gr \,\Big|\, |\G_{m}|> \eta_{m-1} \Big\},\forall \gr \geq \eta_{m-1} \\ &=\mathbb{P}\Big\{{|\G_{m}|} \geq \eta_{m-1}\!+\!y \,\Big|\, |\G_{m}|\!>\! \eta_{m-1} \Big\}, \forall y \!\triangleq\! \gr\!-\!\eta_{m-1} \!\geq\! 0 \\ &= \frac{1- F_{|G_{m}|}(\eta_{m-1}+y)}{1- F_{|G_{m}|}(\eta_{m-1})} =e^{- \frac{\gr-\eta_{m-1}}{\b_{m}}}. \label{eq:msexp} \end{align} From \eqref{eq:msexp}, the \ac{PoT} gradients are distributed as an exponential \ac{r.v.} with location $\eta_{m-1}$. Hence, the \ac{r.v.} ${|\bar{\G}_{m}|}-\eta_{m-1}$ is exponentially distributed. Consequently, the threshold can be calculated from \eqref{eq:LaplaceThreshold} after proper shifting.
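As a compact numerical illustration of these estimators (our own sketch; the even per-stage ratio split $\delta^{1/M}$ and the Laplace-distributed test vector are illustrative choices, not necessarily those made by \cref{algo:algo1}), the closed-form thresholds of Corollaries~\ref{corollary:gammathreshold} and \ref{corollary:Gparetothreshold}, together with a \ac{PoT}-style multi-stage refinement, can be coded as:
\begin{verbatim}
import numpy as np
from math import lgamma

def gamma_threshold(abs_g, delta):
    # Corollary 1.2: eta ~ -bhat * (log(delta) + log(Gamma(ahat)))
    mu = abs_g.mean()
    s = np.log(mu) - np.log(abs_g).mean()
    ahat = (3.0 - s + np.sqrt((s - 3.0)**2 + 24.0*s)) / (12.0*s)
    bhat = mu / ahat
    return -bhat * (np.log(delta) + lgamma(ahat))

def gpd_threshold(abs_g, delta, loc=0.0):
    # Corollary 1.3 (method of moments), shifted by loc as in the PoT lemma
    mu, var = abs_g.mean(), abs_g.var()
    ahat = 0.5 * (1.0 - mu**2 / var)
    bhat = 0.5 * mu * (mu**2 / var + 1.0)
    return loc + bhat / ahat * (delta**(-ahat) - 1.0)

def multi_stage_threshold(g, delta, stages=3):
    # re-fit only the tail: exceedances over the previous threshold
    abs_g, eta = np.abs(g), 0.0
    delta_m = delta**(1.0 / stages)     # per-stage ratio
    for _ in range(stages):
        eta = gpd_threshold(abs_g[abs_g > eta] - eta, delta_m, loc=eta)
    return eta

rng = np.random.default_rng(0)
g, delta = rng.laplace(scale=1e-3, size=10**6), 0.001
eta = multi_stage_threshold(g, delta)
print(np.mean(np.abs(g) > eta), delta)  # achieved vs target ratio
\end{verbatim}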
\section{Proof of Lemma~\ref{lemma:convanalysis} for the Convergence Analysis} \label{appnd:conanalyproof} Let $\bar{f}: \mathbb{R}^\d \rightarrow \mathbb{R}$ be the function to be minimized. This function can be a convex or non-convex $L_{0}$-smooth function \cite{ef-sgd}. We also assume that the expected value of the stochastic gradient vector equals the oracle gradient, and that the second moment of the stochastic gradient is upper bounded by a constant $\sigma_{0}^2$. Let us first assume that the genie-aided distribution of the amplitude of the stochastic gradient, $F_{\G}(g)$, is known.\footnote{The genie-aided distribution assumption is relaxed later.} Hence, the threshold $\eta$ is calculated as in \cref{eq:threshold} for some compression ratio $\delta$.\footnote{Note that the genie-aided distribution of the gradients' amplitude, $F_{\G}(g)$, should not be confused with the oracle gradient, $\nabla \bar{f}(\mathbf x_{\{i\}})$.} After applying the threshold-based compression operator $\mathbb{C}_{\eta}$, the number of non-zero gradients in the sparsified vector is a binomially distributed \ac{r.v.} with $\d$ trials and success probability $\delta$. Hence, the expected number of non-zero gradients matches that of $\topk$, i.e., $\mathbb{E}\left\{ \normo{ \mathbb{C}_{\eta}\{\mathbf{\mathbf \gr}\}} \right\}=\delta \d= \k.$ Therefore, the threshold-based compression technique, designed to keep the $\k$ largest gradients in magnitude, has the same $\k$-contraction property as $\topk$ on average: \begin{align}\label{eq:contraction} \mathbb{E}\left\{{\left\lVert{\mathbb{C}_{{\eta}}\{\mathbf{\mathbf \gr}\}}- \mathbf{\mathbf \gr}\right\rVert}_{2}^{2}\right\} &= \mathbb{E}\left\{{\left\lVert{\mathbb{T}_{{\k}}\{\mathbf{\mathbf \gr}\}}- \mathbf{\mathbf \gr}\right\rVert}_{2}^{2}\right\} \nonumber \\ &\leq (1-\delta)\,\mathbb{E}\left\{{\mathbf{\|\mathbf \gr\|}}_{2}^{2} \right\}. \end{align} From \eqref{eq:contraction} and Theorem II in \cite{ef-sgd} for compressed \ac{SGD} adopted with the \ac{EC} technique, we have \begin{align} \min_{i\in [I]} \mathbb{E}\{\norm{\nabla \bar{f}\left(\mathbf x_{\{i\}}\right)}_{2}^{2} \} &\leq \frac{4(\bar{f}(\mathbf x_{0})-\bar{f}^{*})+L_{0}\, \sigma_{0}^2}{2\sqrt{I+1}}\nonumber \\ &+\frac{4 \, L_{0}^2\, \sigma_{0}^{2} \,(1-\delta)}{\delta^2\,(I+1)}, \end{align} where $\bar{f}^{*}$ is a minimum value of the function $\bar{f}$, and $I$ is the number of iterations over which the function is minimized. Therefore, the rate of convergence of the threshold-based scheme with the genie-aided distribution coincides with that of $\topk$ designed with the same compression ratio $\delta$ (remark 4 in \cite{ef-sgd}). In other words, after $I>\mathbb{O}\left(1/\delta^2\right)$ iterations, the thresholding scheme attains the \ac{SGD} convergence rate. Now, let us move to the more realistic case where the genie-aided distribution of the gradients is unknown. Indeed, there can be a discrepancy between the original distribution and the estimated one, $\hat{F}_{\G}(g)$, which weakens the \ac{SPD} assumption. In this case, the threshold is estimated as $\hat{\eta}=\hat{F}^{-1}_{\G}(1-\delta)$, leading to an error in the resulting average compression ratio, $\hat{\delta} \triangleq \kh/\d$, quantified as \begin{align} \hat{\delta}-\delta &\triangleq \frac{1}{\d} \left(\mathbb{E}\left\{\normo{\mathbb{C}_{\hat{\eta}}\{\mathbf{\mathbf \gr}\}}\right\} -\mathbb{E}\left\{\normo{\mathbb{C}_{\eta}\{\mathbf{\mathbf \gr}\}}\right\} \right)\\ &= F_{\G}\left(\eta(\delta) \right)- {F}_{\G}(\hat{\eta}(\delta)). \end{align} In \cref{algo:algo1}, the number of thresholding stages is adapted such that \begin{align} &\left\lvert \hat{\delta}-\delta \right\rvert \leq \epsilon \, \delta, && 0 \leq \epsilon < 1\,. \end{align} Hence, the actual compression ratio can be bounded as \begin{equation} \delta\, \left(1- \epsilon\right) \leq \hat{\delta} \leq \delta\, \left(1+ \epsilon \right).
\end{equation} For $\hat{\delta} \geq \delta$, the proposed scheme converges at a rate faster than that of $\topk$, as the total number of iterations required to reach the \ac{SGD} rate is $I>\mathbb{O}\left(\frac{1}{\delta^2 \, (1+\epsilon)^2}\right)$, which is smaller than that required for $\topk$. The reason is that the proposed scheme, in this case, has a better contraction property on average. On the other hand, for $\hat{\delta} \leq \delta$, after a number of iterations $I>\mathbb{O}\left(\frac{1}{\delta^2 \, (1-\epsilon)^2}\right)$, the proposed scheme attains the \ac{SGD} convergence rate, requiring more iterations than $\topk$. In Lemma~\ref{lemma:convanalysis}, we report only the worst-case convergence rate, which requires more iterations. \section{Experimental Specifications} \label{apdx:clusters} \paragraph{\textbf{Cluster 1 - Dedicated Environment}} \begin{itemize}[noitemsep,topsep=0pt,leftmargin=10pt] \item 8 nodes per experiment \item GPUs per node: 1 $\times$ Tesla V100-SXM2 with 16GB of GPU memory \item GPU inter-connection: traversing PCIe and the SMP interconnect between NUMA nodes \item CPU: Intel(R) Xeon(R) Silver 4112 CPU @ 2.60GHz, 16 cores \item System memory: 512 GiB \item Ethernet: 25 Gbps SFI/SFP+ - Ethernet \item Network Topology: Star network topology \item OS: Ubuntu 18.04 + Linux Kernel v4.15 \item Environment: Horovod's Docker container on DockerHub \item Software: PyTorch 1.1.0, Horovod 0.16, and OpenMPI v4.0 \end{itemize} \paragraph{\textbf{Cluster 2 - Shared Environment}} \begin{itemize}[noitemsep,topsep=0pt,leftmargin=10pt] \item 1 node per experiment \item GPUs per node: 8 $\times$ Tesla V100-SXM2 with 32GB of GPU memory \item GPU inter-connection: traversing PCIe and the SMP interconnect between NUMA nodes \item CPU: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz, 16 cores \item System memory: 512 GiB \item Ethernet: 100 Gbps - InfiniBand \item Network Topology: Fat Tree topology \item OS: CentOS 7.7 + Linux Kernel v3.10 \item Environment: Miniconda 4.3 \item Software: PyTorch 1.3, Horovod 0.18, and OpenMPI 4.0 \end{itemize} \subsection{Further Experimental and Evaluation Details} \label{apdx:expdetails} Here, we present more details on our experimental settings, benchmarks, hyper-parameters, etc. First, we describe the three benchmarks used in this work, which cover three ML tasks commonly used in practice. The benchmarks also cover both \ac{RNN} and \ac{CNN} architectures. \textbf{Image Classification: } We studied ResNet20 and VGG16 on CIFAR10, and ResNet-50 and VGG19 on ImageNet. CIFAR10 consists of 50,000 training images and 10,000 validation images in 10 classes~\cite{cifar10}, while ImageNet contains over 1 million training images and 50,000 validation images in 1000 classes~\cite{imagenet}. We train CIFAR10 models with vanilla SGD (without momentum) and ImageNet models with Nesterov-momentum SGD following the training schedule in~\cite{Gross2016}. The warm-up period is set to 5 epochs for all schemes. \textbf{Language Modeling: } The Penn Treebank corpus (PTB) dataset consists of 923,000 training, 73,000 validation, and 82,000 test words~\cite{ptb}. We adopt the 2-layer LSTM language model architecture with 1500 hidden units per layer~\cite{lstm}. We use Nesterov-momentum SGD with gradient clipping, while the learning rate decays when no improvement has been made in the validation loss. The warm-up period is 5 epochs out of the 30 epochs. \textbf{Speech Recognition: } The AN4 dataset contains 948 training and 130 test utterances~\cite{an4}.
We use the DeepSpeech architecture without an n-gram language model~\cite{deepspeech}, which is a multi-layer RNN following a stack of convolution layers. We train a 5-layer LSTM of 800 hidden units per layer with Nesterov-momentum SGD and gradient clipping, while the learning rate anneals every epoch. The warm-up period is 5 epochs out of 150 epochs. \paragraph{\textbf{Further Evaluation Details:}} For training speed-up, we evaluate the speed-up based on the time-to-accuracy of each method, i.e., the time at which it achieves (or exceeds) a certain training accuracy or test perplexity. The target test accuracy is 75\% for ResNet20 and 80\% for VGG16 on CIFAR10. The target test perplexity is 105 for the PTB benchmark. The target \ac{CER} is 55 for AN4. We compare no compression with existing and proposed sparsification methods at ratios of $0.1$, $0.01$, and $0.001$ using 8 nodes. \section{Extra Experiments and Results} \label{apdx:moreexp} \begin{figure*}[!h] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.8\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/compress_ec_sgd/legend.pdf} \end{subfigure} \\ \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/compress_ec_sgd/resnet20_0.1_8_1_compratio.pdf} \caption{ResNet20 on CIFAR10 - Ratio 0.1.} \label{fig:resnet20-avgcomp0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/compress_ec_sgd/resnet20_0.01_8_1_compratio.pdf} \caption{ResNet20 on CIFAR10 - Ratio 0.01.} \label{fig:resnet20-avgcomp0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/compress_ec_sgd/resnet20_0.001_8_1_compratio.pdf} \caption{ResNet20 on CIFAR10 - Ratio 0.001.} \label{fig:resnet20-avgcomp0.001-8} \end{subfigure} \\ \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/compress_ec_sgd/vgg16_0.1_8_1_compratio.pdf} \caption{VGG16 on CIFAR10 - Ratio 0.1.} \label{fig:vgg16-avgcomp0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\textwidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/compress_ec_sgd/vgg16_0.01_8_1_compratio.pdf} \caption{VGG16 on CIFAR10 - Ratio 0.01.} \label{fig:vgg16-avgcomp0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/compress_ec_sgd/vgg16_0.001_8_1_compratio.pdf} \caption{VGG16 on CIFAR10 - Ratio 0.001.} \label{fig:vgg16-avgcomp0.001-8} \end{subfigure} \\ \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/compress_ec_nesterov/resnet50_0.1_8_1_compratio.pdf} \caption{ResNet50 on ImageNet - Ratio 0.1.} \label{fig:resnet50-avgcomp0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/compress_ec_nesterov/resnet50_0.01_8_1_compratio.pdf} \caption{ResNet50 on ImageNet - Ratio 0.01.} \label{fig:resnet50-avgcomp0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/compress_ec_nesterov/resnet50_0.001_8_1_compratio.pdf} \caption{ResNet50 on ImageNet - Ratio
0.001.} \label{fig:resnet50-avgcomp0.001-8} \end{subfigure} \\ \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_ptb/compress_ec_nesterov/lstm_0.1_8_1_compratio.pdf} \caption{LSTM on PTB - Ratio 0.1.} \label{fig:ptb-avgcomp0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_ptb/compress_ec_nesterov/lstm_0.01_8_1_compratio.pdf} \caption{LSTM on PTB - Ratio 0.01.} \label{fig:ptb-avgcomp0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_ptb/compress_ec_nesterov/lstm_0.001_8_1_compratio.pdf} \caption{LSTM on PTB - Ratio 0.001.} \label{fig:ptb-avgcomp0.001-8} \end{subfigure} \\ \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_an4/compress_ec_nesterov/lstm_0.1_8_1_compratio.pdf} \caption{LSTM on AN4 - Ratio 0.1.} \label{fig:an4-avgcomp0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_an4/compress_ec_nesterov/lstm_0.01_8_1_compratio.pdf} \caption{LSTM on AN4 - Ratio 0.01.} \label{fig:an4-avgcomp0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_an4/compress_ec_nesterov/lstm_0.001_8_1_compratio.pdf} \caption{LSTM on AN4 - Ratio 0.001.} \label{fig:an4-avgcomp0.001-8} \end{subfigure} \caption{Smoothed compression ratio for all benchmarks at different ratios.} \label{fig:compratio} \end{figure*} In the following, we present more results, including more detailed metrics and experimental scenarios. We refer to the 1-stage double Gamma followed by $M-1$ stages of Generalized Pareto, the multi-stage Generalized Pareto, and the multi-stage double exponential as \scheme\!-GP, \scheme\!-P, and \scheme\!-E, respectively. \subsection{Further Metrics and Experimental Scenarios} \paragraph{\textbf{Quality of Estimation Methods:}} \cref{fig:compratio} shows the smoothed (or running average) compression ratio for all benchmarks and the three ratios ($0.1$, $0.01$, and $0.001$) used in the experiments. The results signify the quality of the obtained threshold throughout the training for DGC, RedSync, GaussianKSGD, and the three \scheme\! methods. The results, in general, reinforce our previous observation that \scheme\! schemes perform quite well and achieve nearly the same threshold quality as the sampling method of DGC. \scheme\! schemes are also significantly better than the other estimation methods (i.e., RedSync and GaussianKSGD). Moreover, the other estimation methods (e.g., RedSync and GaussianKSGD) generally result in high oscillations, and their over/under-estimation can be up to $\approx\!\pm60\times$ the target. We also observe, in a few cases, that the multi-stage \scheme\!-GP (i.e., Gamma-Pareto) results in a slight over-estimation, which is at most 2 times the target ratio. This could be attributed to inaccuracies in the first-stage threshold estimation, which uses the closed-form moment-matching approximation for fitting the double gamma distribution. These results also support the observation presented in \cref{fig:an4-speedup-8-all}, in which \scheme\!, unlike all other methods, achieved the target Character Error Rate (CER) because it over-estimated the threshold at an early stage of training.
In particular, \cref{fig:an4-avgcomp0.001-8} shows that the \scheme\!-E algorithm, at the beginning of training, uses single-stage fitting for the target ratio, which leads to threshold over-estimation for a few iterations until it settles at the final number of stages. Thanks to the multi-stage adaptation technique, it reaches the appropriate number of stages, which allows it to stay at the target compression ratio. The extra volume at the beginning of training, at this extreme sparsification ratio for this benchmark, leads to significant accuracy gains and explains the results presented in \cref{fig:an4-speedup-8}.
\paragraph{\textbf{Training Loss:}} We present the training loss vs.\ run-time plots for all benchmarks using all ratios. \cref{fig:accuracy} shows the convergence of all schemes over time, and the results in general confirm the speed-up results presented in \cref{sec:experiments} and \cref{apdx:expalldist}. The results highlight the gains in terms of time and accuracy from employing compression over no-compression. They also show that most compressors (except for GaussianKSGD and RedSync) achieve the same accuracy as $\topk$ but at a lower overhead.
\begin{figure*}[!h] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.8\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/legend.pdf} \end{subfigure} \\
\begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/resnet20_0.1_8_1_loss.pdf} \caption{ResNet20 on CIFAR10 - Ratio 0.1.} \label{fig:resnet20-acc0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/resnet20_0.01_8_1_loss.pdf} \caption{ResNet20 on CIFAR10 - Ratio 0.01.} \label{fig:resnet20-acc0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/resnet20_0.001_8_1_loss.pdf} \caption{ResNet20 on CIFAR10 - Ratio 0.001.} \label{fig:resnet20-acc0.001-8} \end{subfigure} \\
\begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/vgg16_0.1_8_1_loss.pdf} \caption{VGG16 on CIFAR10 - Ratio 0.1.} \label{fig:vgg16-acc0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\textwidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/vgg16_0.01_8_1_loss.pdf} \caption{VGG16 on CIFAR10 - Ratio 0.01.} \label{fig:vgg16-acc0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/vgg16_0.001_8_1_loss.pdf} \caption{VGG16 on CIFAR10 - Ratio 0.001.} \label{fig:vgg16-acc0.001-8} \end{subfigure} \\
\begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/accuracy_ec_nesterov/resnet50_0.1_8_1_loss.pdf} \caption{ResNet50 on ImageNet - Ratio 0.1.} \label{fig:resnet50-acc0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/accuracy_ec_nesterov/resnet50_0.01_8_1_loss.pdf} \caption{ResNet50 on ImageNet - Ratio 0.01.} \label{fig:resnet50-acc0.01-8} \end{subfigure}
\hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/accuracy_ec_nesterov/resnet50_0.001_8_1_loss.pdf} \caption{ResNet50 on ImageNet - Ratio 0.001.} \label{fig:resnet50-acc0.001-8} \end{subfigure} \\
\begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_ptb/accuracy_ec_nesterov/lstm_0.1_8_1_loss.pdf} \caption{LSTM on PTB - Ratio 0.1.} \label{fig:ptb-acc0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_ptb/accuracy_ec_nesterov/lstm_0.01_8_1_loss.pdf} \caption{LSTM on PTB - Ratio 0.01.} \label{fig:ptb-acc0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_ptb/accuracy_ec_nesterov/lstm_0.001_8_1_loss.pdf} \caption{LSTM on PTB - Ratio 0.001.} \label{fig:ptb-acc0.001-8} \end{subfigure} \\
\begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_an4/accuracy_ec_nesterov/lstm_0.1_8_1_loss.pdf} \caption{LSTM on AN4 - Ratio 0.1.} \label{fig:an4-acc0.1-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_an4/accuracy_ec_nesterov/lstm_0.01_8_1_loss.pdf} \caption{LSTM on AN4 - Ratio 0.01.} \label{fig:an4-accp0.01-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.3\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_an4/accuracy_ec_nesterov/lstm_0.001_8_1_loss.pdf} \caption{LSTM on AN4 - Ratio 0.001.} \label{fig:an4-acc0.001-8} \end{subfigure}
\caption{Smoothed training loss vs.\ wall run-time for all benchmarks at different target sparsity ratios.} \label{fig:accuracy} \end{figure*}
\paragraph{\textbf{VGG19 on ImageNet:}} We also present similar metrics (i.e., smoothed compression ratio and training loss vs.\ run-time) for the VGG19 benchmark in \cref{fig:vgg19-more}. The results in \cref{fig:vgg19-avgcomp0.001-8} show that all \scheme\! methods estimate the threshold with high accuracy. They also show that GaussianKSGD fails to estimate the threshold and that RedSync exhibits significantly high variability. \cref{fig:vgg19-acc0.001-8} also shows that \scheme\! methods have noticeably higher speed-ups over all other schemes (esp., $\topk$, RedSync and GaussianKSGD).
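For reference, the estimation-quality metric underlying these plots is simply the ratio between the number of elements a threshold actually selects, $\hat{k}$, and the target $\k$. The snippet below is a minimal PyTorch-style sketch of how such a curve can be computed and smoothed for plotting; the helper names and the smoothing constant \texttt{alpha} are illustrative assumptions, not taken from our implementation.
\begin{verbatim}
import torch

def estimation_quality(grad, eta, target_ratio):
    # Normalized actual ratio k_hat / k for a given threshold eta.
    d = grad.numel()
    k_hat = int((grad.abs() >= eta).sum())
    k = max(1, int(target_ratio * d))
    return k_hat / k

def smooth(values, alpha=0.05):
    # Running (exponentially weighted) average, used only for plotting.
    avg, out = None, []
    for v in values:
        avg = v if avg is None else (1.0 - alpha) * avg + alpha * v
        out.append(avg)
    return out
\end{verbatim}
A curve such as those in \cref{fig:compratio} is then obtained by evaluating \texttt{estimation\_quality} once per training iteration and passing the resulting sequence through \texttt{smooth}.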
\begin{figure*}[!h] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.8\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/accuracy_ec_sgd/legend.pdf} \end{subfigure}
\begin{subfigure}[ht]{0.44\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/compress_ec_nesterov/vgg19_0.001_8_1_compestratio.pdf} \caption{Smoothed compression ratio.} \label{fig:vgg19-avgcomp0.001-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.44\linewidth} \includegraphics[ width=\textwidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/accuracy_ec_nesterov/vgg19_0.001_8_1_loss.pdf} \caption{Training loss.} \label{fig:vgg19-acc0.001-8} \end{subfigure}
\caption{Performance metrics of training VGG19 on ImageNet using a ratio of $0.001$.} \label{fig:vgg19-more} \end{figure*}
\paragraph{\textbf{CPU as the Compression Device:}} In this experiment, instead of using the GPU as the compression target, we use the CPU as the compression device and report the average training throughput. Due to the slow speed of this setup, we run the experiment for only two epochs, as we are interested in the throughput numbers. We compare the performance of $\topk$, DGC and \scheme\!-E. \cref{fig:compcpu} presents the average training throughput (the first 10 iterations are excluded from the average). First, we note that the throughput on the CPU is relatively high for the $\topk$ method, which consistently performed the worst when the GPU is the target compression device. In contrast, \ac{DGC} now performs the worst among all methods due to the slow performance of random sampling on the CPU. On the other hand, \scheme\! consistently performs the best even with the CPU as the target device. These results are not surprising, as they closely match the observations from the micro-benchmark results (\cref{apdx:moremicrobench}).
\begin{figure*}[!h] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.5\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend2.pdf} \end{subfigure} \\
\begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_compcpu_cifar10/allratio_ec_sgd/resnet20_8_1_throughput1.pdf} \caption{ResNet20 on CIFAR10 (TPut).} \label{fig:resnet20-cputput-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_compcpu_cifar10/allratio_ec_sgd/vgg16_8_1_throughput1.pdf} \caption{VGG16 on CIFAR10 (TPut).} \label{fig:vgg16-cputput-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_compcpu_ptb/allratio_ec_sgd/lstm_8_1_throughput1.pdf} \caption{LSTM on PTB (TPut).} \label{fig:lstm-cputput-8} \end{subfigure}
\caption{Throughput when CPU is the compression device: (a) ResNet20, (b) VGG16 and (c) LSTM-PTB.} \label{fig:compcpu} \end{figure*}
\paragraph{\textbf{Full ImageNet Training on a Multi-GPU Node:}} In \cref{fig:ibex}, we present the results for training both ResNet50 and VGG19 on ImageNet fully for 90 epochs using a single node equipped with 8 Nvidia-V100 32GB GPUs in the shared cluster presented in \cref{apdx:clusters}. Each allocation of a node in the shared cluster is limited to 24 hours of run-time. We use a compression ratio of $0.1$ for ResNet50 and $0.01$ for VGG19.
Figures \ref{fig:resnet50-acc-ibex} and \ref{fig:vgg19-acc-ibex} show the top-1 test accuracy at the end of the training, either upon finishing the 90 epochs or when the allocation is revoked. They show that compression can achieve the same or higher accuracy than the no-compression baseline. Also, in the case of VGG19, compression speed-ups allow the training to converge faster and hence reach higher accuracy. \cref{fig:resnet50-tput-ibex} and \cref{fig:vgg19-tput-ibex} show the training throughput and that all methods surpass $\topk$. Moreover, \scheme\! schemes achieve higher throughput than \ac{DGC} and $\topk$. Finally, \cref{fig:resnet50-comp-ibex} and \cref{fig:vgg19-comp-ibex} show the estimation quality, which is very poor for the Gaussian-based fitting methods, while \scheme\! schemes achieve the same estimation quality as the sampling of DGC.
\begin{figure*}[!h] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/ibex_imagenet/allratio_ec_nesterov/resnet50_8_1_speedup.pdf} \caption{Training accuracy.} \label{fig:resnet50-acc-ibex} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/ibex_imagenet/allratio_ec_nesterov/resnet50_8_1_throughput.pdf} \caption{Training Throughput.} \label{fig:resnet50-tput-ibex} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/ibex_imagenet/allratio_ec_nesterov/resnet50_8_1_compestratio.pdf} \caption{Estimation Quality.} \label{fig:resnet50-comp-ibex} \end{subfigure} \\
\begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/ibex_imagenet/allratio_ec_nesterov/vgg19_8_1_speedup.pdf} \caption{Training accuracy.} \label{fig:vgg19-acc-ibex} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/ibex_imagenet/allratio_ec_nesterov/vgg19_8_1_throughput.pdf} \caption{Training Throughput.} \label{fig:vgg19-tput-ibex} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/ibex_imagenet/allratio_ec_nesterov/vgg19_8_1_compestratio.pdf} \caption{Estimation Quality.} \label{fig:vgg19-comp-ibex} \end{subfigure}
\caption{Training performance of ResNet50 [(a), (b), (c)] and VGG19 [(d), (e), (f)] on ImageNet using the multi-GPU node.} \label{fig:ibex} \end{figure*}
\subsection{Compression Complexity of DNN Models} \label{apdx:moremicrobench}
\textbf{Compression Overhead of Real Models:} In \cref{fig:microbench-models-speedup} and \cref{fig:microbench-models-time}, we present the compression speed-up over $\topk$ and the latency overhead for several models, including ResNet20, VGG16, ResNet50 and RNN-LSTM, used in training on the CIFAR10, ImageNet and PTB datasets, respectively. The results confirm those presented earlier for VGG16: threshold-based methods, including \scheme\!, outperform $\topk$ and \ac{DGC} on both GPU and CPU as the target compression device across all models in comparison. The results also show that \ac{DGC} outperforms $\topk$ on the GPU while $\topk$ outperforms \ac{DGC} on the CPU. Overall, for flexibility and compatibility with various devices, neither $\topk$ nor DGC is preferable.
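To make the overhead comparison concrete, the following minimal PyTorch sketch times the two selection primitives that dominate the compressors' cost: sort-based $\topk$ selection and linear-time threshold selection. It is an illustrative harness rather than our benchmark code; the tensor size, repetition counts, and the exponential-fit threshold constant are assumptions made for the example.
\begin{verbatim}
import time
import torch

def topk_compress(g, k):
    _, idx = torch.topk(g.abs(), k, sorted=False)   # sort-based selection
    return g[idx], idx

def threshold_compress(g, eta):
    # Linear scan: keep elements whose magnitude exceeds eta.
    idx = (g.abs() >= eta).nonzero(as_tuple=False).flatten()
    return g[idx], idx

def avg_time(fn, *args, reps=20):
    for _ in range(3):                               # warm-up runs
        fn(*args)
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t0 = time.perf_counter()
    for _ in range(reps):
        fn(*args)
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return (time.perf_counter() - t0) / reps

device = "cuda" if torch.cuda.is_available() else "cpu"
g = torch.randn(26_214_400, device=device)           # ~26M-element gradient
delta = 0.001
k = int(delta * g.numel())
eta = float(g.abs().mean()) * 6.9                    # ~ b_hat * log(1/delta)
print("topk     :", avg_time(topk_compress, g, k))
print("threshold:", avg_time(threshold_compress, g, eta))
\end{verbatim}
Running the same harness with the tensor placed on the CPU reproduces the qualitative trend above: the threshold scan stays cheap on both devices, whereas $\topk$ is slow on the GPU and sampling-based selection is slow on the CPU.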
\begin{figure*}[!ht] \centering \begin{subfigure}[ht]{0.5\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend2.pdf} \end{subfigure} \\
\begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet20_cuda_compression_microbench_speedup.pdf} \caption{ResNet20 on GPU} \label{fig:resnet20-cuda-speedupall} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/vgg16_cuda_compression_microbench_speedup.pdf} \caption{VGG16 on GPU} \label{fig:vgg16-cuda-speedupall} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet50_cuda_compression_microbench_speedup.pdf} \caption{ResNet50 on GPU} \label{fig:resnet50-cuda-speedupall} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/lstm_cuda_compression_microbench_speedup.pdf} \caption{LSTM on GPU} \label{fig:lstm-cuda-speedupall} \end{subfigure} \\
\begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet20_cpu_compression_microbench_speedup.pdf} \caption{ResNet20 on CPU} \label{fig:resnet20-cpu-speedupall} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/vgg16_cpu_compression_microbench_speedup.pdf} \caption{VGG16 on CPU} \label{fig:vgg16-cpu-speedupall} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet50_cpu_compression_microbench_speedup.pdf} \caption{ResNet50 on CPU} \label{fig:resnet50-cpu-speedupall} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/lstm_cpu_compression_microbench_speedup.pdf} \caption{LSTM on CPU} \label{fig:lstm-cpu-speedupall} \end{subfigure}
\caption{Compression speed-up over $\topk$ of compressing the gradient vectors of different models using various compressors and ratios on GPU (a,b,c,d) and CPU (e,f,g,h).} \label{fig:microbench-models-speedup} \end{figure*}
\begin{figure*}[!t] \centering \begin{subfigure}[ht]{0.37\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend2.pdf} \end{subfigure} \\
\begin{subfigure}{0.235\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet20_cuda_compression_microbench_time.pdf} \caption{ResNet20 on GPU} \label{fig:resnet20-cuda-timeall} \end{subfigure} \hfill \begin{subfigure}{0.235\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/vgg16_cuda_compression_microbench_time.pdf} \caption{VGG16 on GPU} \label{fig:vgg16-cuda-timeall} \end{subfigure} \hfill \begin{subfigure}{0.235\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet50_cuda_compression_microbench_time.pdf} \caption{ResNet50 on GPU} \label{fig:resnet50-cuda-timeall} \end{subfigure} \hfill \begin{subfigure}{0.235\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/lstm_cuda_compression_microbench_time.pdf} \caption{LSTM on GPU} \label{fig:lstm-cuda-timeall} \end{subfigure} \\ \begin{subfigure}{0.235\linewidth}
\includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet20_cpu_compression_microbench_time.pdf} \caption{ResNet20 on CPU} \label{fig:resnet20-cpu-timeall} \end{subfigure} \hfill \begin{subfigure}{0.235\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/vgg16_cpu_compression_microbench_time.pdf} \caption{VGG16 on CPU} \label{fig:vgg16-cpu-timeall} \end{subfigure} \hfill \begin{subfigure}{0.235\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/resnet50_cpu_compression_microbench_time.pdf} \caption{ResNet50 on CPU} \label{fig:resnet50-cpu-timeall} \end{subfigure} \hfill \begin{subfigure}{0.235\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/models/allratio/lstm_cpu_compression_microbench_time.pdf} \caption{LSTM on CPU} \label{fig:lstm-cpu-timeall} \end{subfigure}
\caption{Compression latency of different models using various compressors and ratios on GPU (a,b,c,d) and CPU (e,f,g,h).} \label{fig:microbench-models-time} \end{figure*}
\subsection{Compression Complexity using Synthetic Gradient Vectors of Different Sizes}
Here, we run the micro-benchmark using synthetic gradient vectors of (0.26, 2.6, 26, 260) million elements, which is equivalent to $\approx\!(1, 11, 114, 1140)$~MBytes of gradient data sent in each iteration, respectively. We aim to measure the performance of each compressor in terms of the speed-up over $\topk$ and the latency for a wide range of gradient sizes. The results match the former observations on DNN models of different sizes. In particular, \cref{fig:synth-microbench-speedup} shows the speed-up over $\topk$ on GPU and CPU for each size of the synthetic gradient vectors. We can again observe that on the GPU, all methods are faster than $\topk$, and all threshold estimation methods achieve higher speed-ups than DGC and nearly the same speed-ups as each other, which is attributed to the slow performance of $\topk$ (i.e., sorting) operations on the GPU. On the contrary, on the CPU, DGC is the slowest method and $\topk$ outperforms it, which is attributed to the slow performance of random sampling on the CPU. Threshold estimation methods maintain the same speed-ups on both GPU and CPU (albeit with different absolute compression times on the two devices).
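The synthetic vectors above are straightforward to reproduce. The sketch below, a simplified CPU-only variant of the previous timing harness, sweeps the four tensor sizes and times the linear-scan threshold selection; the threshold constant approximates $\log(1/0.001)$ from the exponential fit, and the largest tensor requires roughly 1~GB of RAM.
\begin{verbatim}
import time
import torch

sizes = [262_144, 2_621_440, 26_214_400, 262_144_000]  # ~0.26M .. 260M elems
for n in sizes:
    g = torch.randn(n)                    # CPU float32 tensor, ~4n bytes
    eta = float(g.abs().mean()) * 6.9     # ~ log(1/0.001), exponential fit
    t0 = time.perf_counter()
    idx = (g.abs() >= eta).nonzero(as_tuple=False).flatten()
    dt = time.perf_counter() - t0
    print(f"{n/1e6:7.2f}M elems (~{4*n/1e6:7.1f} MB): "
          f"{idx.numel()} selected in {1e3*dt:.1f} ms")
\end{verbatim}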
\begin{figure*}[!t] \centering \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144_cuda_compression_microbench_speedup.pdf} \caption{0.26 Mil Elem Tensor on GPU} \label{fig:randn0.26-cuda-speedup} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_2621440_cuda_compression_microbench_speedup.pdf} \caption{2.6 Mil Elem Tensor on GPU} \label{fig:rand2.6-cuda-speedup} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_26214400_cuda_compression_microbench_speedup.pdf} \caption{26 Mil Elem Tensor on GPU} \label{fig:rand26M-cuda-speedup} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144000_cuda_compression_microbench_speedup.pdf} \caption{260 Mil Elem Tensor on GPU} \label{fig:rand260M-cuda-speedup} \end{subfigure} \\
\begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144_cpu_compression_microbench_speedup.pdf} \caption{0.26 Mil Elem Tensor on CPU} \label{fig:randn0.26-cpu-speedup} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_2621440_cpu_compression_microbench_speedup.pdf} \caption{2.6 Mil Elem Tensor on CPU} \label{fig:rand2.6-cpu-speedup} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_26214400_cpu_compression_microbench_speedup.pdf} \caption{26 Mil Elem Tensor on CPU} \label{fig:rand26M-cpu-speedup} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144000_cpu_compression_microbench_speedup.pdf} \caption{260 Mil Elem Tensor on CPU} \label{fig:rand260M-cpu-speedup} \end{subfigure}
\caption{Compression speed-ups over $\topk$ of synthetic tensors using various compressors and ratios on GPU (a,b,c,d) and CPU (e,f,g,h).} \label{fig:synth-microbench-speedup} \end{figure*}
\begin{figure*}[!t] \centering \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144_cuda_compression_microbench_time.pdf} \caption{0.26 Mil Elem Tensor on GPU} \label{fig:randn0.26-cuda-time} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_2621440_cuda_compression_microbench_time.pdf} \caption{2.6 Mil Elem Tensor on GPU} \label{fig:rand2.6-cuda-time} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_26214400_cuda_compression_microbench_time.pdf} \caption{26 Mil Elem Tensor on GPU} \label{fig:rand26M-cuda-time} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144000_cuda_compression_microbench_time.pdf} \caption{260 Mil Elem Tensor on GPU} \label{fig:rand260M-cuda-time} \end{subfigure} \\ \begin{subfigure}{0.24\linewidth}
\includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144_cpu_compression_microbench_time.pdf} \caption{0.26 Mil Elem Tensor on CPU} \label{fig:randn0.26-cpu-time} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_2621440_cpu_compression_microbench_time.pdf} \caption{2.6 Mil Elem Tensor on CPU} \label{fig:rand2.6-cpu-time} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_26214400_cpu_compression_microbench_time.pdf} \caption{26 Mil Elem Tensor on CPU} \label{fig:rand26M-cpu-time} \end{subfigure} \hfill \begin{subfigure}{0.24\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/all-microbench/randnormal/allratio/randn_262144000_cpu_compression_microbench_time.pdf} \caption{260 Mil Elem Tensor on CPU} \label{fig:rand260M-cpu-time} \end{subfigure}
\caption{Compression latency of synthetic tensors using various compressors and ratios on GPU (a,b,c,d) and CPU (e,f,g,h).} \label{fig:microbench-synth-time} \end{figure*}
\section{Results of All \acp{SPD}} \label{apdx:expalldist}
Here, in \cref{fig:all}, we include the results for the other two \acp{SPD} discussed in \cref{apdx:GPDthreshold}, i.e., double Gamma and Generalized Pareto. Note that the two multi-stage \acp{SPD} added here are the 1-stage double Gamma followed by $M-1$ stages of Generalized Pareto and the multi-stage Generalized Pareto, which are referred to as \scheme\!-GP and \scheme\!-P, respectively. The results and observations, in general, match the ones we made earlier in \cref{sec:experiments} for \scheme\!-E. However, we observe that, in some cases, \scheme\!-E achieves slightly better speed-ups compared to \scheme\!-GP and \scheme\!-P. This is because the exponential-based threshold estimation is both more accurate and of slightly lower overhead, as it requires only the calculation of the mean of the gradient vector (\cref{algo:algo1}). Specifically, in these cases, \scheme\!-GP achieves the target compression ratio on average but tends to have slightly higher variance in terms of the estimation quality (e.g., \cref{fig:ptb-comp-8-all} and \cref{fig:an4-comp-8-all}). Hence, while the variance might be a concern, if it stays within the pre-defined tolerance range from the target ratio ($\epsilon_L$, $\epsilon_H$), the impact on performance is negligible.
\begin{figure*}[!h] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.75\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend3.pdf} \end{subfigure} \\
\begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_ptb/allratio_ec_nesterov/lstm_8_1_speedup.pdf} \caption{LSTM-PTB (Speed-up).} \label{fig:ptb-speedup-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_ptb/allratio_ec_nesterov/lstm_8_1_throughput.pdf} \caption{LSTM-PTB (Throughput).} \label{fig:ptb-throughput-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_ptb/allratio_ec_nesterov/lstm_8_1_compestratio.pdf} \caption{LSTM-PTB (Est.
Quality).} \label{fig:ptb-comp-8-all} \end{subfigure} \\
\begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_an4/allratio_ec_nesterov/lstm_8_1_speedup.pdf} \caption{LSTM-AN4 (Speed-up).} \label{fig:an4-speedup-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_an4/allratio_ec_nesterov/lstm_8_1_throughput.pdf} \caption{LSTM-AN4 (Throughput).} \label{fig:an4-throughput-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_an4/allratio_ec_nesterov/lstm_8_1_compestratio.pdf} \caption{LSTM-AN4 (Est. Quality).} \label{fig:an4-comp-8-all} \end{subfigure} \\
\begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/allratio_ec_sgd/resnet20_8_1_speedup.pdf} \caption{ResNet20-CIFAR10 (Speed-up).} \label{fig:resnet20-speedup-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/allratio_ec_sgd/resnet20_8_1_compestratio.pdf} \caption{ResNet20-CIFAR10 (Est. Quality).} \label{fig:resnet20-good-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_cifar10/allratio_ec_sgd/vgg16_8_1_speedup.pdf} \caption{VGG16-CIFAR10 (Speed-up).} \label{fig:vgg16-speedup-8-all} \end{subfigure} \\
\begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/allratio_ec_nesterov/resnet50_8_1_speedup.pdf} \caption{ResNet50-ImageNet (Speed-up).} \label{fig:resnet50-speedup-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/allratio_ec_nesterov/resnet50_8_1_throughput.pdf} \caption{ResNet50-ImageNet (Throughput).} \label{fig:resnet50-tput-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/allratio_ec_nesterov/resnet50_8_1_compestratio.pdf} \caption{ResNet50-ImageNet (Est. Quality).} \label{fig:resnet50-comp-8-all} \end{subfigure} \\
\begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/allratio_ec_nesterov/vgg19_8_1_speedup.pdf} \caption{VGG19 on ImageNet (Speed-up).} \label{fig:vgg19-speedup-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/allratio_ec_nesterov/vgg19_8_1_throughput.pdf} \caption{VGG19 on ImageNet (Throughput).} \label{fig:vgg19-tput-8-all} \end{subfigure} \hfill \begin{subfigure}[ht]{0.32\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/all-endtoend/mcnodes_imagenet/allratio_ec_nesterov/vgg19_8_1_compestratio.pdf} \caption{VGG19 on ImageNet (Est.
Quality).} \label{fig:vgg19-comp-8-all} \end{subfigure}
\caption{Performance of using 8 nodes for training an LSTM on PTB [(a),(b),(c)], an LSTM on AN4 [(d),(e),(f)], ResNet20 [(g),(h)] and VGG16 [(i)] on CIFAR10, and ResNet50 [(j), (k), (l)] and VGG19 [(m), (n), (o)] on the ImageNet dataset.} \label{fig:all} \end{figure*}
\section{Introduction}
As \acp{DNN} continue to become larger and more sophisticated, and ever-increasing amounts of training data are used~\cite{megatronml,gpt3}, scaling the training process to run efficiently on a distributed cluster is a crucial objective that is attracting a multitude of efforts~\cite{nvidiasummit,PipeDream,bytescheduler,DML_survey}. Modern deep learning toolkits~\cite{pytorch,tensorflow,horovod} are capable of distributed data-parallel training whereby the model is replicated and the training data are partitioned among workers. In practice, training \acp{DNN} in such settings relies on synchronous distributed \ac{SGD} or similar optimizers (refer to \cref{apdx:dsgd} for more details). Let $\N$ be the number of workers, and $\xi[i] \in \R^{\d}$ denote the model parameters with $\d$ dimensions at iteration $i$. At the end of the $i^{\text{th}}$ iteration, each worker runs the back-propagation algorithm to produce a local stochastic gradient, $\gin[i] \in \R^{\d}$, at worker $n$. Then, each worker updates its model parameters using the final gradient aggregated across all workers as $\xi[i+1] = \xi[i] - \lambda \frac{1}{\N} \sum_{n=1}^{\N} \gin[i]$, where $\lambda$ is the learning rate. Gradient aggregation involves communication, which is either between the workers in a peer-to-peer fashion (typically through collective communication primitives like all-reduce) or via a parameter server architecture. Due to the synchronous nature of the optimizer, workers cannot proceed with the $(i+1)^{\text{th}}$ iteration until the aggregated gradient is available. Therefore, in distributed training workloads, communication is commonly one of the predominant bottlenecks~\cite{lin2018deep,Fang2019}.
Addressing this communication bottleneck is the focus of this paper, where we pursue the path of improving training by reducing the communicated data volume via lossy gradient compression. Compression entails two main challenges: $(i)$ it can negatively affect the training accuracy (because the greater the compression, the larger the error in the aggregated gradient), and $(ii)$ it introduces extra computation latency (due to the compression operation itself). While the former can be mitigated by applying compression to a smaller extent or using compression with error-feedback~\cite{lin2018deep,ef-sgd}, the latter, if left unchecked, can actually slow down training compared to not compressing. Surprisingly, much of the prior work in this area has ignored the computational overhead of compression. Given that modern clusters for deep learning workloads use high-speed, low-latency network fabrics (e.g., $100$~Gbps Ethernet or InfiniBand), we argue that the efficiency of compression needs to be explicitly accounted for.
Motivated by the above observations, we propose \scheme\! compression.\footnote{Our code release is available at \url{https://github.com/sands-lab/SIDCo}.} \scheme\! builds on a sound theory of signal compressibility and enjoys linear complexity in the size of model parameters. Importantly, this affords an implementation that parallelizes very efficiently on modern GPUs and other hardware targets.
Thus, our work addresses a previously overlooked yet crucial technical obstacle to using compression in practice, especially for communication-bound training of large models.
\subsection{Related Work}
Efficient communication in distributed training has received extensive attention~\cite{PipeDream,Wangni18,grace}. One approach tries to maximize the overlap between the computation and communication to hide the communication overhead~\cite{PipeDream,bytescheduler}. However, the gains from these methods are bounded by the length of the computation and are modest when the training is dominantly communication-bound. Alternatively, many approaches reduce the amount of communication, either in volume~\cite{lin2018deep} or in frequency~\cite{patel2019communication}. In this work, we focus on gradient compression, as it shows considerable benefits.
\smartparagraph{Gradient Compression} is a well-known volume reduction technique~\cite{lin2018deep,Fang2019,Wangni18,Ahmed-AAAI-2020,grace}. Each worker applies a compression operator $\C$ to $\gin[i]$ to produce a compressed gradient vector that is transmitted for aggregation. Generally, the compressor $\C$ involves quantization and/or sparsification operations.
\smartparagraph{Gradient Quantization} represents gradients with fewer bits for each gradient element. Under some conditions, quantization is known to achieve the same convergence as no compression~\cite{wu_memqsgd,pmlr-v119-fu20c}. \ac{EC} is used to attain convergence when gradients are quantized using fewer bits~\cite{wu_memqsgd,ef-sgd,grace}. Given the standard 32-bit float number representation, the volume reduction of quantization is limited to $32\times$, i.e., $1$~bit out of $32$~bits, which may not be sufficient for large models or slow networks, and it requires expensive encoding to pack the quantized bits~\cite{Ahmed-CONEXT-2020}.
\smartparagraph{Gradient Sparsification} selects a subset of gradient elements. It is generally more flexible than quantization, as it can reduce volume by up to $\d\times$ and adapts easily to network conditions~\cite{Ahmed-DC2-INFOCOM21}. It was shown that in some cases, up to 99.9\% of the non-significant gradient elements can be dropped with limited impact on convergence~\cite{lin2018deep,aji_sparse,shi2019understanding}. Gradient sparsification using $\topk$ -- selecting the top $\k$ elements by their magnitude -- is known to yield better convergence compared to other compression schemes, e.g., Random-$k$~\cite{lin2018deep,Alistarh18_sparse}. However, $\topk$ and its variants are notorious for being computationally inefficient~\cite{grace}. $\topk$ selection does not perform well on accelerators such as GPUs~\cite{ShanbhagMIT2018}. For instance, in many cases, it is reported that $\topk$ imposes high overheads and worsens the run-time of distributed training~\cite{shi2019understanding, grace}.
\subsection{Background and Motivation}
The main challenge with using gradient compression (e.g., sparsification or quantization) is the computational overhead it introduces into the training. If the overhead is greater than the reduction in communication time, the overall iteration time increases. Hence, to be useful, a robust compressor should have a low overhead~\cite{Fang2019,shi2019understanding}. As presented earlier, one of the most robust compressors is $\topk$; however, it is also computationally heavy~\cite{Fang2019,shi2019understanding,shi2021towards}.
Because of this, for large models, $\topk$ results in either increased training time or unsatisfactory performance benefits. Numerous efforts based on algorithmic or heuristic approaches have been dedicated to enhancing the performance of $\topk$~\cite{lin2018deep,shi2019understanding,ShanbhagMIT2018,SketchML}. Existing fast implementations of $\topk$ are compute-intensive (e.g., on CPU, the computational complexity is $\mathcal{O}(\d~\log_{2}\k)$)~\cite{ShanbhagMIT2018}. Recently, more optimized implementations for multi-core hardware have been proposed, which greatly depend on the data distribution and work best for small values of $\k$~\cite{ShanbhagMIT2018}. For instance, the Radix select algorithm used in PyTorch is $\mathcal{O}\left(\lceil{b}/{r}\rceil~\d\right)$, where $b$ is the number of bits in the data values and $r$ is the radix size~\cite{pytorch}. Yet, using gradient vectors of various sizes, $\topk$ is the slowest on GPUs and not the fastest on CPUs, as shown later in our micro-benchmark and in \cref{apdx:moremicrobench}.
In the context of gradient compression, {\em threshold-based methods}, aiming to overcome the overhead of $\topk$, select, in linear time, the gradient elements larger in magnitude than a threshold $\eta$. \ac{DGC}~\cite{lin2018deep} proposes to sample a random subset of the gradients (e.g., 1\%) and apply $\topk$ on the sampled sub-population to find a threshold, which is then used to obtain the actual $\topk$ elements hierarchically.\footnote{Aside from the expensive random sampling, in the worst case, DGC invokes $\topk$ twice: once on the subset to obtain a threshold and again to obtain the $\k$ elements if the number of elements selected via the threshold exceeds the target $\k$.} Even though DGC leads to improved performance over $\topk$, its computational complexity is still of the same order as $\topk$'s. {\em Threshold estimation methods}, on the other hand, are shown to attain linear time complexity~\cite{aji_sparse,Alistarh18_sparse,Dryden2016CommunicationQF}.
Recently, several works have leveraged certain features of the gradients to enhance the training process~\cite{Narang2018,pmlr-v119-fu20c}. Some approaches leverage these features and devise heuristics to estimate the $\topk$ threshold, which exhibits a lower compression overhead compared to $\topk$ and \ac{DGC}~\cite{Fang2019,shi2019understanding}. In particular, RedSync~\cite{Fang2019} finds the threshold by adjusting the ratio between the maximum and mean values of the gradient; GaussianKSGD~\cite{shi2019understanding} adjusts an initial threshold, obtained from fitting a Gaussian distribution, through an iterative heuristic to obtain the $\topk$ elements. Nevertheless, the threshold estimation of these methods is of poor quality, and the number of selected elements, $\hat{k}$, deviates significantly from the target $\k$ (\cref{sec:experiments}).
In this work, we propose a statistical approach to estimate an accurate threshold for selecting the $\topk$ elements with minimal overhead. In particular, we exploit the compressibility of the gradients and opt for \acp{SPD} that fit the gradients well. For instance, double exponential (i.e., Laplace), double gamma and double generalized Pareto distributions have been used as sparsity-promoting priors in Bayesian estimation frameworks~\cite{MonMouUma:18,ArmDunLee:13,BabMolKat:10}.
Our study of the gradients supports the assumptions of their compressibility and of the suitability of modeling them as \acp{r.v.} distributed according to one of the \acp{SPD}.
\begin{figure*}[t] \centering \begin{subfigure}[ht]{0.4\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend2.pdf} \end{subfigure} \\
\begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/microbench/models/allratio/vgg16_cuda_compression_microbench_speedup.pdf} \caption{Compression with GPU} \label{fig:vgg16-cuda-speedup} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/microbench/models/allratio/vgg16_cpu_compression_microbench_speedup.pdf} \caption{Compression with CPU} \label{fig:vgg16-cpu-speedup} \end{subfigure} \hfill \begin{subfigure}[ht]{0.31\linewidth} \includegraphics[width=1\textwidth]{Figures/experiments/endtoend/mcnodes_cifar10/allratio_ec_sgd/vgg16_8_1_compestratio.pdf} \caption{Quality of Threshold Estimation} \label{fig:vgg16-good-8} \end{subfigure}
\caption{The compression speed-ups over $\topk$ using different compression ratios $(0.1, 0.01, 0.001)$, on (a) GPU and (b) CPU. (c) shows the average estimation quality of the target $\k$. The experiments are performed for VGG16 (\cref{tab:models}) with the setup detailed in \S\ref{sec:algorithm}.} \end{figure*}
\begin{table*}[!t] \caption{Summary of the benchmarks used in this work.} \centering \scalebox{0.61}{ \begin{tabular}{ccccrrrrccc } \toprule Task & \makecell{Neural \\ Network} & Model & Dataset & \makecell{Training \\ Parameters} & \makecell{Per-Worker \\Batch Size} & \makecell{Learning \\ Rate} & Epochs & \makecell{Comm \\ Overhead} & \makecell{Local \\ Optimizer} & \makecell{Quality \\ metric} \\\midrule \multirow{3}{*}{\makecell{Language \\ Modeling}} & \multirow{3}{*}{RNN} & \multirow{3}{*}{\makecell{ LSTM\\ \citep{lstm} \\ 2 layers-1500 hidden units}} & \multirow{3}{*}{\makecell{PTB\\ \citep{ptb}}} & 66,034,000 & 20 & 22 & 30 & \textbf{\textcolor{red}{94\%}} & NesterovMom-SGD & Test Perplexity \\\\\\ \midrule \multirow{2}{*}{\makecell{Speech \\ Recognition}} & \multirow{2}{*}{RNN} & \multirow{2}{*}{\makecell{ LSTM \\ 5 layers-1024 hidden units}} & \multirow{2}{*}{\makecell{AN4\\ \citep{an4}} }& 43,476,256 & 20 & 0.004 & 150 & \textbf{\textcolor{magenta}{80\%}} & NesterovMom-SGD & WER \& CER \\\\ \midrule \multirow{6}{*}{\makecell{Image \\ Classification}} & \multirow{6}{*}{CNN} & {\makecell{ResNet-20\\ \citep{resnet152}}} \ & {\makecell{CIFAR-10\\ \citep{cifar10}}} & 269,467 & 512 & 0.1 & 140 & \textcolor{black}{10\%} & SGD &\multirow{6}{*}{Top-1 Accuracy} \\ & & {\makecell{VGG16\\ \citep{vgg}}} & CIFAR-10 & 14,982,987 & 512 & 0.1 & 140 & \textbf{\textcolor{orange}{60\%}} & SGD & \\ & & ResNet-50 & {\makecell{ImageNet\\ \citep{imagenet}}} & 25,559,081 & 160 & 0.2 & 90 & \textbf{\textcolor{orange}{72\%}} & NesterovMom-SGD & \\ & & VGG19 & ImageNet & 143,671,337 & 160 & 0.05 & 90 & \textbf{\textcolor{magenta}{83\%}} & NesterovMom-SGD & \\ \bottomrule \end{tabular} \label{tab:models} } \end{table*}
To motivate our approach, we conduct initial micro-benchmark experiments to evaluate the compression overhead of sparsification techniques: $\topk$, \ac{DGC} (which uses a random sub-sample for threshold calculation), RedSync and GaussianKSGD (which heuristically estimate the threshold), and one of our proposed \scheme\! schemes that estimates the threshold via multi-stage fitting (\cref{sec:analysis}).
We use both CPU and GPU to benchmark the performance (see~\cref{apdx:clusters}). We show the speed-up of different compressors normalized by the compression speed of $\topk$. We observe from the results that methods based on random sub-sampling (e.g., \ac{DGC}) excel on GPU (\cref{fig:vgg16-cuda-speedup}), but they impose a huge overhead on CPU, leading to DGC performing significantly worse than $\topk$ on CPU (\cref{fig:vgg16-cpu-speedup}). In contrast, methods that are based on estimating a threshold over which only $\k$ elements are selected impose a consistently lower compression overhead compared to $\topk$ and \ac{DGC} on both GPU and CPU. This shows that, except for linear-time threshold-based methods, a variable compression overhead is to be expected on different architectures (e.g., CPU, GPU, TPU, FPGA or AI chips).\footnote{We note that many efforts are dedicated to optimizing and enabling fast training on low-cost devices such as CPUs instead of opting for expensive hardware accelerators~\cite{Vincent2011,Das2018,Beidi2020}.} \cref{fig:vgg16-good-8} shows the normalized actual compression ratio (i.e., $\hat{k}/k$) for various schemes; note that the heuristic approaches fail to obtain the right threshold, leading to unpredictable behavior.
\subsection{Contributions}
In this work, we make the following contributions:
\begin{itemize}[noitemsep,topsep=0pt,leftmargin=10pt]
\item We exploit the sparsity of the gradients by modeling them as \acp{r.v.} with \acp{SPD} and propose a multi-stage fitting technique based on \ac{PoT}, which works well with aggressive sparsification ratios and adapts to the distribution changes of the gradients.
\item We design \scheme\!, a threshold sparsification method with closed-form expressions for three \acp{SPD}, to keep the compression overhead as low as possible.
\item We show that \scheme\! consistently outperforms existing approaches via an extensive set of numerical and experimental evaluations on different benchmarks.
\end{itemize}
\section{Proposed Gradient Model and Threshold Estimation} \label{sec:analysis}
We first discuss the compressibility of the gradients and their statistical distribution. Then, we propose two threshold-based schemes that leverage the compressibility of the gradients.
\subsection{Gradient Compressibility}
Signals, including gradient vectors of \acp{DNN}, can be efficiently compressed by exploiting some of their inherent features. Among these features, sparsity and compressibility are the key drivers for performing signal compression~\cite{Mal:08,Elzanaty19,ElzGioChi:19}. We start with a precise definition of compressible signals.
\begin{definition}[Compressible Signals \cite{BarDavDua:11}]\label{def:compressable} The signal ${\mathbf \gr}\in \mathbb{R}^\d$ is compressible if the magnitudes of its sorted coefficients obey the following power law decay: \begin{equation}\label{eq:powerlaw} \tilde{g}_{j} \leq c_{1}\, j^{-\p} \quad \forall j \in \{1,2,\cdots, \d \}, \end{equation} where $\tilde{\mathbf \gr}$ is the sorted vector of ${|\mathbf \gr|}$ in descending order, ${\tilde{g}}_{j}$ is the $j^{\text{th}}$ element of $\tilde{\mathbf \gr}$, and $\p>1/2$ is the decay exponent, for some constant $c_{1}$.
For compressible signals with power law decay, the sparsification error, $\sigmak(\mathbf \gr)$, is bounded as \begin{equation}\label{eq:bestkapprox} \sigmak (\mathbf \gr) \triangleq \norm{\mathbf \gr -\Tk[\mathbf \gr]}_{2} \leq c_{2}\, \k^{1/2-\p}, \end{equation} where $\norm{\mathbf x}_{q}={\left(\sum_{j=1}^{\d} \mathbf x_{j}^{q}\right)}^{1/q} $ is the $\ell_{q}$ norm of $\mathbf x$, $\Tk[\cdot]$ is the $\topk$ sparsification operator that keeps only the largest $\k$ elements in magnitude and sets the others to zero, $\Tk[\mathbf \gr]$ is a \k-sparse vector with only $\k$ non-zero elements, and $c_2$ is a constant. The signal is more compressible if it decays faster, i.e., $\p$ is higher~\cite{Devore:98}. \end{definition} %
\begin{property}[Gradients Compressibility]\label{property:gradientcompressible} The gradients generated during the training of most \acp{DNN} are compressible in the sense of \cref{def:compressable}. \end{property} %
\begin{myreasoning} From \cref{def:compressable}, it can be verified whether the gradient vectors are compressible. In \cref{apdx:statmethods}, we empirically validate that the gradients generated during the training of widely-adopted \acp{DNN} respect the condition for compressibility stated in \eqref{eq:powerlaw} and \eqref{eq:bestkapprox}. \end{myreasoning}
\subsection{Gradient Modeling}
The target now is to find the distribution of the gradient vector while accounting for its compressibility. The selection of sparsity-promoting priors that are able to efficiently capture the statistical characteristics of the gradients with low computational complexity is a challenging task. However, we notice an essential property of the distribution of the gradients that permits high compression gains with low computational overhead.
\begin{property}\label{property:sparspromotdist} Gradients generated from many \acp{DNN} during training can be modeled as \acp{r.v.} distributed according to some \aclp{SPD}, i.e., double exponential, double gamma and double \ac{GPD} distributions. More precisely, we have \begin{equation} \G \mathrel{\dot\sim} \operatorname{Distribution}(\boldsymbol{\Theta}), \end{equation} where $\operatorname{Distribution}({\cdot})$ is one of the three \acp{SPD} with parameters indicated by the vector $\boldsymbol{\Theta}$ that generally depends on the iteration and worker's data. Also, the \ac{PDF} of $\G$, $f_{\G}(\gr;\boldsymbol{\Theta})$, is symmetric around zero. \vspace{-0.1cm} \end{property} \vspace{-0.1cm}
\begin{myreasoning} Since the gradients are compressible, as indicated by Property~\ref{property:gradientcompressible}, they can be well approximated by sparse vectors with minimal error, as implied by \eqref{eq:bestkapprox}. Hence, the distributions that promote sparsity are good candidates for fitting (or modeling) the gradient vectors.\footnote{For threshold estimation, we are interested in the distribution of the amplitude of a random element in the gradient vector.} For instance, the double exponential, double gamma, double \ac{GPD}, and Bernoulli-Gaussian distributions have been used as priors that promote sparsity in \cite{MonMouUma:18,ArmDunLee:13,BabMolKat:10,Elzanaty19}. Property~\ref{property:sparspromotdist} is empirically verified for several \ac{DNN} architectures and datasets in \cref{sec:algorithm} and \cref{apdx:graddist}.
\label{apdx:graddist} \label{sec:empvalid}
\begin{figure*}[!ht] \centering \begin{subfigure}[h]{0.46\textwidth} \includegraphics[width=1\textwidth]{Figures/modeling/FittingPDFSA.pdf} \caption{} \label{fig:PDF1} \end{subfigure} \hfill \begin{subfigure}[h]{0.46\textwidth} \includegraphics[width=1\textwidth]{Figures/modeling/FittingCDFSA.pdf} \caption{} \label{fig:CDF1} \end{subfigure} \\ \begin{subfigure}[h]{0.46\textwidth} \includegraphics[width=1\textwidth]{Figures/modeling/FittingPDF2SA.pdf} \caption{} \label{fig:PDF2} \end{subfigure} \hfill \begin{subfigure}[h]{0.46\textwidth} \centering \includegraphics[width=1\textwidth]{Figures/modeling/FittingCDF2SA.pdf} \caption{} \label{fig:CDF2} \end{subfigure}
\caption{Fitting using the three \acp{SPD} for the gradient vector along with the empirical distribution generated from training ResNet-20 on CIFAR10 using the $\topk$ compressor without the \ac{EC} mechanism, for the $100^{\text{th}}$ [(a) PDF, (b) CDF] and $10000^{\text{th}}$ [(c) PDF, (d) CDF] iterations.} \label{fig:fitteddistributions} \end{figure*}%
For instance, we consider the gradients resulting from the training of ResNet-20 with \ac{SGD}. The collected gradients are fitted by the three proposed \acp{SPD}, i.e., double exponential, double gamma, and double \ac{GPD} distributions. In \cref{fig:fitteddistributions}, the empirical distribution of the gradients and their absolute values, without the \ac{EC} mechanism, are shown along with the distributions of the three fitted \acp{SPD} for two iterations. We can notice in \cref{fig:PDF1} that the three proposed distributions can capture the main statistical characteristics of the gradients, as their \acp{PDF} approximate the empirical distribution for most of the gradient domain. This is expected given the compressibility of the gradients illustrated before. The compressibility of \acp{r.v.} distributed according to one of the \acp{SPD} can be attributed to the shape of their \acp{PDF}, where the most probable values are those with small amplitudes. From \cref{fig:PDF1} and \cref{fig:PDF2}, it can be seen that the gradients at iteration $10000$ (\cref{fig:PDF2}) are sparser than those at iteration $100$ (\cref{fig:PDF1}), where the \ac{PDF} at iteration $10000$ has higher values at smaller gradient values and a faster-decaying tail. Regarding the \ac{CDF} of the absolute value of the gradients in \cref{fig:CDF1} and \cref{fig:CDF2}, we can see that the \acp{SPD} approximate the empirical \ac{CDF} well. However, at the tail of the distribution, they tend to overestimate/underestimate the \ac{CDF} slightly. The reason is that the fitting is biased toward the majority of the data with lower values, as the gradient vector is sparse. \end{myreasoning}
\subsection{Single-Stage Threshold Estimator}\label{sec:sst-thresh}
We now describe the proposed compression scheme. First, the threshold that yields the target compression ratio, $\delta\triangleq \k/\d$, is derived for each of the three proposed \acp{SPD}. Then, we present a single-stage thresholding scheme for moderate compression ratios. For aggressive compression ratios with $\delta \ll 1$, e.g., $\delta \leq 0.001$, we propose a multi-stage thresholding scheme to accurately estimate the threshold.
The sparsification threshold can be computed from the fitted distribution of the gradients as follows:
\begin{lemma} For $\G {\sim}\operatorname{Distribution}(\boldsymbol{\Theta})$ with \ac{CDF} $F_{\G}(\gr;\boldsymbol{\Theta})$, the threshold $\eta$ that yields the $\topk$ vector with average target compression ratio $\delta \triangleq \k/\d$ can be derived as \begin{align} \label{eq:thresholdabs} \eta(\delta) &= F_{|\G|}^{-1}(1-\delta;{\widehat{\boldsymbol{\Theta}}}) \\ &=F_{\G}^{-1}\left(1-\frac{\delta}{2};{\widehat{\boldsymbol{\Theta}}}\right), \label{eq:threshold} \end{align} where $\widehat{\boldsymbol{\Theta}}$ contains the estimated parameters of the gradient distribution, $F_{|\G|}(g;\widehat{\boldsymbol{\Theta}})$ is the \ac{CDF} of the absolute gradient, $F_{|\G|}^{-1}(p;\widehat{\boldsymbol{\Theta}}) \triangleq \left\{g\in \mathbb{R}^{+} : F_{|\G|}(g;\widehat{\boldsymbol{\Theta}})=p \right\} $ is the inverse \ac{CDF} of the absolute gradient at probability $p$, and $F_{\G}^{-1}(p;{\widehat{\boldsymbol{\Theta}}})$ is the inverse \ac{CDF} of the gradient, also known as the quantile function or \ac{PPF}. \vspace{-0.2cm} \end{lemma}
\begin{proof} From Property~\ref{property:sparspromotdist}, the gradients can be modeled as \acp{r.v.} distributed according to a \ac{SPD} with \ac{CDF} $F_{G}(g)$. Next, we would like to derive a threshold $\eta$ such that, on average, the absolute values of $\k$ elements out of $\d$ are larger than $\eta$. The problem can be seen as a binomial random process, where the number of trials is $\d$, the success probability is the probability that the absolute value of the gradient is larger than $\eta$, i.e., $p \triangleq \mathbb{P}\left\{\left|\G\right| \geq \eta \right\}$, and the average number of successes (exceedances) is $\k$. In this process, the number of exceedances is a binomial \ac{r.v.} with $\d$ trials and success probability $p$~\cite{Papoulis:02}. The mean of the number of exceedances is $\d\,p$. In order to have, on average, $\k$ elements out of $\d$, we should have $\mathbb{P}\left\{\left|\G\right| \geq \eta \right\}=\delta.$ Hence, the threshold $\eta$ is the $100(1-\delta)^{\text{th}}$ percentile of the distribution of the absolute gradients, as in \eqref{eq:thresholdabs}. From the symmetry of the gradient distribution around zero, we have $\mathbb{P}\left\{\left|\G\right| \geq \eta \right\}=2\,\mathbb{P}\left\{\G \leq -\eta \right\}$. Therefore, from \eqref{eq:thresholdabs}, we get $ \eta\!=\! - {F}_{\G}^{-1}\left({\delta}/{2};{\widehat{\boldsymbol{\Theta}}}\right)\!=\!F_{\G}^{-1}\left(1\!-\!{\delta}/{2}; {\widehat{\boldsymbol{\Theta}}}\right). $ \vspace{-10pt} \end{proof}
In the following, we report the threshold calculation for gradients modeled by the double exponential distribution. The corresponding analysis for double gamma and \ac{GPD} is presented in \cref{apdx:threshodcalculation}.
\begin{corollary}\label{corollary:Laplacethreshold} For double exponentially distributed gradients with scale parameter $\b$ and location zero (symmetric around zero), i.e., $ \G{\sim} \operatorname{Laplace}(\b)$, the threshold that achieves $\delta$ compression ratio can be computed as \begin{equation}\label{eq:LaplaceThreshold} \eta= \hat{\b} \log\left(\frac{1}{\delta}\right), \qquad \quad \hat{\b}\triangleq \frac{1}{\d}\,\sum_{j=1}^{\d} \left|\gr_{j}\right|, \end{equation} where $\hat{\b}$ is the \ac{MLE} of the scale parameter.
\vspace{-0.1cm} \end{corollary}
\begin{proof} For $ \G {\sim} \operatorname{Laplace}(\b)$, the absolute value of the gradient follows an exponential distribution with scale $\b$, i.e., $\left|\G \right| \sim \operatorname{Exp}(\b)$~\cite{EvaHasPea:93}. From the inverse \ac{CDF} of the exponential distribution at probability $p$, i.e., {$F_{|\G|}^{-1}(p)=-{\b} \log(1-p)$}, the \ac{MLE} of $\b$~\cite{EvaHasPea:93}, and \eqref{eq:thresholdabs}, the threshold in \eqref{eq:LaplaceThreshold} follows. \end{proof} \vspace{-7pt}
\textbf{Gradient compression through thresholding:} After computing the threshold, the compressed gradient vector is found as $\widehat{{\gr}}_{j} =\Ceta\left\{\gr_{j} \right\} \triangleq \gr_{j}\, \mathbb{I}_{\left\{\left|\gr_{j}\right| \geq \eta \right\}},$ for each $j \in \{1,2,\cdots, \d\},$ where $\widehat{\mathbf \gr} \in \mathbb{R}^{\d}$ is the compressed gradient vector and $\mathbb{I}_{\{\text{condition}\}}$ is an indicator function that equals one when the condition is satisfied and zero otherwise. In the following, we denote by $\bar{\mathbf \gr}$ and $\kh$ the vector that contains only the exceedance non-zero gradients and their number, respectively.\footnote{Note that the compressed vector $\widehat{\mathbf \gr}$ coincides with the $\topk$ sparsified gradient with $\k=\kh$, i.e., $\Ceta\left\{\gr_{j} \right\}=\mathbb{T}_{\kh} \left\{\gr_{j} \right\}$.}
\textbf{Possible issues in far-tail fitting:} The target compression ratio $\delta$ can be as low as $10^{-4}$. Therefore, in order to accurately estimate the threshold, the fitted distribution should tightly resemble the gradient distribution at the tail. This is quite challenging because the estimation of the parameters tends to account more for the majority of the data at the expense of the tail. Hence, the threshold obtained from single-stage fitting is accurate up to some moderate compression ratios. For lower compression ratios, the threshold tends to underestimate/overestimate the target $\delta$. Hence, a more accurate tail-fitting method is required to reduce the bias induced by the majority of non-significant gradients, as we show next.
\subsection{Multi-Stage Threshold Estimator} \label{sec:mst-thresh}
We propose a multi-stage fitting approach to overcome the far-tail estimation problem. For convenience, we start with the two-stage approach. First, the gradients are fitted with one of the three \acp{SPD} and compressed using the proposed procedure in \cref{sec:sst-thresh} with a threshold $\eta_{1}$ computed to yield an initial compression ratio $\delta_{1} \triangleq \k_{1}/\d > \delta$. Then, the vector of the exceedance gradients, $\bar{\mathbf \gr}$, is used to fit another distribution, defined precisely below. Next, another threshold $\eta_{2}$ is computed to achieve a compression ratio $\delta_{2} \triangleq \k_{2}/\k_{1}$ with respect to the exceedance gradients. The second compression ratio is chosen such that the overall compression ratio of the original gradient vector is the target ratio $\delta$, i.e., $\delta_{2}={\delta}/{\delta_{1}}$. Then, the estimated threshold from the last stage is applied to compress the original gradient vector. This procedure can be extended to multiple stages such that $\delta=\prod_{m=1}^{M}\delta_{m}$, where $M$ is the number of stages. The remaining question is whether the exceedance (also known as \ac{PoT}) gradients have the same distribution as the original gradients before the compression.
The extreme value theory in statistics can provide an answer for this question \cite{KotNad:00,Smith:84,Leadbetter:91,Coles:01}. Let $\kh_{m}$ be the number of exceedance gradients after the $m^{th}$ thresholding stage. Then, if we apply a threshold operator on a sequence of \acp{r.v.}, $|\G_{1}|, |\G_{2}|, \cdots, |\G_{\kh_{m-1}}|$, the distribution of the exceedance \acp{r.v.}, ${|\bar{\G}_{1}|}, {|\bar{\G}_{2}|}, \cdots, {|\bar{\G}_{\kh_{m}}|}$, can be approximated by a \ac{GPD} for a large enough threshold and vector dimension, irrespective of the original distribution of the gradients. Next, we exploit the extreme value theory to compute the threshold for the multi-stage approach. \begin{lemma}\label{lemma:PoT} Consider the $m^{th}$ thresholding stage with $m\geq 2$, and assume that the absolute values of the exceedance gradients, $|\bar{\G}_{m}|$, can be modeled as $\operatorname{GP}(\a_{m},\b_{m},a_{m})$, where $-1/2 < \a_{m} < 1/2$, $\b_{m}$, and $a_{m}={\eta}_{m-1}$ are the shape, scale, and location parameters, respectively. The threshold that achieves a compression ratio $\delta_{m}$ is obtained as \begin{align}\label{eq:msthresholddgpd} \eta_{m} &= \frac{\bhat_{m}}{\ahat_{m}} \left(e^{-\ahat_{m} \log\left(\delta_{m}\right)}-1 \right) +{\eta}_{m-1}, \\ \ahat_{m} &\triangleq \frac{1}{2}\, \left(1-\frac{\bar{\mu}^2}{\bar{\sigma}^2} \right), \\ \bhat_{m} &\triangleq \frac{1}{2}\, \bar{\mu} \left(\frac{\bar{\mu}^2}{\bar{\sigma}^2} +1 \right), \end{align} where ${\eta}_{m-1}$ is the threshold computed at the previous stage and $\bar{\mu}$ and $\bar{\sigma}^2$ are the sample mean and variance of $|{\bar{\mathbf \gr}}_{m}|-\eta_{m-1}$, respectively. For the proof of \cref{lemma:PoT}, please refer to \cref{apdx:prooflemmaPoT}. \vspace{-0.1cm} \end{lemma} \begin{corollary}\label{corollary:expPoT} If the absolute values of the gradients are modeled as exponentially distributed \acp{r.v.}, $\left|\G_{m} \right| \sim \operatorname{Exp}(\b_{m})$, then the exceedance gradients over the threshold $\eta_{m-1}$ are, after proper shifting, still exponentially distributed, i.e., ${|\bar{\G}_{m}|}-\eta_{m-1} \sim \operatorname{Exp}(\b_{m})$. The new stage threshold is \begin{align} \eta_{m}&= {\bhat}_{m} \log\left(\frac{1}{\delta_{m}}\right)+\eta_{m-1},\\ {\bhat}_{m}&\triangleq \frac{1}{\kh_{m-1}}\,\sum_{j=1}^{\kh_{m-1}} \left|\bar{\gr}_{j}\right|-\eta_{m-1}, \end{align} where $\bar{\gr}_{j}$ is the $j^\text{th}$ element of the vector ${\bar{\mathbf \gr}}_{m}$. \textnormal{The proof is provided in \cref{apdx:proofcorollaryexpPoT}.} \end{corollary} In the proposed scheme, we exploit \cref{corollary:expPoT} such that when the absolute values of the gradients are fitted by an exponential distribution in the first stage, the latter stages for the exceedance gradients are also fitted by exponential distributions, i.e., multi-stage exponential. On the other hand, for gamma-fitted absolute gradients in the first stage, the latter stages are fitted by a \ac{GPD}, from \cref{lemma:PoT}, i.e., gamma-\ac{GPD}. Finally, for \ac{GPD} distributed absolute gradients in the first stage, the \ac{GPD} is still used for the \ac{PoT} data, from \cref{lemma:PoT}, i.e., multi-stage \ac{GPD}. \begin{figure*}[t!]
\captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.5\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend3.pdf} \end{subfigure} \\ \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_ptb/allratio_ec_nesterov/lstm_8_1_speedup.pdf} \caption{LSTM-PTB (Speed-up).} \label{fig:ptb-speedup-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_ptb/allratio_ec_nesterov/lstm_8_1_throughput.pdf} \caption{LSTM-PTB (Throughput).} \label{fig:ptb-throughput-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_ptb/allratio_ec_nesterov/lstm_8_1_compestratio.pdf} \caption{LSTM-PTB (Estimation Quality).} \label{fig:ptb-comp-8} \end{subfigure} \\ \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_an4/allratio_ec_nesterov/lstm_8_1_speedup.pdf} \caption{LSTM-AN4 (Speed-up).} \label{fig:an4-speedup-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_an4/allratio_ec_nesterov/lstm_8_1_throughput.pdf} \caption{LSTM-AN4 (Throughput).} \label{fig:an4-throughput-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_an4/allratio_ec_nesterov/lstm_8_1_compestratio.pdf} \caption{LSTM-AN4 (Estimation Quality).} \label{fig:an4-comp-8} \end{subfigure} \caption{Performance of training RNN-LSTM on PTB [(a),(b),(c)] and AN4 [(d),(e),(f)] datasets.} \label{fig:lstm} \end{figure*} \section{\ac{SIDCo} Algorithm} \label{sec:algorithm} \scheme\! leverages \acp{SPD} to obtain a threshold via the multi-stage threshold estimator described in \cref{sec:mst-thresh}. We select the number of stages, $M$, via an adaptive algorithm such that the estimation error, averaged over $Q$ iterations, is bounded below a predefined error tolerance, i.e., \begin{align}\label{eq:boundederror} &\left\lvert \hat{\delta}-\delta \right\rvert \leq \epsilon \, \delta, && 0 \leq \epsilon < 1\,. \end{align} First, we describe the algorithm that \scheme\! follows to perform the gradient compression. The full pseudo-code is shown in \cref{algo:algo1} of the Appendix. In each iteration, the algorithm takes as input the gradient vector and produces a compressed vector. The vector is sparsified through the multi-stage fitting strategy described in \cref{sec:mst-thresh}. In each stage, the function {\em Thresh\_Estimation} uses the chosen \ac{SPD} to obtain a threshold. The algorithm dynamically adapts the number of stages $M$ by monitoring the quality of its estimated selection of elements and adjusting $M$ using the function {\em Adapt\_Stages}. The algorithm starts by calling the {\em sparsify} function, which takes the gradient and the target ratio as parameters. Then, the algorithm applies a multi-stage estimation loop of $M$ iterations. In each step, the vector is partially sparsified using the threshold estimated in the previous stage $m-1$. Then, given the ratio $\delta_m$ at loop step $m$, the chosen \ac{SPD} distribution fitting is invoked via the function {\em Thresh\_Estimation} to obtain a new threshold, as sketched below.
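For concreteness, a minimal sketch of this estimation loop for the multi-stage exponential variant (\ac{SIDCo}-E) is given below; it simplifies \cref{algo:algo1} by using equal per-stage ratios, and all names are illustrative rather than those of the actual implementation:
\begin{verbatim}
import numpy as np

def sidco_e_threshold(grad, delta, num_stages):
    # One simple stage schedule: delta = prod_m delta_m with equal ratios.
    delta_m = delta ** (1.0 / num_stages)
    abs_g = np.abs(grad)
    eta = 0.0
    for _ in range(num_stages):
        exceed = abs_g[abs_g >= eta]
        if exceed.size == 0:
            break
        # Shifted-exponential MLE of the scale of the exceedances, then
        # eta_m = b_hat_m * log(1/delta_m) + eta_{m-1}.
        b_hat = exceed.mean() - eta
        eta = b_hat * np.log(1.0 / delta_m) + eta
    return eta
\end{verbatim}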
At the last stage (i.e., step $M$ of the loop), the resulting estimation threshold should approximate the threshold that would obtain the target ratio $\delta$ of the input vector. Then, the estimated threshold is used to sparsify the full gradient vector and obtain the values and their corresponding indices. In each training iteration, the algorithm maintains statistics such as the average quality of its estimations over the past $Q$ training steps. Then, at the end of every $Q$ training steps, the algorithm invokes {\em Adapt\_Stages}, which adjusts the current number of stages $M$ based on user-defined allowable error bounds of the estimation (i.e., $\epsilon_H$ and $\epsilon_L$). After the adjustment, the next algorithm invocation will use the new number of stages $M$. The number of stages is adjusted only if the obtained ratio is not within the error bounds. \subsection{Convergence Analysis} In the following, we present the convergence analysis of \scheme\!. \begin{lemma} \label{lemma:convanalysis} Let $\hat{\delta}$ be the average achieved compression ratio of the proposed scheme with bounded discrepancy with respect to the target $\delta$ and error tolerance $\epsilon$ as in \eqref{eq:boundederror}, which is assured by Algorithm~\ref{algo:algo1} in the Appendix. Also, let $i$ be the current training iteration; then the convergence rate of the proposed scheme coincides with that of \ac{SGD} if \begin{equation} i>\mathcal{O}\left(\frac{1}{\delta^2 \, (1-\epsilon)^2}\right). \end{equation} \end{lemma} \begin{proof} The convergence of the proposed scheme mainly follows the existing convergence analysis of $\topk$~\cite{Alistarh18_sparse, aji_sparse, stich2018sparsified}, because \scheme\! is designed to estimate a threshold for obtaining the top $\k$ elements. First, in contrast to $\topk$, the number of non-zero elements in the proposed scheme is a binomial \acs{r.v.}, $\hat{K}$, and not a constant. Second, the expected value of the estimated number of non-zero elements, $\hat{k} \triangleq \mathbb{E}\{\hat{K}\}$, may not coincide with the target $\k$, due to a possible mismatch between the assumed \acs{SPD} of the stochastic gradients and their original distribution. The complete proof is detailed in Appendix~\ref{appnd:conanalyproof}. \end{proof} The proper selection of the distribution as a \ac{SPD} permits the actual compression ratio to approach the designed compression ratio with small $\epsilon$. This can be seen from the extensive numerical results in the plots showing the estimation quality of \cref{fig:vgg16-good-8}, \cref{fig:resnet20-good-8}, \cref{fig:resnet50-comp-8}, and \cref{fig:compratio}. One can notice that on average $\hat{\k}/\k \approx 1$, hence it resembles $\topk$. For some rare extreme cases, we have $\hat{\delta} \geq 0.8\, \delta$ (i.e., $\epsilon=20\%$), meaning that we need at most about $50\%$ more iterations than $\topk$ to reach the rate of \ac{SGD}. \section{Experimental Evaluation} \label{sec:evaluation} This evaluation answers the following questions:\\ $\bullet$ What benefits, in terms of training speed-up and model quality, does \scheme\! provide compared to state-of-the-art approaches (\emph{gains in training time to accuracy})?\\ $\bullet$ Are the improvements of \scheme\! only due to its faster training over other schemes (\emph{training throughput gains})?\\ $\bullet$ How accurate is the threshold estimation of \scheme\! compared to the state-of-the-art (\emph{estimation quality})?
In the following, we describe the main results, and present more experimental results and scenarios in \cref{apdx:moreexp}. \subsection{Experimental Settings} \label{sec:experiments} Unless otherwise mentioned, the default settings of the experiments are as follows.\\ \smartparagraph{Environment:} We perform our experiments on 8 server machines equipped with dual 2.6 GHz 16-core Intel Xeon Silver 4112 CPU, 512GB of RAM, and 10 Gbps NICs. Each machine has an NVIDIA V100 GPU with 16GB of memory. The servers run Ubuntu 18.04, Linux kernel 4.15.0. We use PyTorch 1.1.0 with CUDA 10.2 as the ML toolkit. We use Horovod 0.16.4 configured with OpenMPI 4.0.0 for collective communication. \smartparagraph{Benchmarks and hyper-parameters:} The benchmarks and hyper-parameters are listed in \cref{tab:models}. We use both \ac{CNN} and \ac{RNN} models for image classification and language modeling tasks, respectively. We use compression ratios ($\delta$) of 0.1 (10\%), 0.01 (1\%) and 0.001 (0.1\%) to span a wide range of the trade-off between compression and accuracy similar to prior work \cite{aji_sparse,Alistarh18_sparse,lin2018deep}. Further details of the environment, tasks and settings of the experiments are given in \cref{apdx:clusters}. \smartparagraph{Compressors: } We compare \scheme\! with $\topk$, \ac{DGC}, RedSync and GaussianKSGD. The \ac{EC} mechanism is employed to further enhance the convergence of \ac{SGD} with compressed gradients \cite{lin2018deep,ef-sgd}. For \scheme\!, we set $\delta_{1}=0.25$, $\epsilon=20\%$, and $Q=5$ iterations to adapt the stages as in \cref{algo:algo1}. For conciseness, we present the performance of \scheme\! with double exponential fitting (shown in the figures as \ac{SIDCo}-E).\footnote{The results for double \ac{GPD} (\ac{SIDCo}-GP) and double gamma (\ac{SIDCo}-P), presented in \cref{apdx:expalldist}, are quite similar.} \smartparagraph{Metrics:} We quantify the performance of a given scheme (i.e., \scheme\!, Top-$k$, DGC, RedSync or GaussianKSGD) via the following metrics:\\ $\bullet$ \emph{Normalized Training Speed-up:} We evaluate the model quality at iteration $T$ (the end of training) and divide it by the time taken to complete $T$ iterations. We normalize this quantity by the same measurement calculated for the baseline case. This is the normalized training speed-up relative to the baseline;\\ $\bullet$ \emph{Normalized Average Training Throughput:} is the average throughput normalized by the baseline's throughput which illustrates the speed-up from compression irrespective of its impact on model quality;\\ $\bullet$ \emph{Estimation Quality:} is the compression ratio ($\hat{k}/d$) averaged over the training divided by the target ratio ($\delta=k/d$) along with the $90\%$ confidence interval as error-bars. \begin{figure}[!t] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.65\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/endtoend/mcnodes_ptb/accuracy_ec_nesterov/legend.pdf} \end{subfigure} \begin{subfigure}[ht]{0.48\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_ptb/accuracy_ec_nesterov/lstm_0.001_8_1_loss_step.pdf} \caption{Train loss vs iterations} \label{fig:ptb-loss-0.001} \end{subfigure} \hfill \begin{subfigure}[ht]{0.48\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_ptb/compress_ec_nesterov/lstm_0.001_8_1_compestratio.pdf} \caption{Thresh. 
Estimation Quality.} \label{fig:ptb-avgcomp-0.001} \end{subfigure} \\ \begin{subfigure}[ht]{0.48\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_an4/accuracy_ec_nesterov/lstm_0.001_8_1_loss_step.pdf} \caption{Train loss vs iterations} \label{fig:an4-loss-0.001} \end{subfigure} \hfill \begin{subfigure}[ht]{0.48\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_an4/compress_ec_nesterov/lstm_0.001_8_1_compestratio.pdf} \caption{Thresh. Estimation Quality} \label{fig:an4-avgcomp-0.001} \end{subfigure} \caption{The training performance for the LSTM model on PTB and AN4 datasets with compression ratio of $0.001$.} \label{fig:rnn-extra} \end{figure} \begin{figure*}[t!] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.5\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend3.pdf} \end{subfigure} \\ \begin{subfigure}[ht]{0.31\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_cifar10/allratio_ec_sgd/resnet20_8_1_speedup.pdf} \caption{ResNet20-CIFAR-10 (Speedup).} \label{fig:resnet20-speedup-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_cifar10/allratio_ec_sgd/resnet20_8_1_compestratio.pdf} \caption{ResNet20-CIFAR-10 (Est. Quality).} \label{fig:resnet20-good-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_cifar10/allratio_ec_sgd/vgg16_8_1_speedup.pdf} \caption{VGG16-CIFAR-10 (Speedup)} \label{fig:vgg16-speedup-8} \end{subfigure} \caption{The training performance for ResNet20 [(a),(b)] and VGG16 [(c)] on CIFAR-10 dataset.} \label{fig:cifar10} \end{figure*} Next, we present the results for the benchmarks in~\cref{tab:models}. \subsection{Recurrent Neural Networks (RNNs)} \textbf{RNN-LSTM on PTB: } This benchmark has the highest communication overhead (\cref{tab:models}). In~\cref{fig:ptb-speedup-8}, \scheme\! shows a significant speed-up over no-compression by {\em $\approx\!41.7\times$} and improves over $\topk$ and \ac{DGC} by up to {\em $\approx\!7.6\times$} and {\em $\approx\!1.9\times$}, respectively. At the high compression ratio of $0.001$, both the RedSync and GaussianKSGD compression methods fail to converge to the target loss and test perplexity (\cref{fig:ptb-loss-0.001}) and therefore attain zero speed-ups. \cref{fig:ptb-throughput-8} shows that threshold estimation schemes, including \scheme\!, have the highest training throughput. However, in~\cref{fig:ptb-comp-8}, \ac{DGC} and \scheme\! are the only methods that accurately estimate the target ratio with high confidence. In contrast, for GaussianKSGD at a ratio of $0.001$ and RedSync at ratios of $0.01$ and $0.001$, the number of selected elements is two orders of magnitude lower than the target. Moreover, over the training process, the estimation quality of RedSync has high variance, harming convergence. \cref{fig:ptb-avgcomp-0.001} shows that, at a target ratio of $0.001$, RedSync causes significant fluctuation in the compression ratio and training does not converge. GaussianKSGD results in a very low compression ratio which is close to $0$ and far from the target, leading to significantly higher loss (and test perplexity) values compared to the target values. \textbf{RNN-LSTM on AN4:} \cref{fig:an4-speedup-8} shows that \scheme\!
achieves higher gains compared to other compressors by up to {\em $\approx\!2.1\times$} for ratios of $0.1$ and $0.01$. Notably, at a ratio of $0.001$, only \scheme\! achieved the target \ac{CER}. Thus, we ran the other compressors for $250$ epochs to achieve the target \ac{CER} (instead of the default 150), except for GaussianKSGD, which does not converge. The gains of \scheme\! over the other compressors then increase by up to {\em $\approx\!4\times$}. The reason could be that the model is more sensitive to compression (especially in the initial training phase). \scheme\! starts as single-stage before performing stage adaptations, leading to a slight over-estimation of $k$, and so more gradient elements are sent during training start-up. Throughput-wise, \cref{fig:an4-throughput-8} shows that threshold-estimation methods, including \scheme\!, enjoy higher training throughput, explaining the gains over the baseline. Similar to the LSTM-PTB results,~\cref{fig:an4-comp-8} shows that on average, with low variance, \scheme\! closely matches the estimated ratios of DGC while other estimation methods have poor estimation quality. Similar to PTB, \cref{fig:an4-avgcomp-0.001} shows that, at a target ratio of $0.001$, RedSync causes significant fluctuation in the compression ratio and GaussianKSGD results in a very low compression ratio (close to 0) which is far from the target. This leads both methods to achieve significantly higher loss (or test perplexity) values compared to the target values. \subsection{Convolutional Neural Networks (CNNs)} \begin{figure*}[!h] \captionsetup[subfigure]{justification=centering} \centering \begin{subfigure}[ht]{0.5\linewidth} \includegraphics[width=1\linewidth]{Figures/experiments/legend3.pdf} \end{subfigure} \\ \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_imagenet/allratio_ec_nesterov/resnet50_8_1_speedup.pdf} \caption{ResNet50-ImageNet (Accuracy)} \label{fig:resnet50-speedup-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_imagenet/allratio_ec_nesterov/resnet50_8_1_throughput.pdf} \caption{ResNet50-ImageNet (Throughput)} \label{fig:resnet50-tput-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_imagenet/allratio_ec_nesterov/resnet50_8_1_compestratio.pdf} \caption{ResNet50-ImageNet (Est. Quality)} \label{fig:resnet50-comp-8} \end{subfigure} \\ \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_imagenet/allratio_ec_nesterov/vgg19_8_1_speedup.pdf} \caption{VGG19-ImageNet (Accuracy)} \label{fig:vgg19-speedup-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_imagenet/allratio_ec_nesterov/vgg19_8_1_throughput.pdf} \caption{VGG19-ImageNet (Throughput)} \label{fig:vgg19-tput-8} \end{subfigure} \hfill \begin{subfigure}[ht]{0.30\linewidth} \includegraphics[width=\linewidth]{Figures/experiments/endtoend/mcnodes_imagenet/allratio_ec_nesterov/vgg19_8_1_compestratio.pdf} \caption{VGG19-ImageNet (Est.
Quality)} \label{fig:vgg19-comp-8} \end{subfigure} \caption{The training performance for ResNet50 [(a), (b), (c)] and VGG19 [(d), (e), (f)] on ImageNet dataset.} \label{fig:imagenet} \end{figure*} \textbf{ResNet20 and VGG16 on CIFAR-10:} \cref{fig:resnet20-speedup-8} shows that, for ResNet20, all compressors achieve somewhat comparable and modest speed-ups over the no-compression baseline (except at a ratio of 0.001, where accuracy is degraded, hence the lower speed-up than the baseline). This is not surprising because ResNet20 is not network-bound. However, for the larger VGG16 model, \cref{fig:vgg16-speedup-8} shows that \scheme\! achieves significant speed-ups over no-compression, $\topk$ and DGC by up to $\approx\!5\times$, $1.5\times$, and $1.2\times$, respectively. \cref{fig:resnet20-good-8} shows that, unlike other estimation schemes, \scheme\! can accurately achieve the target ratio. \textbf{ResNet50 and VGG19 on ImageNet:} In these experiments, we set a time-limit of 5 hours per run to reduce our costs. For calculating the speed-up, we compare the top-1 accuracy achieved by different methods at the end of training. First, for the ResNet50 benchmark, we use compression ratios of $0.1$, $0.01$, and $0.001$. \cref{fig:resnet50-speedup-8} shows that \scheme\! achieves the highest accuracy, exceeding the baseline, $\topk$ and \ac{DGC} by $\approx15$, $3$, and $2$ accuracy points, i.e., normalized accuracy gains of $\approx\!40\%$, $5\%$, and $4\%$, respectively. \cref{fig:resnet50-tput-8} shows that \scheme\! attains the highest throughput among all methods (except for RedSync at $0.1$ compression). \cref{fig:resnet50-comp-8} shows that, unlike GaussianKSGD and RedSync, which both result in estimation quality far from the target with high variance, \scheme\! estimates the threshold with very high quality for all ratios. Similar trends are observed for the VGG19 benchmark, where we use a compression ratio of $0.001$. As shown in \cref{fig:vgg19-speedup-8,fig:vgg19-tput-8,fig:vgg19-comp-8}, \scheme\! estimates the threshold with high quality, and achieves the highest top-1 accuracy and training throughput among all methods. The gains compared to the baseline, $\topk$ and DGC are $\approx\!34\times$, $2.9\times$, and $1.13\times$, respectively. \smartparagraph{Takeaways:} Our approach is simple in nature, which is intentional, to make it applicable in practice. Nonetheless, our work goes beyond existing works that estimate a threshold for $\topk$ sparsification. These works either did not leverage the statistical properties of the gradients (DGC) or assumed a Gaussian distribution without a thorough study of the gradient (e.g., RedSync, GaussianKSGD). On a GPU, \scheme\! improves over DGC by at least $2\times$, and the speed-ups are significantly larger on the CPU as shown in~\cref{fig:vgg16-cpu-speedup} and~\cref{apdx:moremicrobench}. As a threshold estimation method, SIDCo not only benefits from the throughput gains of threshold methods but also from the high quality of its threshold estimation. The results in~\cref{fig:rnn-extra,fig:compratio} indicate that existing estimation methods (e.g., RedSync and GaussianKSGD) fail to achieve consistent threshold estimation behavior even though they may provide throughput gains. Their throughput gains, in many cases, are due to severe under-estimation of the target ratio, which results in lower volumes of data sent compared to other compressors. \section{Conclusion} We solved a practical problem in distributed deep learning.
We showed that compressors other than threshold-based ones incur high computational costs, whereas existing threshold-estimation methods fail to achieve their target compression ratio. To address these issues, we proposed \scheme\!, a multi-stage threshold-based compressor that imposes a sparsity prior on the gradients. We evaluated \scheme\! and compared it with popular compressors using common benchmarks involving RNN and CNN architectures. \ac{SIDCo}, unlike existing threshold estimation methods, can efficiently approximate the target threshold and results in significant gains of up to $\approx\!41.7\times$, $7.5\times$, and $1.9\times$ over the no-compression baseline, $\topk$ and \ac{DGC} compression methods, respectively. We also expect further gains for large and communication-bound models. In the future, we will explore ways to estimate a threshold for which compression satisfies other quality targets. \small \bibliographystyle{mlsys2021}
\section{Introduction} \setcounter{equation}{0} The Schur class ${\mathcal S}$ of all analytic complex-valued functions mapping the unit disk $\D$ into its closure has played a prominent role in function theory and its applications beginning with the work of I. Schur \cite{Schur}. Among several alternative characterizations of the Schur class is one in terms of positive kernels: {\em the function $f \colon {\mathbb D} \to {\mathbb C}$ is in the class ${\mathcal S}$ if and only if the associated kernel $K_f(z,\zeta)=\frac{1-f(z)\overline{f(\zeta)}}{1-z\bar{\zeta}}$ is positive on $\D\times\D$ or equivalently, if and only if the {\em Pick matrix} \begin{equation} P_f(z_1,\ldots,z_n)= \left[\frac{1-f(z_i)\overline{f(z_j)}}{1-z_i\overline{z}_j}\right]_{i,j=1}^n \label{1.1} \end{equation} is positive semidefinite for any choice of finitely many points $z_1,\ldots,z_n\in\D$}. The ``only if" part is the classical result of Pick and Nevanlinna \cite{pick, nevan1}. For the ``if" part, let us observe that positivity of $1\times 1$ matrices $P_f(z)$ already guarantees $|f(z)|\le 1$ ($z\in\D$). Thus, larger Pick matrices are needed in the ``if" direction to guarantee the analyticity of $f$. The latter can be done by constructing the coisometric de Branges-Rovnyak realization \cite{dbr2} for $f$ or by using a more recent lurking isometry argument \cite{Ball-Winnipeg}. A remarkable fact established by Hindmarsh \cite{hind} (see also \cite{fh}) is that analyticity is implied by positivity of all $3\times 3$ Pick matrices. The objective of this paper is to extend this result to regular functions of a quaternionic variable (Theorem \ref{T:1.3} below). \smallskip Let $\mathbb H$ denote the skew field of real quaternions $\alpha=x_0+{\bf i}x_1+{\bf j}x_2+{\bf k}x_3$ where $x_\ell\in\mathbb R$ and ${\bf i}, {\bf j}, {\bf k}$ are imaginary units such that ${\bf i}^2={\bf j}^2={\bf k}^2={\bf ijk}=-1$. The real and the imaginary parts, the conjugate and the absolute value of a quaternion $\alpha$ are defined by ${\rm Re}(\alpha)=x_0$, ${\rm Im}(\alpha)={\bf i}x_1+{\bf j}x_2+{\bf k}x_3$, $\bar \alpha={\rm Re}(\alpha)-{\rm Im}(\alpha)$ and $|\alpha|^2=\alpha\bar{\alpha}=|{\rm Re}(\alpha)|^2+|{\rm Im}(\alpha)|^2$, respectively. By $\mathbb B=\{\alpha\in\mathbb H: \; |\alpha|<1\}$ we denote the unit ball in $\mathbb H$. \smallskip Since multiplication in $\mathbb H$ is not commutative, function theory over quaternions is somewhat different from that over the field $\mathbb {C}$. There have been several notions of regularity (or analyticity) for $\mathbb H$-valued functions, most notable of which are due to Moisil \cite{moisil}, Fueter \cite{fuet1, fuet2}, and Cullen \cite{cullen}. More recently, upon refining and developing Cullen's approach, Gentili and Struppa introduced in \cite{genstr} the notion of regularity which, when restricted to functions on a quaternionic ball around the origin, turns out to describe power series with quaternionic coefficients on one side; we refer to the recent book \cite{gss} for a detailed exposition of the subject. Here we accept the following definition of regularity on the quaternionic unit ball.
\begin{definition} {\rm A function $f: \, \mathbb B\to \mathbb H$ is called {\em left-regular} on $\mathbb B$ if it admits the power series expansion with quaternionic coefficients on the right which converges absolutely on $\mathbb B$: \begin{equation} f(z)=\sum_{k=0}^\infty z^kf_k\quad\mbox{with $\; \; f_k\in\mathbb H\; $ such that $\; \; \overline{\displaystyle\lim_{k\to\infty}}\sqrt[k]{|f_k|}\le 1$}. \label{2.2} \end{equation} If in addition, $f(\alpha):={\displaystyle\sum_{k=0}^\infty \alpha^kf_k}\in \overline{\mathbb B}$ for all $\alpha\in\mathbb B$, we say that $f$ belongs to the {\em left Schur class} $\mathcal {QS}_{_L}$. Right regular functions and the {\em right Schur class} $\mathcal {QS}_{_R}$ can be defined similarly.} \label{D:1.1} \end{definition} Quaternionic Schur classes have become an object of intensive study quite recently. A number of related results (e.g., M\"obius transformations, Schwarz Lemma, Bohr's inequality) are presented in \cite[Chapter 9]{gss}. Among other results, we mention realizations for slice regular functions \cite{acs1}, Schwarz-Pick Lemma \cite{bist}, Blaschke products \cite{acs2}, Nevanlinna-Pick interpolation \cite{abcs}. \section{Pick matrices and Hindmarsh's theorem} \setcounter{equation}{0} A straightforward entry-wise verification shows that the complex matrix \eqref{1.1} satisfies the Stein equality \begin{equation} P_f(z_1,\ldots,z_n)-TP_f(z_1,\ldots,z_n)T^*=EE^*-NN^*, \label{2.3} \end{equation} where \begin{equation} T=\begin{bmatrix}z_1 & & 0 \\ & \ddots & \\ 0 && z_n\end{bmatrix},\quad E=\begin{bmatrix}1 \\ \vdots \\ 1\end{bmatrix},\quad N=\begin{bmatrix}f(z_1) \\ \vdots \\ f(z_n)\end{bmatrix}. \label{2.3a} \end{equation} Since $|z_i|<1$, the latter matrix is the {\em unique} matrix subject to identity \eqref{2.3}. In case $z_i\in\mathbb B$ and $f(z_i)\in\mathbb H$, the Stein equation \eqref{2.3} still has a unique solution $P_f(z_1,\ldots,z_n)$ (still called the Pick matrix of $f$). Solving this equation gives the explicit formula for the entries of $P_f(z_1,\ldots,z_n)$ in terms of series \begin{equation} P_f(z_1,\ldots,z_n)= \left[\sum_{k=0}^\infty z_i^k(1-f(z_i)\overline{f(z_j)})\overline{z}_j^k\right]_{i,j=1}^n \label{1.4} \end{equation} which converge due to the following estimate: $$ \left|\sum_{k=0}^\infty z_i^k(1-f(z_i)\overline{f(z_j)})\overline{z}_j^k\right| \le 2\sum_{k=0}^\infty | z_i|^k |z_j|^k=\frac{2}{1-| z_i||z_j|}. $$ According to a result from \cite{abcs}, for any function $f\in\mathcal {QS}_{_L}$, the associated Pick matrix \eqref{1.4} is positive semidefinite for any choice of finitely many points $z_1,\ldots,z_n\in\mathbb B$. The notions of adjoint matrices, of Hermitian matrices and positive semidefinite matrices over $\mathbb H$ are similar to those over $\mathbb C$ (we refer to a very nice survey \cite{fuzhen} on this subject). \smallskip The following quaternionic analog of the Hindmarsh theorem \cite{hind} is the main result of the present paper. \begin{theorem} Let $f: \, \mathbb B\to \mathbb H$ be given and let us assume that $3\times 3$ Pick matrices $P_f(z_1,z_2,z_3)$ are positive semidefinite for all $(z_1,z_2,z_3)\in\mathbb B^3$. Then $f$ belongs to $\mathcal {QS}_{_L}$. \label{T:1.3} \end{theorem} Before starting the proof we make several observations. \begin{remark} For any $z_1,z_2\in\mathbb {C}$, the quaternion $z_1{\bf j}z_2$ belongs to $\mathbb {C}{\bf j}$. \label{R:2.6} \end{remark} \noindent The statement follows from the multiplication table for imaginary units in $\mathbb H$. 
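Explicitly, since ${\bf j}\,z=\bar{z}\,{\bf j}$ for every $z\in\mathbb{C}$ (a consequence of ${\bf j}{\bf i}=-{\bf i}{\bf j}$), one has $z_1{\bf j}z_2=(z_1\bar{z}_2)\,{\bf j}\in\mathbb{C}{\bf j}$.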
We also remark that any quaternion $\alpha=x_0+{\bf i}x_1+{\bf j}x_2+{\bf k}x_3$ admits a unique representation $\alpha=\alpha_1+\alpha_2{\bf j}$ with $\alpha_1,\alpha_2\in\mathbb {C}$. Consequently, any quaternionic matrix $A$ admits a unique representation $A=A_1+A_2{\bf j}$ with complex matrices $A_1$ and $A_2$. \begin{remark} The matrix $A=A_1+A_2{\bf j}\in\mathbb H^{n\times n}$ ($A_1,A_2\in\mathbb {C}^{n\times n}$) is positive semidefinite if and only if the complex matrix $\begin{bmatrix}A_1 & A_2 \\ -\overline{A}_2 & \overline{A}_1\end{bmatrix}$ is positive semidefinite (see \cite{fuzhen}). \label{R:2.2} \end{remark} {\rm Two quaternions $\alpha$ and $\beta$ are called {\em equivalent} (conjugate to each other) if $\alpha=h^{-1}\beta h$ for some nonzero $h\in\mathbb H$.} It follows (see e.g., \cite{fuzhen}) that \begin{equation} \alpha\sim\beta\quad\mbox{if and only if}\quad {\rm Re}(\alpha) ={\rm Re}(\beta) \; \; \mbox{and} \; \; |\alpha|=|\beta|. \label{2.0} \end{equation} Therefore, the conjugacy class of a given $\alpha\in\mathbb H$ forms a $2$-sphere (of radius $|{\rm Im}(\alpha)|$ around ${\rm Re}(\alpha)$). \begin{remark} If $\alpha,\beta,\gamma$ are three distinct equivalent quaternions, then \begin{equation} \gamma^k=(\gamma-\beta)(\alpha-\beta)^{-1}\alpha^k+(\alpha-\gamma)(\alpha-\beta)^{-1}\beta^k \quad\mbox{for all}\quad k=0,1,\ldots \label{2.1} \end{equation} {\rm Indeed, since $\alpha\sim\beta$, it follows from \eqref{2.0} that $(\alpha-\beta)\alpha(\alpha-\beta)^{-1}=\overline{\beta}$ and subsequently, \begin{equation} (\alpha-\beta)\alpha^k(\alpha-\beta)^{-1}=\overline{\beta}^k=(\gamma-\beta)^{-1}\gamma^k(\gamma-\beta), \label{2.1a} \end{equation} where the first equality is clear and the second is a virtue of the first since $\beta$ and $\gamma$ are also equivalent. Similarly, \begin{equation} (\alpha-\beta)^{-1}\beta^k(\alpha-\beta)=\overline{\alpha}^k=(\alpha-\gamma)^{-1}\gamma^k(\alpha-\gamma) \label{2.1b} \end{equation} for all integers $k\ge 0$. Then we get \eqref{2.1} from \eqref{2.1a} and \eqref{2.1b} as follows: \begin{align*} &(\gamma-\beta)(\alpha-\beta)^{-1}\alpha^k+(\alpha-\gamma)(\alpha-\beta)^{-1}\beta^k\\ &=\gamma^k(\gamma-\beta)(\alpha-\beta)^{-1}+\gamma^k(\alpha-\gamma)(\alpha-\beta)^{-1}=\gamma^k. \end{align*}} \label{R:2.0} \end{remark} It turns out that the values of a regular function $f$ at two points from the same conjugacy class uniquely determine $f$ at any point from this class; the formula \eqref{2.4} below was established in \cite{genstr} in a more general setting. \begin{remark} {\rm Let $f$ be left-regular on $\mathbb B$ and let $\alpha,\beta,\gamma\in\mathbb B$ be distinct equivalent points. Then \begin{equation} f(\gamma)=(\gamma-\beta)(\alpha-\beta)^{-1}f(\alpha)+(\alpha-\gamma)(\alpha-\beta)^{-1}f(\beta). \label{2.4} \end{equation} Indeed, equality \eqref{2.1} verifies formula \eqref{2.4} for monomials $f(z)=z^k$. Extending it by right linearity to power series \eqref{2.2} completes the proof.} \label{R:rep} \end{remark} \noindent {\bf Proof of Theorem \ref{T:1.3}:} We first observe that for $n=1$, the formula \eqref{1.4} amounts to $$ P_f(z_1)=\sum_{k=0}^\infty z_1^k(1-|f(z_1)|^2)\overline{z}_1^k=\frac{1-|f(z_1)|^2}{1-|z_1|^2} $$ for each $z_1\in\mathbb B$. Therefore, the condition $P_f(z_1)\ge 0$ implies $|f(z_1)|\le 1$. \smallskip It remains to show that $f$ is left-regular.
Toward this end, we first show that there exist complex Schur-class functions $g$ and $h$ such that \begin{equation} f(\zeta)=g(\zeta)+h(\zeta){\bf j}\quad\mbox{for all}\quad \zeta\in\D. \label{op} \end{equation} Indeed, for each fixed {\em complex} point $\zeta\in\mathbb {C}\cap\mathbb B=\D$, the quaternion $f(\zeta)\in\mathbb H$ admits a (unique) representation \eqref{op} with $g(\zeta)\in\mathbb {C}$ and $h(\zeta)\in\mathbb {C}$. For any two points $\zeta_1,\zeta_2\in\D$ and any $k\ge 0$, we then compute \begin{align*} \zeta_1^k(1-f(\zeta_1)\overline{f(\zeta_2)})\overline{\zeta}_2^k =&\zeta_1^k\overline{\zeta}_2^k-\zeta_1^k\left[(g(\zeta_1)+h(\zeta_1){\bf j}) (\overline{g(\zeta_2)}-{\bf j}\overline{h(\zeta_2)})\right]\overline{\zeta}_2^k\\ =&\zeta_1^k\left[1-g(\zeta_1)\overline{g(\zeta_2)}- h(\zeta_1)\overline{h(\zeta_2)}\right]\overline{\zeta}_2^k\\ &+\zeta_1^k\left[g(\zeta_1){\bf j}\overline{h(\zeta_2)}-h(\zeta_1){\bf j}\overline{g(\zeta_2)}\right] \overline{\zeta}_2^k. \end{align*} Summing up the latter equalities over all $k\ge 0$ gives \begin{align} \sum_{k=0}^\infty \zeta_1^k(1-f(\zeta_1)\overline{f(\zeta_2)})\overline{\zeta}_2^k=& \frac{1-g(\zeta_1)\overline{g(\zeta_2)}-h(\zeta_1)\overline{h(\zeta_2)}}{1-\zeta_1\overline{\zeta}_2} \label{2.6}\\ &+\sum_{k=0}^\infty \zeta_1^k\left[g(\zeta_1){\bf j}\overline{h(\zeta_2)}-h(\zeta_1){\bf j}\overline{g(\zeta_2)}\right]\overline{\zeta}_2^k.\notag \end{align} The first term on the right is complex whereas the second term belongs to $\mathbb {C}{\bf j}$ by Remark \ref{R:2.6}. Let us consider the Pick matrix $P_f(\zeta_1,\zeta_2,\zeta_3)$ based on arbitrary points $\zeta_1,\zeta_2,\zeta_3\in\D$. According to \eqref{1.4} and \eqref{2.6}, $$ P_f(\zeta_1,\zeta_2,\zeta_3)=P_{f,1}(\zeta_1,\zeta_2,\zeta_3)+P_{f,2}(\zeta_1,\zeta_2,\zeta_3) $$ where \begin{equation} P_{f,1}(\zeta_1,\zeta_2,\zeta_3)= \left[\frac{1-g(\zeta_i)\overline{g(\zeta_j)}-h(\zeta_i)\overline{h(\zeta_j)}} {1-\zeta_i\overline{\zeta}_j}\right]_{i,j=1}^3\in\mathbb {C}^{3\times 3} \label{2.7} \end{equation} and where $P_{f,2}(\zeta_1,\zeta_2,\zeta_3)$ is a matrix from $\mathbb {C}^{3\times 3}{\bf j}$. By the assumption of Theorem \ref{T:1.3}, the matrix $P_f(\zeta_1,\zeta_2,\zeta_3)$ is positive semidefinite. Then, the complex matrix $P_{f,1}(\zeta_1,\zeta_2,\zeta_3)$ is positive semidefinite by Remark \ref{R:2.2}. The matrix \eqref{2.7} can be written as \begin{equation} P_{f,1}(\zeta_1,\zeta_2,\zeta_3)=\Lambda-G\Lambda G^*-H\Lambda H^*\ge 0 \label{2.8} \end{equation} where $$ \Lambda=\left[\frac{1} {1-\zeta_i\overline{\zeta}_j}\right]_{i,j=1}^3,\quad G=\sbm{g(\zeta_1) & 0 & 0 \\ 0 & g(\zeta_2) & 0 \\ 0 & 0 & g(\zeta_3)},\quad H=\sbm{h(\zeta_1) & 0 & 0 \\ 0 & h(\zeta_2) & 0 \\ 0 & 0 & h(\zeta_3)}, $$ and it is well known that the matrix $\Lambda$ is positive semidefinite. Then we conclude from \eqref{2.8} that $\Lambda-G\Lambda G^*\ge 0$ and $\Lambda-H\Lambda H^*\ge 0$, i.e., that the $3\times 3$ matrices $$ \left[\frac{1-g(\zeta_i)\overline{g(\zeta_j)}} {1-\zeta_i\overline{\zeta}_j}\right]_{i,j=1}^3\quad\mbox{and}\quad \left[\frac{1-h(\zeta_i)\overline{h(\zeta_j)}} {1-\zeta_i\overline{\zeta}_j}\right]_{i,j=1}^3 $$ are positive semidefinite for all choices of $\zeta_1,\zeta_2,\zeta_3\in\D$. It then follows from the complex Hindmarsh theorem that the functions $g,h: \, \D\to\mathbb {C}$ are complex-analytic and belong to the classical Schur class $\mathcal S$.
Substituting their power series expansions $g(\zeta)={\displaystyle\sum_{k=0}^\infty\zeta^k g_k}$ and $h(\zeta)={\displaystyle\sum_{k=0}^\infty\zeta^k h_k}$ into \eqref{op} leads us to the power series expansion for $f$ on $\D$: \begin{equation} f(\zeta)=g(\zeta)+ h(\zeta){\bf j}=\sum_{k=0}^\infty \zeta^k f_k\quad\mbox{with}\quad f_k=g_k+h_k{\bf j}. \label{2.8a} \end{equation} We now extend the latter power series to the whole $\mathbb B$ by simply letting \begin{equation} F(z)=\sum_{k=0}^\infty z^k f_k\quad (z\in\mathbb B). \label{2.9} \end{equation} The resulting power series converges absolutely on $\mathbb B$ (as the sum of two converging series $g(z)$ and $h(z){\bf j}$) and agrees with $f$ on $\D$. We next show that $F$ agrees with $f$ throughout $\mathbb B$. \smallskip Let $\gamma$ be any point in $\mathbb B\setminus\D$. The points $\alpha:={\rm Re}(\gamma)+|{\rm Im}(\gamma)|{\bf i}$ and $\overline{\alpha}$ belong to $\D$ and are equivalent to $\gamma$. Observe that \begin{equation} \begin{array}{ll} (\gamma-\overline{\alpha})(\alpha-\overline{\alpha})^{-1}&=(\gamma-\overline{\gamma})^{-1}(\gamma-\overline{\alpha}), \\ (\alpha-\gamma)(\alpha-\overline{\alpha})^{-1}&=(\gamma-\overline{\gamma})^{-1}(\gamma-\alpha). \end{array} \label{2.9a} \end{equation} Since $F$ is left-regular by construction, we apply formula \eqref{2.4} to get \begin{align} F(\gamma)=&(\gamma-\overline{\alpha})(\alpha-\overline{\alpha})^{-1}F(\alpha)+(\alpha-\gamma)(\alpha-\overline{\alpha})^{-1} F(\overline{\alpha})\notag\\ =&(\gamma-\overline{\gamma})^{-1}(\gamma-\overline{\alpha})f(\alpha)+(\gamma-\overline{\gamma})^{-1}(\gamma-\alpha)f(\overline{\alpha}), \label{2.10} \end{align} where the second equality follows due to \eqref{2.9a} and since $F$ agrees with $f$ on $\D$. On the other hand, we know that the Pick matrix $P_f(\alpha,\overline{\alpha},\gamma)$ is positive semidefinite and satisfies the Stein identity \eqref{2.3}: \begin{equation} P_f(\alpha,\overline{\alpha},\gamma)-TP_f(\alpha,\overline{\alpha},\gamma)T^*=EE^*-NN^* \label{2.11} \end{equation} where $$ T=\begin{bmatrix} \alpha & 0 & 0 \\ 0 & \overline{\alpha} & 0 \\ 0 & 0 & \gamma\end{bmatrix},\quad E=\begin{bmatrix} 1 \\ 1\\ 1\end{bmatrix},\quad N=\begin{bmatrix} f(\alpha) \\ f(\overline{\alpha}) \\ f(\gamma)\end{bmatrix}. $$ Let us introduce the row-vector $V=\begin{bmatrix} \gamma-\overline{\alpha} & \gamma-\alpha & \overline{\gamma}-\gamma\end{bmatrix}$. Since ${\rm Re}(\alpha)={\rm Re}(\gamma)$ and $|\alpha|=|\gamma|$ by the definition of $\alpha$, we have \begin{align*} VT&=\begin{bmatrix} (\gamma-\overline{\alpha})\alpha & (\gamma-\alpha)\overline{\alpha} & (\overline{\gamma}-\gamma)\gamma\end{bmatrix}=\gamma V,\\ VE&=\gamma-\overline{\alpha}+\gamma-\alpha+\overline{\gamma}-\gamma=2{\rm Re}(\gamma)-2{\rm Re}(\alpha)=0. \end{align*} Multiplying both parts of \eqref{2.11} by $V$ on the left and by $V^*$ on the right we get, on account of the two last equalities, $$ VP_f(\alpha,\overline{\alpha},\gamma)V^*-\gamma VP_f(\alpha,\overline{\alpha},\gamma)V^*\overline{\gamma}=-VNN^*V^*. $$ Since $P_f(\alpha,\overline{\alpha},\gamma)$ is positive semidefinite, we have $VP_f(\alpha,\overline{\alpha},\gamma)V^*\ge 0$ and hence we can write the last equality as $$ (1-|\gamma|^2)VP_f(\alpha,\overline{\alpha},\gamma)V^*=-|VN|^2. $$ The latter may occur only if $VP_f(\alpha,\overline{\alpha},\gamma)V^*=VN=0$.
Thus, $$ VN=(\gamma-\overline{\alpha})f(\alpha)+(\gamma-\alpha)f(\overline{\alpha})+(\overline{\gamma}-\gamma)f(\gamma)=0, $$ which implies \begin{equation} f(\gamma)=(\gamma-\overline{\gamma})^{-1}(\gamma-\overline{\alpha})f(\alpha)+(\gamma-\overline{\gamma})^{-1} (\gamma-\alpha)f(\overline{\alpha}). \label{2.12} \end{equation} Comparing \eqref{2.10} and \eqref{2.12} we conclude that $F(\gamma)=f(\gamma)$. Since $\gamma$ was chosen arbitrarily in $\mathbb B\setminus\D$, it follows that $F=f$ on $\mathbb B$. Since $F$ is left-regular on $\mathbb B$ by construction \eqref{2.9}, it follows that $f$ is left-regular on $\mathbb B$ as well. \qed \section{Schur-class of quaternionic formal power series} \setcounter{equation}{0} It turns out that the quaternionic Schur class can be defined without distinguishing the left and the right settings. Let us consider formal power series in one formal variable $z$ which commutes with quaternionic coefficients (which, in turn, satisfy the same growth condition as in \eqref{2.2}): \begin{equation} g(z)=\sum_{k=0}^\infty z^kg_k=\sum_{k=0}^\infty g_k z^k \quad\mbox{with $\; \; g_k\in\mathbb H\; $ such that $\; \; \overline{\displaystyle\lim_{k\to\infty}}\sqrt[k]{|g_k|}\le 1$}. \label{3.1} \end{equation} For each $g\in\mathbb{H}[[z]]$ as in \eqref{3.1}, we define its {\em conjugate} by $\; g^\sharp(z)={\displaystyle\sum_{k=0}^\infty z^k \overline{g}_k}$. The anti-linear involution $g\mapsto g^\sharp$ can be viewed as an extension of the quaternionic conjugation $\alpha\mapsto \overline{\alpha}$ from $\mathbb{H}$ to $\mathbb{H}[[z]]$. We next define $g^{\boldsymbol{e_\ell}}(\alpha)$ and $g^{\boldsymbol{e_r}}(\alpha)$ (left and right evaluations of $g$ at $\alpha$) by \begin{equation} g^{\boldsymbol{e_\ell}}(\alpha)=\sum_{k=0}^\infty\alpha^k g_k,\quad g^{\boldsymbol{e_r}}(\alpha)=\sum_{k=0}^\infty g_k\alpha^k,\quad\mbox{if}\quad g(z)=\sum_{k=0}^\infty z^k g_k. \label{3.2} \end{equation} Observe that the condition $\;\overline{\displaystyle\lim_{k\to\infty}}\sqrt[k]{|g_k|}\le 1$ imposed on the coefficients guarantees the absolute convergence of the series in \eqref{3.2} for all $\alpha\in\mathbb{B}$. Since multiplication in $\mathbb{H}$ is not commutative, left and right evaluations produce different results; however, the equality $g^{\boldsymbol{e_r}}(\alpha)=\overline{g^{\sharp\boldsymbol{e_\ell}}(\overline{\alpha})}$ holds for any $\alpha\in\mathbb{B}$ as can be seen from \eqref{3.2} and the definition of $g^\sharp$. \smallskip In accordance with Definition \ref{D:1.1}, we define the left and the right Schur classes $\mathcal {QS}_{_L}$ and $\mathcal {QS}_{_R}$ as the sets of power series $g\in\mathbb{H}[[z]]$ such that $|g^{\boldsymbol{e_\ell}}(\alpha)|\le 1$ (respectively, $|g^{\boldsymbol{e_r}}(\alpha)|\le 1$) for all $\alpha\in\mathbb B$. But as was shown in \cite{abcs} (in slightly different terms), the classes $\mathcal {QS}_{_L}$ and $\mathcal {QS}_{_R}$ coincide. We now recall several results from \cite{abcs} in terms of the present setting. With a power series $g\in\mathbb{H}[[z]]$ as in \eqref{3.1}, we associate the lower triangular Toeplitz matrices \begin{equation} T_n(g)=\begin{bmatrix}g_{0} & 0 & \ldots & 0 \\ g_{1}& g_{0} & \ddots & \vdots \\ \vdots& \ddots & \ddots & 0 \\ g_{n-1}& \ldots & g_{1} & g_{0}\end{bmatrix}\quad\mbox{for}\quad n=1,2,\ldots. \label{3.3} \end{equation} \begin{theorem} Let $g\in \mathbb{H}[[z]]$ be as in \eqref{3.1}. The following are equivalent: \begin{enumerate} \item $|g^{\boldsymbol{e_\ell}}(\alpha)|\le 1$ for all $\alpha\in\mathbb B$.
\item $|g^{\boldsymbol{e_r}}(\alpha)|\le 1$ for all $\alpha\in\mathbb B$. \item The matrix $T_n(g)$ is contractive for all $n\ge 1$. \end{enumerate} \label{T:3.1} \end{theorem} We thus may talk about the Schur class $\mathcal {QS}\subset\mathbb{H}[[z]]$ of formal power series $g$ such that the matrix $T_n(g)$ is contractive for all $n\ge 1$. In the latter power series setting, Theorem \ref{T:1.3} can be formulated as follows: {\em if the function $f: \, \mathbb B\to \mathbb H$ is such that $3\times 3$ Pick matrices $P_f(z_1,z_2,z_3)$ are positive semidefinite for all $(z_1,z_2,z_3)\in\mathbb B^3$, then there is (a unique) $g\in\mathcal {QS}$ such that $f(\alpha)=g^{\boldsymbol{e_\ell}}(\alpha)$ for all $\alpha\in\mathbb B$.} The ``right" version of this theorem is based on the dual Pick matrices $$ \widetilde{P}_f(z_1,\ldots,z_n)= \left[\sum_{k=0}^\infty \overline{z}_i^k(1-\overline{f(z_i)}f(z_j))z_j^k\right]_{i,j=1}^n. $$ \begin{theorem} Let $f: \, \mathbb B\to \mathbb H$ be given and let us assume that the $3\times 3$ dual Pick matrices $\widetilde{P}_f(z_1,z_2,z_3)$ are positive semidefinite for all $(z_1,z_2,z_3)\in\mathbb B^3$. Then there is (a unique) $g\in\mathcal {QS}$ such that $f(\alpha)=g^{\boldsymbol{e_r}}(\alpha)$ for all $\alpha\in\mathbb B$. \label{T:3.2} \end{theorem} The proof is immediate: by Theorem \ref{T:1.3}, there is an $h\in\mathcal {QS}$ such that $\overline{f(\overline{\alpha})}=h^{\boldsymbol{e_\ell}}(\alpha)$ for all $\alpha\in\mathbb B$. Therefore, $f(\alpha)=\overline{h^{\boldsymbol{e_\ell}}(\overline{\alpha})}$ and it remains to choose $g=h^\sharp$, which belongs to $\mathcal {QS}$ by Theorem \ref{T:3.1}. \smallskip In the proof of Theorem \ref{T:1.3}, we actually showed that for any $g\in\mathcal {QS}$, there exist (unique) Schur-class functions $s,h:\D\to\overline{\D}$ so that \begin{equation} g(\zeta)=s(\zeta)+h(\zeta){\bf j}\quad \mbox{for all}\quad \zeta\in\D, \label{3.4} \end{equation} and the latter equality determines $g$ uniquely on the whole of $\mathbb B$. The last question we address here is how to characterize the pairs $(s,h)$ of complex Schur functions producing, via formula \eqref{3.4}, a quaternionic Schur-class power series. \begin{theorem} Let $s$ and $h$ be Schur-class functions. Then the function $g$ given by \eqref{3.4} belongs to $\mathcal {QS}$ if and only if the following matrix is positive semidefinite \begin{equation} \begin{bmatrix}{\bf I}_{n}-T_n(s)T_n(s)^*-T_n(h)T_n(h)^* & T_n(s)T_n(h)^\top-T_n(h) T_n(s)^\top\\ \\ \overline{T_n(h)}T_n(s)^*-\overline{T_n(s)}T_n(h)^*& {\bf I}_{n}-\overline{T_n(s)}T_n(s)^\top-\overline{T_n(h)}T_n(h)^\top \end{bmatrix}\ge 0 \label{3.5} \end{equation} for all $n\ge 1$, where ${\bf I}_{n}$ stands for the $n\times n$ identity matrix and $T_n$ is defined via formula \eqref{3.3}. \label{T:3.3} \end{theorem} {\bf Proof:} By Theorem \ref{T:3.1}, $g$ belongs to $\mathcal {QS}$ if and only if ${\bf I}_{n}-T_n(g)T_n(g)^*$ is positive semidefinite for all $n\ge 1$. It follows from \eqref{3.4} that \begin{align*} T_n(g)T_n(g)^*&=(T_n(s)+T_n(h){\bf j})(T_n(s)^*-{\bf j}T_n(h)^*)\\ &=T_n(s)T_n(s)^*+T_n(h){\bf j}T_n(s)^*-T_n(s){\bf j}T_n(h)^*+T_n(h)T_n(h)^*\\ &=T_n(s)T_n(s)^*+T_n(h)T_n(h)^*\\ &\quad +(T_n(h)T_n(s)^\top-T_n(s)T_n(h)^\top){\bf j}.
\end{align*} Therefore, ${\bf I}_{n}-T_n(g)T_n(g)^*=A_1+A_2{\bf j}$ where $$ A_1={\bf I}_{n}-T_n(s)T_n(s)^*-T_n(h)T_n(h)^*, \quad A_2=T_n(s)T_n(h)^\top-T_n(h)T_n(s)^\top $$ and the statement follows immediately, by Remark \ref{R:2.2}.\qed \smallskip Note that positive semidefiniteness of the diagonal blocks in \eqref{3.5} is equivalent to the inequality $|s(\zeta)|^2+|h(\zeta)|^2\le 1$ holding for all $\zeta\in\D$, which is necessary (since $|g(\zeta)|^2=|s(\zeta)|^2+|h(\zeta)|^2$) for $g$ to be in $\mathcal {QS}$ but not sufficient. \bibliographystyle{amsplain}
\section{Introduction} The helium atom and helium-like ions are the simplest many-body systems, containing two electrons which interact among themselves in addition to their interaction with the nucleus. Two-electron systems are therefore ideal candidates for studying electron correlation effects. The non-relativistic Hamiltonian of a two-electron system with a nuclear charge $Z$ is given by \begin{equation} \mathrm{H} = \frac{1}{2}\, \left[p_1^2 + p_2^2\right] - Z\, \left[\frac{1}{r_1} + \frac{1}{r_2} \right] + \frac{1}{|\mathbf{r}_1-\mathbf{r}_2|} \end{equation} where the first term corresponds to the sum of the kinetic energies of the two electrons, the second term to the sum of the interactions between each of the electrons and the nucleus, and the last term to the electron correlation interaction between the two electrons. The second and the last terms form the potential energy function of a bound two-electron system. If the Hamiltonian is used to solve the time-independent Schr\"{o}dinger equation \begin{equation} \mathrm{H} \Phi_n(\mathbf{r}_1, \mathbf{r}_2) = E_n \Phi_n(\mathbf{r}_1, \mathbf{r}_2) \end{equation} for any eigenstate $\Phi_n(\mathbf{r}_1, \mathbf{r}_2)$ of the system, the eigenenergies $E_n$ for the particular state are obtained. The major problem in many-body systems is the correlation term which, coupled with the fact that the wavefunction of the system is never exactly known, complicates the reduction of the many-body Schr\"odinger equation to a single-particle equation. This makes the solution of the eigenvalue problem difficult. One therefore has to rely on approximation methods to solve such a problem in order to obtain the correct eigenenergies and eigenvectors, which may further be used to estimate many physical parameters like transition matrices, expectation values, polarizabilities, and many others. Difficult theoretical approaches have been used in the past to deal with the electron correlation problem. Some of these approaches include the variational Hylleraas method \cite{Hylleraas1929, Drake1999}, the coupled channels method \cite{Barna2003}, the configuration interaction method \cite{Hasbani2000}, and the explicitly correlated basis and complex scaling method \cite{Scrinzi1998}. At present only the Hylleraas method, which includes the interelectronic distance as an additional free co-ordinate, yields the known absolute accuracy of the ground-state energy of the helium atom \cite{Pekeris1959}. Configuration interaction methods have also proved to be accurate, but they are quite expensive computationally. To overcome this computational challenge, especially for really large systems, single active electron (SAE) methods become advantageous, but they also require some approximations in developing the model potentials \cite{Parker2000, Parker1998} which can further be used to generate the eigenvectors and energies. The development of SAE models has become an active field of study taking different approximations \cite{Chang1976} like the independent particle approximation (IPA), multi-configurational Hartree-Fock (HF) \cite{Szabo1996}, density functional theory (DFT) \cite{Kohn1965}, the random phase approximation (RPA) \cite{Linderberg1980}, and many others. The major limitation of SAE approximations is the inability to explain multiple-electron features like double excitation, simultaneous excitation and ionization, and double ionization, but progress is being made towards the realization of these features.
In this paper, an alternative multipole expansion is proposed. Based on this expansion, new modified spherical Bessel type functions are generated. In addition, we suggest an analytic expression \begin{equation} \begin{split} \frac{1}{|\mathbf{r}_1-\mathbf{r}_2|} &= {\frac{1}{\sqrt{r_1^2 + r_2^2}}\,\exp{\left\lbrace \frac{\mathbf{r}_1\cdot\mathbf{r}_2}{r_1^2 + r_2^2} \right\rbrace}} \label{eq:ct0} \end{split} \end{equation} to describe the electron-electron interaction term. \section{The Alternative Multipole Expansion} The correlation term can be written as \begin{equation} \frac{1}{|\mathbf{r}_1-\mathbf{r}_2|} = \frac{1}{r_>}\, (1 -2tx + t^2)^{-1/2} \label{eq:ct1} \end{equation} where ${t= \frac{r_<}{r_>}}$ and $r_<$ ($r_>$) is the lesser (greater) of the two electronic radial distances. In terms of Legendre polynomials, equation (\ref{eq:ct1}) is conventionally expressed as \cite{amo:BetheandSalpeter1957} \begin{equation} \frac{1}{|\mathbf{r}_1-\mathbf{r}_2|} = \sum_{l=0}^{\infty} \frac{r_<^l}{r_>^{l+1}} P_l(\cos \theta) \label{eq:ct2} \end{equation} where $P_l(\cos \theta)$ are the Legendre polynomials of order $l$, and $\theta$ is the angle between the electron position vectors. In the alternative framework, the correlated term \begin{equation} \begin{split} (1 -2tx + t^2)^n &= \sum_{s=0}^{\infty} \left(\begin{matrix} n \\ s \end{matrix} \right)\, (y_0(t))^{n-s} (y_1(t))^{s}\,x^s\\ &= (y_0(t))^{n}\,\sum_{s=0}^{\infty} \left(\begin{matrix} n \\ s \end{matrix} \right)\, \left(\frac{y_1(t)}{y_0(t)}\right)^{s}\,x^s \label{eq:ct3} \end{split} \end{equation} in equation (\ref{eq:ct1}) is expressed as a binomial expansion, similar to the Gegenbauer polynomial expansion \cite{cp:Gregory2011, Abramowitz1965} with ${n=-1/2}$, where we have defined the functions ${y_0(t)= 1 + t^2}$ and ${y_1(t)= -2\, t}$. This is the point of departure from equation (\ref{eq:ct2}): here the correlated term is expanded as a summation over powers $x^s$, with ${s\geq 0}$ as the summation index and $t$-dependent coefficients. The next step involves re-writing the expansion \begin{equation} \begin{split} (1 -2tx + t^2)^n = (y_0(t))^{n}\,& \sum_{l=0}^{\infty}\,\sum_{s=0}^{\infty}\, \beta_s^{(\hat{l}/2)} \left(\begin{matrix} n \\ 2s + l \end{matrix} \right) \\ &\times \left(\frac{y_1(t)}{y_0(t)}\right)^{2s+l}\,P_l(x) \label{eq:ct4} \end{split} \end{equation} with $x^s$ expressed in terms of the Legendre polynomials, whose symmetry relations are of practical significance in the simplification of integrals in spherical co-ordinates. The coefficients $\beta_s^{(\hat{l}/2)}$ carry the intrinsic connection between the index $s$ of $x^s$ and the Legendre polynomials $P_l(x)$, with ${\hat{l} = l}$ for even $l$ and ${\hat{l} = l-1}$ for odd $l$. The exact recursive pattern for the coefficients $\beta_s^{(\hat{l}/2)}$ is subject to further investigation. Below, we present the pattern \begin{equation} \begin{split} \beta_s^{(0)} &= \frac{(2l+1)}{(2l+2s+1)}\\ \beta_s^{(1)} &= \frac{(2l+1)\,2^1\,(s+1)}{(2l+2s+1)\,(2l+2s-1)}\\ \beta_s^{(2)} &= \frac{(2l+1)\,2^2(s+1)\,(s+2)}{(2l+2s+1)(2l+2s-1)(2l+2s-3)}\\ \vdots\\ \beta_s^{(k)} &= \frac{(2l+1)\,2^k(s+k)!\,(2l+2s-(2k+1))!!}{(2l+2s+1)!!\,s!} \label{eq:ct5} \end{split} \end{equation} corresponding to ${l \leq 5}$ but generalized for all $l$ values.
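As a numerical aid, the closed-form pattern of equation (\ref{eq:ct5}) can be evaluated directly. The short Python sketch below (with helper names of our own choosing) computes $\beta_s^{(\hat{l}/2)}$ for any $s$ and $l$:
\begin{verbatim}
from math import factorial

def dfact(n):
    # Double factorial n!!, with the convention (-1)!! = 0!! = 1.
    r = 1
    while n > 1:
        r, n = r * n, n - 2
    return r

def beta(s, l):
    # beta_s^{(k)} with k = l/2 for even l and k = (l-1)/2 for odd l.
    k = (l - l % 2) // 2
    num = (2*l + 1) * 2**k * factorial(s + k) * dfact(2*l + 2*s - (2*k + 1))
    return num / (dfact(2*l + 2*s + 1) * factorial(s))
\end{verbatim}
For instance, \texttt{beta(0, 0)} returns $1$ and \texttt{beta(1, 0)} returns $1/3$ (up to floating point), in agreement with $\beta_s^{(0)}=(2l+1)/(2l+2s+1)$.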
Substituting ${n=-1/2}$ and the variables $y_0(t)$ and $y_1(t)$ into equation (\ref{eq:ct4}) and simplifying leads to \begin{equation} \begin{split} \frac{1}{\sqrt{1 -2tx + t^2}} = & \frac{1}{\sqrt{1+t^2}}\,\sum_{l=0}^{\infty}\,\sum_{s=0}^{\infty}\, \beta_s^{(\hat{l}/2)} \frac{(2l + 4s+1)!!}{(2s+l)!}\\ & \times \left(\frac{t}{1+t^2}\right)^{2s+l}\,P_l(x). \label{eq:ct6} \end{split} \end{equation} The correlation interaction in equation (\ref{eq:ct1}) can then be expressed as a multipole summation series \begin{equation} \begin{split} \frac{1}{r_1\, \sqrt{1 -2tx + t^2}} = \frac{4\,\pi}{r_1\,\sqrt{1+t^2}}\sum_{l=0}^{\infty}\,\sum_{m=-l}^{l}\,\tilde{j}_l(t)\, Y_l^{m*}(\hat{r}_1)Y_l^{m}(\hat{r}_2), \label{eq:ct7} \end{split} \end{equation} where $Y_l^{m}$ are the spherical harmonics and \begin{equation} \begin{split} \tilde{j}_l(t) = \frac{1}{2l+1}\sum_{s=0}^{\infty}\, \beta_s^{(\hat{l}/2)} \frac{(2l + 4s+1)!!}{(2s+l)!}\, \left(\frac{t}{1+t^2}\right)^{2s+l}\label{eq:ct8} \end{split} \end{equation} are the corresponding modified spherical Bessel type functions. If one considers that ${t=\tan \alpha}$, then using the trigonometric relations ${1+\tan^2 \alpha = \sec^2 \alpha}$ and ${\sin 2\alpha =2\sin \alpha \cos \alpha}$, the modified spherical Bessel type functions simplify to \begin{equation} \begin{split} \tilde{j}_l(\alpha) = \frac{1}{2l+1}\sum_{s=0}^{\infty}\, \beta_s^{(\hat{l}/2)} \frac{(2l + 4s+1)!!}{(2s+l)!}\, \left(\frac{1}{2}\sin 2 \alpha\right)^{2s+l}\label{eq:ct9}. \end{split} \end{equation} The properties of the modified spherical Bessel type functions presented here need to be investigated further. Intuitively, we think that they belong to the family of hyperspherical functions, which usually have recurrence relations. Equation (\ref{eq:ct9}) integrates the two electron co-ordinates as a correlated pair with ${r_1 =h \cos \alpha}$ and ${r_2 =h \sin \alpha}$, where $h$ is the distance between the two interacting electrons, equivalent to the hypotenuse of a right-angled triangle formed by the orthogonal vectors $\mathbf{r}_1$ and $\mathbf{r}_2$.
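To make equation (\ref{eq:ct9}) concrete, the truncated series can be evaluated numerically; the sketch below (names ours, with the $\beta$ helper restated from the previous sketch so the snippet is self-contained) computes $\tilde{j}_l(\alpha)$ from a finite number of terms:
\begin{verbatim}
import math

def dfact(n):
    # Double factorial n!!, with (-1)!! = 0!! = 1.
    r = 1
    while n > 1:
        r, n = r * n, n - 2
    return r

def beta(s, l):
    k = (l - l % 2) // 2
    return ((2*l + 1) * 2**k * math.factorial(s + k)
            * dfact(2*l + 2*s - (2*k + 1))
            / (dfact(2*l + 2*s + 1) * math.factorial(s)))

def j_tilde(l, alpha, terms=20):
    # Truncated series; converges for |sin(2*alpha)| < 1, i.e., r1 != r2.
    x = 0.5 * math.sin(2.0 * alpha)
    total = sum(beta(s, l) * dfact(2*l + 4*s + 1)
                / math.factorial(2*s + l) * x**(2*s + l)
                for s in range(terms))
    return total / (2*l + 1)
\end{verbatim}
As a check, \texttt{j\_tilde(0, 0.0)} returns $1$, consistent with the leading term of $\tilde{j}_0$ listed below.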
The first four orders of the modified spherical Bessel type functions, each with the first four terms of the expansion, are:
\begin{equation}
\begin{split}
\tilde{j}_0(\alpha) &= 1 + \frac{1}{3}\,\frac{5!!}{2!\,2^2}\, \sin^2(2\alpha) + \frac{1}{5}\,\frac{9!!}{4!\,2^4}\, \sin^4(2\alpha) \\& + \frac{1}{7}\,\frac{13!!}{6!\,2^6} \sin^6(2\alpha) + \cdots \\
\tilde{j}_1(\alpha) &= \frac{1}{2}\, \sin(2\alpha) + \frac{1}{5}\,\frac{7!!}{3!\,2^3}\, \sin^3(2\alpha) + \frac{1}{7}\,\frac{11!!}{5!\,2^5}\, \sin^5(2\alpha) \\ & + \frac{1}{9}\,\frac{15!!}{7!\,2^7}\, \sin^7(2\alpha) + \cdots \\
\tilde{j}_2(\alpha) &= \frac{2}{5\times 3}\, \frac{5!!}{2!\,2^2} \sin^2(2\alpha) + \frac{4}{7 \times 5}\, \frac{9!!}{4!\,2^4}\, \sin^4(2\alpha)\\ &+ \frac{6}{9 \times 7}\, \frac{13!!}{6!\,2^6}\, \sin^6(2\alpha) + \frac{8}{11 \times 9}\,\frac{17!!}{8!\,2^8}\, \sin^8(2\alpha) + \cdots \\
\tilde{j}_3(\alpha) &= \frac{2}{7 \times 5}\, \frac{7!!}{3!\,2^3}\,\sin^3(2\alpha) + \frac{4}{9 \times 7}\,\frac{11!!}{5!\,2^5}\, \sin^5(2\alpha)\\ &+ \frac{6}{11 \times 9}\,\frac{15!!}{7!\,2^7}\, \sin^7(2\alpha) + \frac{8}{13 \times 11}\,\frac{19!!}{9!\,2^9}\, \sin^9(2\alpha) + \cdots
\label{eq:ct10}
\end{split}
\end{equation}
%
If one considers only the first term of each modified spherical Bessel type function, then the correlation term can be expressed as
\begin{equation}
\begin{split}
\frac{1}{|\mathbf{r}_1-\mathbf{r}_2|} &\approx \frac{\cos \alpha}{r_1}\, \sum_{l=0}^{\infty} (2l+1)\,\left(\frac{1}{2}\sin 2 \alpha\right)^{l} P_l(\cos \theta) \\
&\approx \frac{1}{\sqrt{r_1^2 + r_2^2}}\, \sum_{l=0}^{\infty} (2l+1)\, \left(\frac{r_1r_2}{r_1^2 + r_2^2}\right)^{l} P_l(\cos \theta).
\label{eq:ct11}
\end{split}
\end{equation}
Our analytical expression in equation (\ref{eq:ct0}) is obtained from an intuitive consideration of this alternative multipole expansion series. The simplification using trigonometry in equation (\ref{eq:ct9}) implies that the two interacting electrons are mutually orthogonal, as expected from the principles of quantum mechanics. This geometry further simplifies the correlation interaction to
\begin{equation}
\begin{split}
\frac{1}{|\mathbf{r}_1-\mathbf{r}_2|} = \frac{1}{\sqrt{r_1^2 + r_2^2}},
\label{eq:ct12}
\end{split}
\end{equation}
which still couples the two radial co-ordinates and needs to be disentangled further. The proposed alternative multipole expansion, or any other method, can be used to approximate this coupled interaction, employing the fact that the vectors are orthogonal to each other in order to simplify the problem. Equation (\ref{eq:ct12}) is identical to the hyperradius introduced by Macek \cite{Macek1968} in the hyperspherical method. As opposed to the hyperspherical method, in which the Hamiltonian of the two-electron system is expressed in terms of the hyperradius and the hyperangles \cite{Macek1968}, in this work we introduce separability of the Hamiltonian, leading to an independent-particle solution of the Schr\"odinger equation but with the correlation effects fully embedded in the single-electron Hamiltonian.
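Since equation (\ref{eq:ct0}) is a closed-form expression, its accuracy is easy to probe numerically. The sketch below (our own illustration) compares it with the exact Coulomb kernel for a few random geometries, and confirms that it reduces to equation (\ref{eq:ct12}) and becomes exact when $\mathbf{r}_1$ and $\mathbf{r}_2$ are orthogonal:
\begin{verbatim}
# Sketch (ours): compare the proposed closed form, eq. (ct0), with the
# exact Coulomb kernel 1/|r1 - r2| for a few sample geometries.
import numpy as np

rng = np.random.default_rng(0)

def ct0(r1, r2):
    h2 = r1 @ r1 + r2 @ r2
    return np.exp((r1 @ r2) / h2) / np.sqrt(h2)

for _ in range(5):
    r1, r2 = rng.normal(size=3), rng.normal(size=3)
    exact = 1.0 / np.linalg.norm(r1 - r2)
    print(f"exact = {exact:8.4f}   eq.(ct0) = {ct0(r1, r2):8.4f}")

# For mutually orthogonal vectors the exponent vanishes and eq. (ct0)
# reduces to eq. (ct12), 1/sqrt(r1^2 + r2^2), which is then exact:
r1, r2 = np.array([1.3, 0.0, 0.0]), np.array([0.0, 0.7, 0.0])
print(np.isclose(1.0 / np.linalg.norm(r1 - r2), ct0(r1, r2)))  # True
\end{verbatim}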
\section{Helium-like System Pseudopotential}
Using the alternative multipole expansion, we developed the non-relativistic helium-like system pseudopotential
\begin{equation}
V(r) = -\frac{Z}{r} + \frac{1}{2} V_{\mathrm{scr}}(r,r')
\label{eq:pot1}
\end{equation}
for the independent particle Hamiltonian, where the first term is the interaction between the active electron and the nuclear charge $Z$, and $V_{\mathrm{scr}}$ is the central screening potential resulting from the other electron, given by equation (\ref{eq:ct12}). The factor of $1/2$ is based on the assumption that the correlation energy is shared equally between the two correlated electrons. This assumption should be accurate if the two electrons have identical quantum states (or identical principal quantum numbers). We have considered the two electrons to be indistinguishable, correlated, and likely to exchange their relative positions. Minimising the potential function in equation (\ref{eq:pot1}), by differentiating it with respect to either of the radial co-ordinates and equating the derivative to zero, yields the relation
\begin{equation}
\begin{split}
\frac{1}{\sqrt{r_1^2 + r_2^2}} = \frac{\sqrt[3]{2Z}}{r_{1}} = \frac{\sqrt[3]{2Z}}{r_{2}},
\label{eq:ct13}
\end{split}
\end{equation}
which introduces separability of the correlated term. We have used equation (\ref{eq:ct13}) as the screening potential in equation (\ref{eq:pot1}) to solve the time independent Schr\"odinger equation using an independent particle model,
\begin{equation}
\begin{split}
\langle E \rangle = \sum_{i=1,2} & [\langle \phi_{\beta}(\mathbf{r}_i)\mid \mathrm{H}_i \mid \phi_{\beta'}(\mathbf{r}_i) \rangle \\
&+ \langle \phi_{\beta}(\mathbf{r}_i)\mid \mathrm{H}_i \mid \phi_{\beta'}(\mathbf{r}_i) \rangle \delta_{\beta \beta'}]
\label{eq:ip1}
\end{split}
\end{equation}
where the two-electron wavefunction has been expanded in terms of the Slater-type orbitals and $\beta =\{n,l,m\}$ defines the set of quantum numbers corresponding to any particular state. The first term of equation (\ref{eq:ip1}) emanates from the direct integral, where no electron exchange is involved, while the second term is the exchange integral, which is non-vanishing only if $\beta = \beta'$. The interaction Hamiltonian $\mathrm{H}_n$,
\begin{equation}
\mathrm{H}_n = \frac{1}{2}\,p_i^2 + V_n^{\mathrm{eff}}(\mathbf{r}_i,\mathbf{p}_i, \mathbf{s}_i),
\label{eq:ham1}
\end{equation}
is defined for each independent electron, with the index $n\geq 0$ taking integer values. The effective potential $V_n^{\mathrm{eff}}$ is a summation
\begin{equation}
V_n^{\mathrm{eff}}(\mathbf{r}_i,\mathbf{p}_i, \mathbf{s}_i) = \sum_{i=0}^n V_{i}
\label{eq:pot2}
\end{equation}
over some of the interaction terms drawn from equation ($39.14$) of Bethe and Salpeter \cite{amo:BetheandSalpeter1957}. Here we explicitly mention and simplify only the interactions that have been included in this work. The first is the non-relativistic potential term $V_{0}$,
\begin{equation}
V_{0}(\mathbf{r}_i) = -\frac{Z}{r_i} + \frac{\sqrt[3]{2Z + \chi_{\mathrm{corr}}(Z)}}{2\,r_i},
\label{eq:pot2a}
\end{equation}
evaluated using equations (\ref{eq:pot1}) and (\ref{eq:ct13}); it incorporates the electron correlation term, with the scalar function $\chi_{\mathrm{corr}}(Z)$ defined below.
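Anticipating the tables presented below, the non-relativistic part of this pseudopotential admits a simple closed-form check: treating $V_0$ as a pure Coulomb potential with effective charge ${Z_{\mathrm{eff}} = Z - \sqrt[3]{2Z + \chi_{\mathrm{corr}}(Z)}/2}$ reproduces the tabulated $H_0$ eigenvalues via ${E_0 = -2Z_{\mathrm{eff}}^2}$ in atomic units. We stress that this closed form, and in particular its overall factor of $2$ (consistent with the direct-plus-exchange doubling in equation (\ref{eq:ip1}) for identical orbitals), is our own inference from the tabulated values, not a formula quoted in the text:
\begin{verbatim}
# Sketch (ours): closed-form check of the non-relativistic eigenvalues.
#   Z_eff = Z - (2 Z)^(1/3) / 2        (eq. (ct13) with chi_corr = 0)
#   E_0   = -2 * Z_eff**2              (factor 2: our inference, see text)
for Z in range(1, 7):
    z_eff = Z - (2.0 * Z)**(1.0 / 3.0) / 2.0
    print(Z, round(-2.0 * z_eff**2, 4))
# -> -0.2739, -2.9103, -8.7482, -18.0, -30.7764, -47.1476
\end{verbatim}
These numbers agree with the $H_0$ column of tables \ref{tab1} and \ref{tab2} to the quoted precision.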
The spin-spin interaction correction term $V_{1}$ can be simplified as
\begin{equation}
\begin{split}
V_{1}(\mathbf{r}_i)& = \frac{\alpha^2}{2\,r_{ij}^3}\left(\mathbf{s}_i \cdot \mathbf{s}_j - \frac{3(\mathbf{s}_i \cdot \mathbf{r}_{ij})(\mathbf{s}_j \cdot \mathbf{r}_{ij})}{\mathbf{r}_{ij}^2} \right)\\
& = \frac{1}{2c^2}\,\frac{-2\,(\mathbf{s}_i \cdot \mathbf{s}_j)}{r_{ij}^3} \\
& \approx \frac{[2Z + \chi_{\mathrm{corr}}(Z)]}{(2c)^2\,r_i^3}
\label{eq:pot2b}
\end{split}
\end{equation}
having used equation (\ref{eq:ct13}), where $c$ is the reciprocal of the fine structure constant ($\alpha$). With the current definition of the electron correlation term, the first term of this spin-spin interaction as defined in equation (39.14) of Bethe and Salpeter \cite{amo:BetheandSalpeter1957} vanishes because of the boundary conditions of the wavefunction and the Dirac delta condition. The approximation in equation (\ref{eq:pot2b}) is based on the classical argument that $(\mathbf{s}_i \cdot \mathbf{s}_j)$ equals $-1/4$ instead of the quantum mechanically prescribed value of $-3/4$ for the singlet states. This is equivalent to considering only a third of the singlet spin-spin interaction term, because the spins are assumed to be aligned parallel or antiparallel to one particular direction. The term $V_{2}$,
\begin{equation}
\begin{split}
V_{2}(\mathbf{r}_i)& =\frac{1}{2} \frac{1}{(2c)^2}\, \nabla \cdot (-\nabla V(\mathbf{r}_i)) \\
& = -\frac{1}{2} \frac{1}{(2c)^2}\,\frac{\partial ^2 V(r_i)}{\partial r_i^2} \\
& = \frac{1}{(2c)^2}\,\left( \frac{Z}{r_i^3} - \frac{\sqrt[3]{2Z + \chi_{\mathrm{corr}}(Z)}}{2\,r_i^3} \right)
\end{split}
\end{equation}
is a characteristic of the Dirac theory, with the potential function $V(\mathbf{r}_i)$ already defined in equation (\ref{eq:pot1}). The classical relativistic correction $V_{3}$ to the interaction between the electrons reads
\begin{equation}
\begin{split}
V_{3}(\mathbf{r}_{12}, \mathbf{p}_{i,j}) &= -\frac{1}{2\,c^2}\, \frac{1}{r_{12}} \left(\mathbf{p}_i \cdot \mathbf{p}_j + \frac{\mathbf{r}_{ij}\cdot (\mathbf{r}_{ij} \cdot \mathbf{p}_{i})\mathbf{p}_j }{\mathbf{r}_{ij}} \right)\\
&= -\frac{1}{2c^2}\, \frac{1}{r_{12}}\,[2\,(\mathbf{p}_i \cdot \mathbf{p}_j)]\\
&= -\frac{1}{2c^2}\, \frac{1}{r_{12}}\,[p_i^2 + p_j^2 - P^2]\\
&= -\frac{1}{2c^2}\, \frac{\sqrt[3]{2Z + \chi_{\mathrm{corr}}(Z)}}{r_{i}}\,[p_i^2 + p_j^2 - P^2].
\end{split}
\end{equation}
Here ${P = | \mathbf{p}_i - \mathbf{p}_j |}$ vanishes if $i=j$. This term reduces to
\begin{equation}
V_{3}(\mathbf{r}_{i}, \mathbf{p}_{i}) = -\frac{1}{2c^2}\, \frac{\sqrt[3]{2Z + \chi_{\mathrm{corr}}(Z)}}{r_{i}}\,p_i^2
\end{equation}
if it is separated for each of the individual electron co-ordinates. The finite mass correction term $V_{4}$,
\begin{equation}
V_{4}(\mathbf{r}_i) = -\frac{1}{M}\, \mathrm{H}_{\infty},
\label{eq:pot2c}
\end{equation}
has been obtained from reference \cite{amo:Bransden1990}, with $\mathrm{H}_{\infty}$ the Hamiltonian of the system without the finite mass correction and ${1/M}$ the electron-to-nucleus mass ratio. The scalar function $\chi_{\mathrm{corr}}$ in $V_{n=0,1,2,3}$,
\begin{equation}
\chi_{\mathrm{corr}}(Z) = \gamma^Z\,Z\,(Z-2),
\label{eq:pot3}
\end{equation}
is a fitting function optimized to offer the additional correction $V_{5}$ for the ionic systems, but it vanishes for the helium atom. The adjustable parameter ${\gamma=1.0821}$ yields good quantitative agreement with experimental results for the groundstate energies of the ionic systems investigated.
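To get a feel for what the fitting function does, one can repeat the closed-form estimate of the previous sketch with $\chi_{\mathrm{corr}}$ switched on. The snippet below (ours; it ignores the small corrections $V_1$--$V_4$, so it only approximates the $H_5$ column of table \ref{tab2}) shows how $\chi_{\mathrm{corr}}$ moves the ionic energies toward the exact values while leaving helium untouched:
\begin{verbatim}
# Sketch (ours): shift of the non-relativistic estimate due to chi_corr,
# eq. (pot3).  Corrections V_1..V_4 are ignored, so these numbers only
# approximate the H_5 column of table 2.
def chi_corr(Z, gamma=1.0821):
    return gamma**Z * Z * (Z - 2)          # vanishes for helium (Z = 2)

def E0(Z, chi=0.0):
    z_eff = Z - (2*Z + chi)**(1/3) / 2.0   # screening from eq. (ct13)
    return -2.0 * z_eff**2                 # normalization as inferred before

for Z in (1, 3, 4, 5, 6):
    print(Z, round(E0(Z), 4), "->", round(E0(Z, chi_corr(Z)), 4))
# e.g. Z = 3: -8.7482 -> -7.4496 (exact: -7.28);
#      Z = 1: -0.2739 -> -0.5285 (exact: -0.528)
\end{verbatim}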
We have used the Hamiltonian as defined in equation (\ref{eq:ham1}) and diagonalized it in a B-spline spectral basis set having a box radius of $400$ au, $1200$ B-splines of order $k=10$, and a non-linear knot sequence. As already stated, the goal was to test the efficiency of the method proposed in this work. With the analytical expression for the electron correlation term, it was also found desirable to include some corrections to the Schr\"odinger equation for two-electron systems that could be evaluated without further complexity. The inclusion of the correction terms also shows the relative importance of the additional interactions as compared to the non-relativistic terms. The non-relativistic eigenvalue for the groundstate energy of helium resulting from this method is in good agreement with the experimental value, as shown in table \ref{tab1}. Furthermore, the discrepancy between the experimental groundstate potential and the obtained theoretical non-relativistic value is properly accounted for by including some of the correction terms, such as the spin-spin coupling, the classical relativistic correction, the characteristic Dirac theory term, and the finite mass correction term. We can therefore consider the theoretical value $-2.9103$ from our calculations to be the correct non-relativistic threshold groundstate energy for the helium atom. From this hypothesis, the very accurate groundstate energy calculated by the Hylleraas method \cite{Drake1999} includes all the corrections beyond the non-relativistic energy. This explanation may be justified by the fact that the accurate value obtained using the Hylleraas method is very close to the experimentally obtained values of Bergeson \textit{et al.} \cite{Bergeson1998} and Eikama \textit{et al.} \cite{Eikama1997}. The experimental values are expected to incorporate all orders of correction beyond the non-relativistic Hamiltonian to the groundstate energy value, including all QED and finite mass corrections. The method adopted in this work, if ascertained to be valid, would represent a significant numerical feat: perturbative methods would then account for the most significant terms responsible for the groundstate energy of the helium atom without using any adjustable parameters. We have also determined the excitation energies of the $2s^2$ and $2p^2$ autoionizing states from this method to be $59.22$ eV and $59.20$ eV respectively, against the known experimental values of $57.8$ eV and $62.2$ eV \cite{Rudd1964}. Although the present method seems to be almost exact for the groundstate eigenvalue of helium, the discrepancy between the theoretical and the experimental values of the $2s^2$ and $2p^2$ singlet autoionizing states shows that the corrections included may still not be sufficient for an accurate description of these states.
\begin{table}[!ht]
\centering
\begin{tabular}{lllllll}
\hline
State & $H_{0}$ & $H_{1}$ & $H_{2}$ & $H_{3}$ & $H_{4}$ & Exp. \\
\hline \hline
$1s^2$& -2.9103 & -2.8996 &-2.8968 & -2.9040 & -2.9036 & -2.9037 \\
$2s^2$& -0.7276 & -0.7263 & -0.7259 & -0.7268 & -0.7267 & -0.7787 \\
$2p^2$& -0.7276 & -0.7276 &-0.7276 & -0.7276 &-0.7275 & -0.6169 \\
\hline
\end{tabular}
\caption{Some numerically calculated eigenvalues using the present model potential versus the reference experimental values for the helium groundstate \cite{Bergeson1998, Eikama1997} and autoionizing levels \cite{Rudd1964}.
Here ${H_{0}=\frac{1}{2}\,p_i^2 + V_0}$ is the theoretical non-relativistic Hamiltonian, and ${H_{n=1,2, \cdots}= H_0 + \sum_{i=1}^n V_i}$ is the effective Hamiltonian including the correction terms $V_i$ defined in equations (\ref{eq:pot2a})-(\ref{eq:pot2c}).}
\label{tab1}
\end{table}
We extended the method to other two-electron systems with nuclear charges $1\le Z \le 6$. Table \ref{tab2} shows the groundstate energies for the two-electron systems corresponding to the present non-relativistic model and the extra corrections outlined above. The additional $H_5$ data are obtained when the additional correction introduced by the fitting function defined in equation (\ref{eq:pot3}) is included.
\begin{table}[!ht]
\centering
\begin{tabular}{llllllll}
\hline
$Z$ & $H_{0}$ & $H_{1}$ & $H_{2}$ & $H_{3}$ & $H_{4}$& $H_{5}$ & Exact \\
\hline \hline
$1$& -0.2739 & -0.2737 & -0.2736 & -0.2738 & -0.2737 & -0.5285 &-0.528 \\
$2$& -2.9103 & -2.8996 & -2.8968 & -2.9040 & -2.9036 & -2.9036 &-2.9037 \\
$3$& -8.7482 & -8.6756 &-8.6555 & -8.6966 & -8.6959 & -7.3794 & -7.28 \\
$4$& -18.000 & -17.746 &-17.675 & -17.803 & -17.802 & -13.913 &-13.66 \\
$5$& -30.776 & -30.137 & -29.963 & -30.257 & -30.255 & -22.340 & -22.03 \\
$6$& -47.148 & -45.825 &-45.474 &-46.041 & -46.040 & -32.413 & -32.41 \\
\hline
\end{tabular}
\caption{Similar to table \ref{tab1} but for the groundstate eigenvalues of helium-like systems. All the columns, except the additional $H_{5}$ column, assume a zero value of the fitting function defined in equation (\ref{eq:pot3}). The exact values have been extracted from ref. \cite{amo:Bransden1990}.}
\label{tab2}
\end{table}
From table \ref{tab2}, one can observe a systematic deviation of the present results from the exact values of the groundstate energies of the ionic systems, despite the success of the method for the helium atom. However, if the present model is applied with the additional correction introduced by the fitting function defined in equation (\ref{eq:pot3}), quite good agreement with the expected results is achieved. This seems to suggest that there is an additional potential, present in the ionic species due to the net charge in the system but absent in the neutral atom.
\section{Conclusion}
We have developed an alternative multipole expansion of the electron-electron correlation term which suggests that the two interacting electrons are mutually perpendicular. This simplifies the interaction term, making the Schr\"odinger equation separable in the two electron co-ordinates. We use this separability to obtain a non-relativistic threshold energy of the helium atom in its groundstate. We also show perturbatively that the experimental groundstate energy value includes additional higher order corrections to the calculated non-relativistic energy. The classical relativistic corrections and the spin-spin coupling offer the most dominant corrections to the non-relativistic limit. Furthermore, the present method predicts a systematic deviation of the calculated non-relativistic groundstate energies of the two-electron ions relative to the experimental values, despite its success with the helium atom. A slight modification of the derived electron correlation term is intuitively introduced to account for this discrepancy. If the present method is justified, the discrepancy in the ionic helium-like systems suggests that there are additional interactions, due to the charge surplus in the system, not accounted for by the known corrections to the two-electron problem.
Despite the success of the proposed method with the groundstate energy of the helium atom, the large deviations for the helium-like ions as well as for the autoionizing levels warrant further investigation. One can also see the possibility of improving this method further as a solution to the many-body problem.
\section{Acknowledgement}
We are grateful to NACOSTI and DAAD for funding this project, and to AG Moderne Optik of Humboldt Universit\"at zu Berlin for providing the computational resources used in this work.
\bibliographystyle{apsrev}
\section{Introduction}
In this work, we aim to study a particular class of higher curvature gravity models which we call Ricci polynomial gravity. Models of higher curvature gravity have been extensively studied for at least two reasons. One reason is to improve the ultraviolet behavior in the quest for a consistent quantum theory of gravity. The other one comes from cosmological studies, where the higher curvature corrections play a role as a remedy for the extra unknown source which is often called dark energy. Of all models of higher curvature gravity, the best studied examples are $f(R)$ gravity \cite{Bergmann,Buchdahl,Starobinsky,Felice,Nojiri} and Lanczos-Lovelock gravity \cite{Lanczos,Lovelock,Charmousis}. Other important models include the so-called critical gravity \cite{Lu1,Deser0}, conformal or Weyl gravity \cite{Weyl,O'Raifeartaigh,Fradkin} and the various versions of massive gravities \cite{vanDam:1970vg,Zakharov:1970cc,Deser1,Deser2,Bergshoeff,Li}. Despite the fact that many of the models behave well in one aspect or another, many of them contain ghosts or propagating massive degrees of freedom. In fact, the ghost free conditions and the removal of massive degrees of freedom impose very strong constraints on model construction for higher curvature gravities.

The actions of the class of models which we will study have a very particular structure: the whole Lagrangian density consists of a polynomial of order $N$ in the Ricci curvature, with the $k$-th order term being $\overset{(k)}{\mathcal{R}}=R^{\mu_1}{}_{\mu_2} R^{\mu_2}{}_{\mu_3}\cdots R^{\mu_k}{}_{\mu_1}$. By properly choosing the coefficients in front of each term in the Lagrangian density, the models can be made free of ghosts as well as scalar degrees of freedom around at least one of the permitted vacua in any spacetime dimension $n>2$ and for any order $N\geq 4$. To our knowledge, this type of higher curvature gravity has never been studied in the literature before.

\section{Field equation and vacuum solutions}
Before introducing the action of Ricci polynomial gravity, let us first prepare some notation for later convenience. For any nonnegative integer $k$, we define the tensors $\overset{(k)}{\mathcal{R}}_{\mu\nu}$ on the spacetime manifold $(\mathcal{M},g_{\mu\nu})$,
\begin{align*}
& \overset{(0)}{\mathcal{R}}_{\mu\nu} = g_{\mu\nu}, \\
& \overset{(1)}{\mathcal{R}}_{\mu\nu} = R_{\mu\nu}, \\
& \overset{(k)}{\mathcal{R}}_{\mu\nu} = \overset{(k-1)}{\mathcal{R}}_{\mu\sigma} R^\sigma{}_\nu,
\end{align*}
where $R_{\mu\nu}$ is the Ricci tensor. Clearly we have
\[
\underbrace{R_\mu{}^{\sigma_1} R_{\sigma_1}{}^{\sigma_2} \cdots R_{\sigma_{k-2}}{}^{\sigma_{k-1}} R_{\sigma_{k-1}\nu}}_{k}
= \underbrace{R_\nu{}^{\sigma_1} R_{\sigma_1}{}^{\sigma_2} \cdots R_{\sigma_{k-2}}{}^{\sigma_{k-1}} R_{\sigma_{k-1}\mu}}_{k}
\]
so that $\overset{(k)}{\mathcal{R}}_{\mu\nu}$ is symmetric in $\mu\leftrightarrow \nu$. Naturally we define $\overset{(k)}{\mathcal{R}}$ as the trace of $\overset{(k)}{\mathcal{R}}_{\mu\nu}$:
\[
\overset{(k)}{\mathcal{R}} = g^{\mu\nu} \overset{(k)}{\mathcal{R}}_{\mu\nu}.
\]
Now let us consider the Ricci polynomial gravity of order $N$ in $n$ spacetime dimensions, which may be denoted the RicPoly${}_{(n,N)}$ model for short. The action is given as
\begin{align}
S = \frac{1}{2\kappa} \int \mathrm{d}^n x \sqrt{|g|} \sum^N_{k=1} \alpha_k \overset{(k)}{\mathcal{R}}.
\label{action}
\end{align}
For $N=1$, or $\{\alpha_k\}=\{\alpha_1,0,\cdots,0\}$, standard general relativity should be recovered; therefore $\alpha_1$ is fixed to be unity.
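Viewing $R^\mu{}_\nu$ as a matrix, the contraction $\overset{(k)}{\mathcal{R}}$ is simply the trace of its $k$-th matrix power; in particular, on an Einstein space with ${R_{\mu\nu}=\chi g_{\mu\nu}}$ one has ${\overset{(k)}{\mathcal{R}}=n\chi^k}$, a fact used repeatedly below. A minimal numerical sketch (ours) of this identity:
\begin{verbatim}
# Sketch (ours): the invariants R^(k) as traces of matrix powers of the
# mixed-index Ricci tensor R^mu_nu; on an Einstein space R^(k) = n chi^k.
import numpy as np

def R_k(R, k):
    # trace of R^mu1_mu2 R^mu2_mu3 ... R^muk_mu1
    return np.trace(np.linalg.matrix_power(R, k))

n, chi = 4, 0.3
R = chi * np.eye(n)          # Einstein-space Ricci tensor (mixed indices)
for k in range(1, 7):
    assert np.isclose(R_k(R, k), n * chi**k)
\end{verbatim}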
The first order variation of the action reads
\begin{align}
\delta S =& \frac{1}{2\kappa}\int \mathrm{d}^n x \sqrt{|g|} \Big(\sum^N_{k=1} \alpha_k \overset{(k)}{\mathcal{H}}_{\mu\nu}\Big) \delta g^{\mu\nu} + \frac{1}{2\kappa}\int \mathrm{d}^n x \sqrt{|g|} \nabla^\sigma \Big(\sum^N_{k=1} k \alpha_k \overset{(k)}{\mathcal{B}}_\sigma \Big),
\label{dS}
\end{align}
where the $\overset{(k)}{\mathcal{B}}_\sigma$ are complicated vector expressions which are linear in $\delta g_{\mu\nu}$ and its covariant derivatives, and
\begin{align}
\overset{(k)}{\mathcal{H}}_{\mu\nu} = k \overset{(k)}{\mathcal{R}}_{\mu\nu} - \frac{1}{2} \overset{(k)}{\mathcal{R}} g_{\mu\nu} + \frac{k}{2} \big(\square \overset{(k-1)}{\mathcal{R}}_{\mu\nu} + g_{\mu\nu} \nabla_\alpha \nabla_\beta \overset{(k-1)}{\mathcal{R}}{}^{\alpha\beta}\big) - k \nabla^\sigma \nabla_{(\mu} \overset{(k-1)}{\mathcal{R}}{}_{\nu)\sigma},
\label{Hform1}
\end{align}
which are divergence-free for all $k\geq 1$,
\begin{align}
\nabla^\mu \overset{(k)}{\mathcal{H}}_{\mu\nu} = 0.
\label{divfree}
\end{align}
Assuming that we can add an appropriate boundary counter term to cancel the extra total divergence term in \eqref{dS}, we arrive at the following field equation,
\begin{align}
H_{\mu\nu} = \sum^N_{k=1} \alpha_k \overset{(k)}{\mathcal{H}}_{\mu\nu} = 0.
\label{fe}
\end{align}
The divergence-free condition \eqref{divfree} ensures that we can supplement the original action \eqref{action} by ordinary minimally coupled matter sources. Returning now to the sourceless field equation, it is easy to see that all solutions of the equation $R_{\mu\nu} = \chi g_{\mu\nu}$ are also solutions of \eqref{fe}, with the constant $\chi$ determined by
\begin{align}
A_{(n,N)}(\chi) = 0, \qquad A_{(n,N)}(x)\equiv\sum^N_{k=1} \alpha_k \big(k-\frac{n}{2}\big) x^k.
\label{Aseries}
\end{align}
For arbitrarily chosen coefficients $\{\alpha_k, k\geq2\}$, nonzero solutions to \eqref{Aseries} are not guaranteed to exist. Nevertheless, $\chi=0$ is always a solution, which means that Ricci-flat manifolds (including the Minkowski spacetime) are always vacuum solutions of Ricci polynomial gravity. We will see in the next section that the ghost free condition does not allow all the coefficients $\alpha_k$ to be chosen arbitrarily. On the contrary, they must be subject to a set of algebraic constraints, which, when fulfilled, do allow for nonzero $\chi$ to arise as a solution to \eqref{Aseries}. This would, in turn, mean that the RicPoly${}_{(n,N)}$ model may be ghost free and possess multiple vacua, with at least some vacuum having a nonzero effective cosmological constant. This explains why we did not include a bare cosmological constant term (i.e. the $k=0$ term) in the action \eqref{action}.

\section{Linear perturbations}
Assume that the field equation admits an Einstein manifold with metric $\bar g_{\mu\nu}$ satisfying
\begin{align}
R_{\mu\nu}(\bar g)=\chi \bar g_{\mu\nu}
\label{bgEin}
\end{align}
as a vacuum, and let us consider fluctuations around this background metric. The spacetime metric with fluctuation is denoted $g_{\mu\nu} = \bar g_{\mu\nu} + \delta g_{\mu\nu}$, where $\delta g_{\mu\nu}$ is to be considered as a ``small'' deviation from the background metric. It is customary to denote $\delta g_{\mu\nu}$ as $h_{\mu\nu}$, and also $h^{\mu\nu}=-\delta g^{\mu\nu} = -\bar g^{\mu\rho}\bar g^{\nu\sigma} h_{\rho\sigma}$, $h=h^\mu{}_\mu$, $A_\mu = \nabla^\nu h_{\mu\nu}$, where $\nabla_\mu$ is the covariant derivative associated with the background metric $\bar g_{\mu\nu}$.
Expanding the field equation to linear order in $h_{\mu\nu}$, we get
\begin{align}
\delta H_{\mu\nu} &= \sum_{k=1}^{N} \alpha_k \Big[ k \delta \overset{(k)}{\mathcal{R}}_{\mu\nu} - \frac{n}{2} \chi^k h_{\mu\nu} - \frac{k}{2} \chi^{k-1} \bar{g}_{\mu\nu} (\nabla_\sigma A^\sigma - \square h - \chi h) \Big] \nonumber \\
&\quad+ \sum^{N-1}_{k=1} \alpha_{k+1} \Big[ \frac{k+1}{2} \big(\Box \delta \overset{(k)}{\mathcal{R}}_{\mu\nu} + \bar g_{\mu\nu} \nabla^\alpha \nabla^\beta \delta \overset{(k)}{\mathcal{R}}_{\alpha\beta}\big) - (k+1) \nabla^\sigma \nabla_{(\mu} \delta \overset{(k)}{\mathcal{R}}{}_{\nu)\sigma}\Big] = 0,
\label{lin1}
\end{align}
where
\begin{align}
\delta \overset{(k)}{\mathcal{R}}_{\mu\nu} &= - \Big( \frac{1}{2} \sum^{k}_{i=1} \overset{(i-1)}{\mathcal{R}}_\mu{}^\alpha \overset{(k-i)}{\mathcal{R}}_\nu{}^\beta \Delta_L + \sum^{k-1}_{i=1} \overset{(i)}{\mathcal{R}}_\mu{}^\alpha \overset{(k-i)}{\mathcal{R}}_\nu{}^\beta \Big) h_{\alpha\beta}, \label{dR2} \\
\Delta_L h_{\mu\nu} &= \square h_{\mu\nu} + \nabla_\mu \nabla_\nu h -2 \big(\nabla_{(\mu} A_{\nu)} + R^\rho{}_{(\mu}h_{\nu)\rho} - R_\mu{}^{\alpha}{}_{\nu}{}^{\beta} h_{\alpha\beta}\big), \label{dR1}
\end{align}
and all curvature tensors appearing in \eqref{lin1}-\eqref{dR2} are evaluated on the background geometry. The operator $\Delta_L$ is known as the Lichnerowicz operator. Using \eqref{bgEin}, we can simplify \eqref{dR2} into the form
\begin{align}
\delta \overset{(k)}{\mathcal{R}}_{\mu\nu} = - \Big(\frac{1}{2} k \chi^{k-1} \Delta_L + (k-1) \chi^k \Big) h_{\mu\nu}.
\label{dREin}
\end{align}
Further, if we take the background spacetime to be a maximally symmetric manifold with
\begin{align}
R_{\mu\alpha\nu\beta}(\bar g) = \frac{\chi}{n-1} (\bar g_{\mu\nu}\bar g_{\alpha\beta}-\bar g_{\mu\beta}\bar g_{\alpha\nu}), \quad R_{\mu\nu}(\bar g) = \chi \bar g_{\mu\nu}, \quad R(\bar g) = n \chi,
\label{maxsym}
\end{align}
eq.~\eqref{dR1} can be simplified into
\begin{align}
& \Delta_L h_{\mu\nu} = \square h_{\mu\nu} + \nabla_\mu \nabla_\nu h - 2 \nabla_{(\mu} A_{\nu)} - \frac{2n\chi}{n-1} h_{\mu\nu} + \frac{2\chi}{n-1} h \bar g_{\mu\nu}.
\label{Lichmax}
\end{align}
Inserting \eqref{Lichmax} into \eqref{dREin} and then into \eqref{lin1}, we get the fully expanded linear perturbation equation. We need to proceed differently for background vacua with $\chi=0$ and $\chi\neq 0$.

\textbf{(a) Minkowski background}

If $\chi = 0$, i.e. for the Minkowski background, eq.~\eqref{lin1} is significantly simplified, yielding
\begin{align}
& \alpha_2 \big[\square^2 h_{\mu\nu} + \eta_{\mu\nu} \square (\square h - \partial^\sigma A_\sigma) - \square \partial_\mu \partial_\nu h -2 \square \partial_{(\mu} A_{\nu)} +2 \partial_\mu\partial_\nu \partial^\sigma A_\sigma\big] \nonumber\\
&\qquad +\big[\square h_{\mu\nu} - \eta_{\mu\nu} (\square h - \partial^\sigma A_\sigma) + \partial_\mu \partial_\nu h -2 \partial_{(\mu} A_{\nu)}\big] = 0,
\label{le}
\end{align}
where the second line corresponds to the $\alpha_1(=1)$ terms. The absence of $\alpha_k (k>2)$ in \eqref{le} implies that the terms of order $k>2$ do not affect the perturbations around the Minkowski background; the only complexity comes from the quadratic Ricci tensor term. If, in addition, we let $\alpha_2 = 0$, the linearized field equation \eqref{le} becomes the same as the linearized standard Einstein equation, which describes a normal massless spin-2 field, so that the fluctuation around the Minkowski vacuum is ghost free.
If $\alpha_2 \neq 0$, and for example when $n = 4$, we can decompose $h_{\mu\nu}$ into its irreducible parts by defining
\[
\bar{h}_{\mu\nu} = h_{\mu\nu} - \frac{1}{4} h \eta_{\mu\nu}
\]
and choose the gauge
\[
\partial^\nu \bar{h}_{\mu\nu} = A_\mu - \frac{1}{4} \partial_\mu h = 0;
\]
then the linearized field equation \eqref{le} becomes
\begin{align}
\square \Big(\square + \frac{1}{\alpha_2}\Big) \bar{h}_{\mu\nu} + (\eta_{\mu\nu} \square - \partial_\mu\partial_\nu) \Big(\square - \frac{1}{2 \alpha_2}\Big) h =0,
\end{align}
from which we see that either the trace part or the trace free part contains a ghost degree of freedom. The same would be true in any other spacetime dimension. Combining these results, we conclude that $\alpha_2 = 0$ is the ghost free condition in the case with $\chi = 0$.

\textbf{(b) (A)dS background}

If $\chi \neq 0$, it will be convenient to work in the gauge $A_\mu = \nabla_\mu h$. The trace of eq.~\eqref{lin1} gives
\begin{align}
\big[ B_{(n,N)}(\chi) \square + \chi A'_{(n,N)}(\chi) \big] h =0,
\label{tr}
\end{align}
where $A'_{(n,N)}(\chi)$ is the derivative of $A_{(n,N)}(x)$ with respect to $x$ evaluated at $x=\chi$, and
\[
B_{(n,N)}(\chi) =\frac{1}{4}\sum^{N-1}_{k=1} \alpha_{k+1}(k+1) \big[n(k-2)+2\big] \chi^k.
\]
So, as long as $A'_{(n,N)}(\chi) \neq 0$, eq.~\eqref{tr} will describe a propagating scalar mode, except when
\[
B_{(n,N)}(\chi) = 0,
\]
in which case we learn from eq.~\eqref{tr} that $h =0$, and thus $A_\mu = \nabla^\nu h_{\mu\nu} = 0$, i.e. $h_{\mu\nu}$ is traceless and transverse; therefore eq.~\eqref{lin1} becomes
\begin{align}
\delta H_{\mu\nu}& =-\Big\{\frac{C_{(n,N)}(\chi)}{4\chi} \Box^2 + \Big[\frac{n-1}{2} - C_{(n,N)}(\chi)\Big] \frac{1}{n-1} \Box \nonumber \\
&\qquad \quad + \Big[\big(1-\frac{n}{2}\big) C_{(n,N)}(\chi) - (n-1) \Big] \frac{\chi}{(n-1)^2} \Big\} h_{\mu\nu}= 0,
\label{h1}
\end{align}
where
\[
C_{(n,N)}(\chi) = \sum^{N-1}_{k=1} \alpha_{k+1} k (k+1) \chi^k.
\]
If $C_{(n,N)}(\chi) = 0$, we get
\begin{align}
\Big(\Box - \frac{2\chi}{n-1} \Big) h_{\mu\nu} =0,
\label{h2}
\end{align}
which describes a massless spin-2 mode; if $C_{(n,N)}(\chi) \neq 0$, eq.~\eqref{h1} can be rearranged in the form
\begin{align}
\Big(\Box - \frac{2\chi}{n-1} - m_1^2\Big) \Big(\Box - \frac{2\chi}{n-1} - m_2^2\Big) h_{\mu\nu} =0,
\label{h3}
\end{align}
with the two mass parameters $m_{1,2}$ satisfying
\[
m_1^2 m_2^2=-\frac{n}{2} \Big(\frac{2 \chi}{n-1}\Big)^2.
\]
Thus, in the case when $C_{(n,N)}(\chi) \neq 0$, we will have a massive spin-2 field and a spin-2 ghost. To summarize, in the case $\chi \neq 0$, if we wish to remove the propagating scalar mode altogether, the ghost free condition reads
\begin{align}
A_{(n,N)}(\chi)= 0, \quad B_{(n,N)}(\chi)=0, \quad C_{(n,N)}(\chi)= 0, \quad A'_{(n,N)}(\chi) \neq 0.
\label{GF}
\end{align}
Looking into the detailed structure of the polynomials $A_{(n,N)}(\chi), B_{(n,N)}(\chi)$ and $C_{(n,N)}(\chi)$, we see that whenever the first 3 equalities in \eqref{GF} are satisfied, we have
\[
A'_{(n,N)}(\chi)=1-\frac{n}{2} < 0 \qquad(\mbox{for }n>2),
\]
so the last condition is not really necessary. An equivalent form of the ghost free condition \eqref{GF} can be written as
\begin{align}
\sum^{N-1}_{k=1} \alpha_{k+1} \chi^k = -\sum^{N-1}_{k=1} \alpha_{k+1} k \chi^k = \sum^{N-1}_{k=1} \alpha_{k+1} k^2 \chi^k = \frac{2}{n}-1.
\label{GF2}
\end{align}
For any $N>1$, this amounts to solving for $N-1$ coefficients from 3 equations.
Therefore, for $N<4$, \eqref{GF2} gives rise to a system of over-determined algebraic equations for the coefficients $\{\alpha_k, 2\leq k \leq N\}$, and it turns out that such a system of over-determined equations is inconsistent. If $N=4$, \eqref{GF2} constitutes a determined system of algebraic equations, whose solution is characterized purely by the values of $n$ and $\chi$. If $N>4$, \eqref{GF2} constitutes an under-determined system, which implies that $N-4$ of the coefficients $\{\alpha_k\}$ can be chosen arbitrarily. Since $\chi$ itself is to be considered as a free parameter in the action, there will be as many as $N-3$ free parameters in the ghost free RicPoly${}_{(n,N)}$ models.

It is worth noting that the above ghost free conditions ensure only the removal of ghost fluctuations around the vacuum \eqref{maxsym}. However, since $A_{(n,N)}(x)$ is a polynomial of order $N$ in $x$, it can have up to $N$ roots in principle, and each of the roots gives a vacuum for the RicPoly${}_{(n,N)}$ model. So, the removal of ghost fluctuations around one of the vacua does not imply the removal of ghost fluctuations around other vacua. For $N>4$ models we can take the liberty of deliberately choosing the values of the extra $N-4$ free parameters to make the model ghost free also around some other vacua. For instance, if $N=5$ we can set $\alpha_2=0$ to make the model ghost free around the Minkowski vacuum. In any case, we regard the fact that the RicPoly${}_{(n,N)}$ model can be ghost free around some of the vacua but not around all of them as a feature rather than a defect, because, in cosmology, ghost-like degrees of freedom are sometimes necessary to facilitate accelerating expansion.

Tables \ref{tab1} through \ref{tab3} list the coupling coefficients for all the RicPoly${}_{(n,N)}$ models of order $N=4,5,6$ in dimensions $n=3,4,\cdots,10$ which are ghost and scalar free around the maximally symmetric vacuum described by \eqref{maxsym} with $\chi\neq 0$. We did not fix the free parameter $\alpha_2$ in the case $N=5$, nor $\alpha_2,\alpha_3$ in the case $N=6$, because there is no obvious reason to prefer any particular extra vacuum, besides the maximally symmetric one given by \eqref{maxsym}, around which to impose ghost freedom.
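For ${N=4}$, \eqref{GF2} is a determined $3\times 3$ linear system for the combinations ${\alpha_{k+1}\chi^{k}}$, and solving it numerically reproduces the entries of table \ref{tab1}. A short sketch (ours) of this check:
\begin{verbatim}
# Sketch (ours): solve the ghost-free condition (GF2) for N = 4.  The
# unknowns are a_k = alpha_{k+1} chi^k, k = 1, 2, 3; e.g. n = 4 returns
# (-3, 4, -1.5), i.e. alpha_2 = -3/chi, alpha_3 = 4/chi^2,
# alpha_4 = -3/(2 chi^3), matching table 1.
import numpy as np

def ghost_free_N4(n):
    M = np.array([[1.0, 1.0, 1.0],     # sum_k a_k
                  [1.0, 2.0, 3.0],     # sum_k k a_k
                  [1.0, 4.0, 9.0]])    # sum_k k^2 a_k
    r = 2.0 / n - 1.0
    return np.linalg.solve(M, np.array([r, -r, r]))

for n in range(3, 11):
    print(n, ghost_free_N4(n))
\end{verbatim}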
\begin{table}[htbp] \begin{center}\begin{tabular}{cccc} \hline \hline $n$ & $\alpha_2 $ & $\alpha_3 $ & $\alpha_4 $ \\ \hline 3 & $-\frac{2}{\chi} $ & $\frac{8}{3\chi^2} $ & $-\frac{1}{\chi^3} $ \\ 4 & $-\frac{3}{\chi} $ & $\frac{4}{\chi^2} $ & $-\frac{3}{2\chi^3} $ \\ 5 & $-\frac{18}{5\chi}$ & $\frac{24}{5\chi^2} $ & $-\frac{9}{5\chi^3} $ \\ 6 & $-\frac{4}{\chi} $ & $\frac{16}{3\chi^2} $ & $-\frac{2}{\chi^3} $ \\ 7 & $-\frac{30}{7\chi}$ & $ \frac{40}{7\chi^2}$ & $-\frac{15}{7\chi^3}$ \\ 8 & $-\frac{9}{2\chi} $ & $ \frac{6}{\chi^2} $ & $-\frac{9}{4\chi^3} $ \\ 9 & $-\frac{14}{3\chi}$ & $ \frac{56}{9\chi^2}$ & $-\frac{7}{3\chi^3} $ \\ 10 & $-\frac{24}{5\chi}$ & $\frac{32}{5\chi^2} $ & $-\frac{12}{5\chi^3}$ \\ \hline \hline \end{tabular} \caption{Coupling coefficients for $N=4$ models}\label{tab1} \end{center} \end{table} \begin{table}[htbp] \begin{center}\begin{tabular}{cccc} \hline \hline $n$ & $\alpha_3 $ & $\alpha_4 $ & $\alpha_5 $ \\ \hline 3 & $-\frac{3\alpha_2}{\chi}-\frac{10}{3\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{5}{\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{2}{\chi^4} $ \\ 4 & $-\frac{3\alpha_2}{\chi}-\frac{5}{\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{15}{2\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{3}{\chi^4} $ \\ 5 & $-\frac{3\alpha_2}{\chi}-\frac{6}{\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{9}{\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{18}{5\chi^4} $ \\ 6 & $-\frac{3\alpha_2}{\chi}-\frac{20}{3\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{10}{\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{4}{\chi^4} $ \\ 7 & $-\frac{3\alpha_2}{\chi}-\frac{50}{7\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{75}{7\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{30}{7\chi^4} $ \\ 8 & $-\frac{3\alpha_2}{\chi}-\frac{15}{2\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{45}{4\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{9}{2\chi^4} $ \\ 9 & $-\frac{3\alpha_2}{\chi}-\frac{70}{9\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{35}{3\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{14}{3\chi^4} $ \\ 10 & $-\frac{3\alpha_2}{\chi}-\frac{8}{\chi^2} $ & $\frac{3\alpha_2}{\chi^2}+\frac{12}{\chi^3} $ & $-\frac{\alpha_2}{\chi^3}-\frac{24}{5\chi^4} $ \\ \hline \hline \end{tabular} \caption{Coupling coefficients for $N=5$ models}\label{tab2} \end{center} \end{table} \begin{table}[htbp] \begin{center}\begin{tabular}{cccc} \hline \hline $n$ & $\alpha_4 $ & $\alpha_5 $ & $\alpha_6 $ \\ \hline 3 & $-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{5}{\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{8}{\chi^4} $ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{10}{3\chi^5}$ \\ 4 & $-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{15}{2\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{12}{\chi^4} $ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{5}{\chi^5} $ \\ 5 & $-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{9}{\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{72}{5\chi^4}$ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{6}{\chi^5} $ \\ 6 & $-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{10}{\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{16}{\chi^4} $ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{20}{3\chi^5}$ \\ 7 & $-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{75}{7\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{120}{7\chi^4}$ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{50}{7\chi^5}$ \\ 8 & 
$-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{45}{4\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{18}{\chi^4} $ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{15}{2\chi^5}$ \\
9 & $-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{35}{3\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{56}{3\chi^4}$ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{70}{9\chi^5}$ \\
10 & $-\frac{6\alpha_2}{\chi^2}-\frac{3\alpha_3}{\chi}-\frac{12}{\chi^3} $ & $\frac{8\alpha_2}{\chi^3}+\frac{3\alpha_3}{\chi^2}+\frac{96}{5\chi^4}$ & $-\frac{3\alpha_2}{\chi^4}-\frac{\alpha_3}{\chi^3}-\frac{8}{\chi^5} $ \\
\hline \hline
\end{tabular}
\caption{Coupling coefficients for $N=6$ models}
\label{tab3}
\end{center}
\end{table}
It is illustrative to consider the energy of the massless tensor perturbation mode \eqref{h2}, which may be evaluated by calculating the second order variation of the action \eqref{action} around the maximally symmetric background \eqref{maxsym}. To be more concrete, since the first variation of the action \eqref{action} reads
\[
\delta S = \frac{1}{2 \kappa} \int \mathrm{d}^n x \sqrt{|g|} H_{\mu\nu} \delta g^{\mu\nu} = -\frac{1}{2 \kappa} \int \mathrm{d}^n x \sqrt{|g|} H_{\mu\nu} h^{\mu\nu},
\]
the second variation can be written as
\[
\delta^2 S = -\frac{1}{2 \kappa} \int \mathrm{d}^n x \sqrt{|g|}\, \Big(h^{\mu\nu} \delta H_{\mu\nu}+\delta h^{\mu\nu} H_{\mu\nu} +\frac{1}{2}H_{\alpha\beta} h^{\alpha\beta}g^{\mu\nu}h_{\mu\nu} \Big),
\]
where the last two terms can be dropped because $H_{\mu\nu}$ vanishes due to the field equation, and $\delta H_{\mu\nu}$ in the first term is given by \eqref{h1} if the background metric is chosen to be \eqref{maxsym}. Under our choice of ghost free condition $C_{(n,N)}(\chi) = 0$, we have
\[
\delta H_{\mu\nu} = - \frac{1}{2}\Big( \Box - \frac{2 \chi}{n-1} \Big) h_{\mu\nu},
\]
and hence,
\begin{align*}
\delta^2 S &= -\frac{1}{2 \kappa} \int \mathrm{d}^n x \sqrt{|\bar{g}|} (h^{\mu\nu} \delta H_{\mu\nu}) \\
&= -\frac{1}{2 \kappa} \int \mathrm{d}^n x \sqrt{|\bar{g}|} \Big( \frac{1}{2} \nabla^{\sigma} h^{\mu\nu} \nabla_\sigma h_{\mu\nu} + \frac{\chi}{n-1} h^{\mu\nu} h_{\mu\nu} \Big) \equiv \int \mathrm{d}^n x \mathcal{L}_2,
\end{align*}
where the second equality holds up to a total divergence. The momentum density conjugate to $h_{\mu\nu}$ reads
\begin{align*}
\pi^{\mu\nu} = \frac{\delta \mathcal{L}_2}{\delta(\dot{h}_{\mu\nu})} = - \frac{1}{2 \kappa} \sqrt{|\bar{g}|} \nabla^0 h^{\mu\nu}.
\end{align*}
Therefore the canonical Hamiltonian is
\begin{align*}
H &= \int \mathrm{d}^{n-1} x (\pi^{\mu\nu} \dot{h}_{\mu\nu} - \mathcal{L}_2) = - \frac{1}{2\kappa} \int \mathrm{d}^{n-1}x \sqrt{|\bar g|} (\nabla^0 h^{\mu\nu} \dot{h}_{\mu\nu} - h^{\mu\nu} \delta H_{\mu\nu})\\
&= - \frac{1}{2\kappa} \int \mathrm{d}^{n-1}x \sqrt{|\bar g|} \nabla^0 h^{\mu\nu} \dot{h}_{\mu\nu},
\end{align*}
where in the last step we have used the perturbation equation to set $\delta H_{\mu\nu}=0$. The final result for the Hamiltonian is then identified as the energy of the perturbation mode, which is the same as that of standard Einstein gravity and is known to be positive \cite{Lu1}. It remains to consider the case $B_{(n,N)}(\chi)\neq 0$, i.e. when the propagating scalar mode is present.
In this case, eq.~\eqref{tr} can be rearranged in the form
\[
\left(\square+\frac{\chi A'_{(n,N)}(\chi)}{B_{(n,N)}(\chi)}\right)h=0;
\]
therefore, depending on the value of the expression $\frac{\chi A'_{(n,N)}(\chi)} {B_{(n,N)}(\chi)}$, $h$ may correspond to either a massive/massless scalar mode or a scalar ghost mode. Identifying the complete ghost free conditions when the scalar mode is present is a complicated task, which we postpone to future work.

\section{RicPoly${}_{(4,4)}$: black hole and cosmological solutions}
Among the various choices for $N$ and $n$, the simplest and most promising example, which is ghost free around at least one of the vacua with $\chi\neq 0$, is RicPoly${}_{(4,4)}$. The action for this simplest model reads
\begin{align}
S = \frac{1}{2\kappa}\int \mathrm{d}^4 x \sqrt{|g|} \Big(R - \frac{3}{\chi}\overset{(2)}{\mathcal{R}} + \frac{4}{\chi^2}\overset{(3)}{\mathcal{R}} -\frac{3}{2\chi^3} \overset{(4)}{\mathcal{R}} \Big).
\label{poly44}
\end{align}
The vacuum structure of this model is governed by the zeros of the polynomial $A_{(4,4)}(x)$, which, under the ghost free condition, reads
\[
A_{(4,4)}(x)=-\frac{3x^4}{\chi^3}+\frac{4x^3}{\chi^2}-x.
\]
In this case, there are 4 real roots
\begin{align}
x_i\equiv \Lambda_i=0,\chi,\frac{1}{6}(1+\sqrt{13})\chi,\frac{1}{6}(1-\sqrt{13})\chi,
\label{cosmo}
\end{align}
each of which corresponds to an Einstein manifold $R_{\mu\nu}^{(i)}=\Lambda_i g_{\mu\nu}^{(i)}$ ($i=1,2,3,4$) as a vacuum solution of the model \eqref{poly44}. Note that these are not necessarily the same as the maximally symmetric vacua around which we made linear perturbations in the last section. Instead, they can be generic Einstein manifolds with effective cosmological constants $\Lambda_i$. By the way, let us mention that the existence of multiple vacua has also been observed in \cite{Boulware:1985wk}, which, of course, studies a different model of extended gravity. To be more specific, let us consider static spherically symmetric black hole solutions. The usual Schwarzschild-(A)dS black holes fall into this class of solutions:
\begin{align}
&\mathrm{d}s^2=-f_i(r)\mathrm{d}t^2 +f_i(r)^{-1}\mathrm{d}r^2 +r^2\mathrm{d}\Omega_2^2,\\
&f_i(r)=1-\frac{2M}{r}-\frac{\Lambda_i r^2}{3}.
\end{align}
This looks just like standard general relativity with a cosmological constant. What makes a big difference is that there are simultaneously 4 such solutions with the same mass parameter $M$ but different $\Lambda_i$, given in \eqref{cosmo}. Since the radii of the event horizons of the black holes are determined by the equations
\[
f_i(r)=0,
\]
it is clear that black holes of the same mass in Ricci polynomial gravity can have different radii. Consequently, when considering the thermodynamic behaviors of these black holes, the temperatures will be different for the different solutions. Moreover, since Ricci polynomial gravity belongs to the class of higher curvature gravities, the entropies of the black holes should be evaluated by the Wald entropy formula rather than by the Bekenstein-Hawking formula, and it is naturally expected that they differ from the entropies of the black holes with the same metric in Einstein gravity. The entropies of black holes with different $\Lambda_i$ should also differ from each other even though the mass parameters are identical. These features suggest that there may be rich structures, especially concerning phase transitions, in the thermodynamics of these black hole solutions.
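Both the vacuum structure and the horizon structure can be checked with a few lines of numerics. The sketch below (ours) finds the four roots of $A_{(4,4)}$, reproducing \eqref{cosmo}, and then solves ${f_i(r)=0}$ for a sample mass parameter, chosen small enough that the de Sitter branches possess horizons:
\begin{verbatim}
# Sketch (ours): vacua and horizons of the RicPoly_(4,4) model (chi = 1).
import numpy as np

chi = 1.0
# A_(4,4)(x) = -3 x^4/chi^3 + 4 x^3/chi^2 - x; roots give eq. (cosmo)
lambdas = np.sort(np.roots([-3/chi**3, 4/chi**2, 0.0, -1.0, 0.0]).real)
print(lambdas)   # -> (1-sqrt(13))/6, 0, (1+sqrt(13))/6, 1  (times chi)

M = 0.1          # sample mass; f_i(r) = 0  <=>  -Lambda r^3/3 + r - 2M = 0
for lam in lambdas:
    horizons = [r.real for r in np.roots([-lam/3.0, 0.0, 1.0, -2.0*M])
                if abs(r.imag) < 1e-9 and r.real > 0]
    print(f"Lambda = {lam:+.4f}: horizon radii {sorted(horizons)}")
\end{verbatim}
The output illustrates the point made above: the four branches share the same mass parameter but have different numbers and locations of horizons.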
Before ending, let us say a few words about the cosmological implications of the effective cosmological constants. It is evident from \eqref{cosmo} that, provided $\chi\neq0$ (which is implied when we write the action \eqref{poly44}), the 4 effective cosmological constants will have different signs. For instance, if $\chi>0$, then two of the effective cosmological constants will be positive, one negative and one zero. Different signs of $\Lambda_i$ signify different asymptotics of the solutions, and this can have profound implications when cosmological solutions are being considered. For simplicity, let us present the spatially flat FRW cosmological solutions of the field equations. The FRW metrics are given by
\begin{align}
&\mathrm{d}s^2=-\mathrm{d}t^2 +a_i(t)(\mathrm{d}x^2+\mathrm{d}y^2+\mathrm{d}z^2),\\
&a_i(t)=1,\exp\Big(\frac{2\sqrt{3\chi}}{3}t\Big), \exp\Big(\frac{\sqrt{2\chi}}{3}\sqrt{1+\sqrt{13}}t\Big), \exp\Big(\frac{\sqrt{2\chi}}{3}\sqrt{1-\sqrt{13}}t\Big).
\end{align}
Some of these solutions are complex and should be discarded. For instance, let us assume $\chi>0$. Then the last solution is complex, and we are left with two accelerating expanding solutions and one non-expanding solution. It is tempting to consider the possibility that the universe begins in one accelerating expanding phase, then, after a short period, undergoes a phase transition to the non-expanding phase for another period of time, and finally makes a second transition to the other accelerating phase at late times. This rough picture feels close to the currently observed universe, even without considering the contribution of ordinary matter sources. So, if refined with the contribution of ordinary matter, this may lead to a useful model for the evolution of the universe.

\section{Concluding Remarks}
Our preliminary study has revealed several interesting features of the Ricci polynomial gravities. First of all, the inclusion of terms of higher order in the Ricci curvature improves the ultraviolet behavior. By power counting, such models may be super-renormalizable. Secondly, unlike other higher curvature gravity models, which often develop ghost degrees of freedom, the Ricci polynomial gravity models can be made free of ghosts as well as massive degrees of freedom at least around one of their maximally symmetric vacua, and, provided the order $N$ is high enough, the ghost free conditions are not too restrictive, so that there can be as many as $N-3$ free parameters in the action. The existence of multiple Einstein vacua with different effective cosmological constants is the third feature, which may have profound implications in black hole physics as well as in cosmological contexts. We hope to develop a deeper understanding of these models in future work.

\section*{Notes Added}
Recently, long after the first version of the present work appeared on arXiv, Y.~Z.~Li, H.~S.~Liu and H.~L\"u published the paper \cite{Li:2017ncu} on arXiv, which also studies Ricci curvature-extended gravity models. Their models are quite different from ours, because they also include Ricci scalar factors in the Lagrangian. Even so, it is interesting to observe that the so-called quasi-topological conditions in \cite{Li:2017ncu} are actually synonymous with our ghost free condition. In this sense, our models under the ghost free conditions actually belong to a simpler class of quasi-topological Ricci polynomial gravity models as compared to those in \cite{Li:2017ncu}.
\section*{Acknowledgement}
This work is supported by the National Natural Science Foundation of China under the grant No. 11575088.
\section{Introduction}
The relationship between string theory and perturbative field theories has been thoroughly investigated for many years. The study of non-perturbative effects in string theory and their comparison with field theory is instead much more recent. In particular, only after the introduction of D branes has it been possible to significantly improve our knowledge and address some non-perturbative issues using string theory. For instance, it has been shown~\cite{Witten:1995im,douglas,Green:2000ke} that the instanton sectors of ${\mathcal N}=4$ supersymmetric Yang-Mills (SYM) gauge theory can be described in string theory by systems of D3 and D(--1) branes (or D-instantons) in flat space. In fact, the excitations of the open strings stretching between two D(--1) branes, or between a D3 brane and a D-instanton, are in one-to-one correspondence with the moduli of the SYM instantons in the so-called ADHM construction (for comprehensive reviews on the subject see, for instance, Ref.~\cite{Dorey:2002ik}). The above remarks have been further substantiated~\cite{Billo:2002hm} by showing that the tree-level string scattering amplitudes on disks with mixed boundary conditions for a D3/D(--1) system lead, in the infinite tension limit $\alpha^\prime\to 0$, to the effective action on the instanton moduli space of the SYM theory. Furthermore, it has been proved that the same disk diagrams also yield the classical field profile of the instanton solution, and that these mixed disks effectively act as sources for the various components of the gauge supermultiplet. This approach can be easily adapted to describe SYM theories with $\mathcal{N}=2$ and $\mathcal{N}=1$ supersymmetry and their instantons: instead of considering D3/D(--1) systems in flat space, one simply has to place the branes at suitable orbifold singularities.

This description of gauge theories by means of systems of D-branes has proven to be very useful also in accounting for the deformations induced by non-trivial gravitational backgrounds. For instance, non-commutative gauge theories and their instanton sectors can be efficiently described by considering D3/D(--1) brane systems in a Kalb-Ramond background~\cite{Billo:2005fg}. Similarly, by placing the branes in a particular R-R background, non-anti-commutative gauge theories can be described and the corresponding non-anti-commutative instanton solutions can be determined~\cite{Billo:2004zq}. More recently, the string description of instantons has led to new developments. In fact it has been shown in several different contexts that these stringy instantons may dynamically generate new types of terms in the low-energy effective action of the SYM theory with very interesting phenomenological implications~\cite{recent}.

In this contribution, which is mainly based on Ref.~\cite{Billo:2006jm}, we will concentrate on ${\mathcal N}=2$ SYM theory in four dimensions and study the non-perturbative low energy effective action induced by instantons when a R-R graviphoton background is switched on. We show how to determine the non-perturbative gravitational F-terms in the ${\mathcal N}=2$ prepotential exploiting localization methods~\cite{Nekrasov:2002qd,Losev:2003py}.
The instanton sectors of ${\mathcal N}=2$ SYM theory with gauge group $\mathrm{SU}(N)$ can be described by a system of $N$ fractional D3 branes and $k$ fractional D-instantons in the orbifold $\mathbb{R}^{4}\times \mathbb{C}\times\mathbb{C}^2/\mathbb{Z}_2$, where the orbifold group $\mathbb{Z}_2$ acts as the inversion of the last two complex coordinates. The gauge theory degrees of freedom are described by the massless excitations of the open strings stretching between two D3 branes, and can be assembled into a ${\cal N}=2$ chiral superfield in the adjoint of $\mathrm{SU}(N)$,
\begin{equation}
\label{Pm1}
\Phi(x,\theta) = \phi(x) + \theta\Lambda(x) +\frac 12\,\theta\sigma^{\mu\nu} \theta \,F_{\mu\nu}^+(x) + \cdots
\end{equation}
String amplitudes for the string vertices corresponding to the SYM fields of \eq{Pm1} on disks attached to the D3 branes give rise, in the limit $\alpha'\to 0$ with gauge quantities fixed, to the tree level (microscopic) $\mathcal{N}=2$ action for $\mathrm{SU}(N)$ SYM. We are interested in studying the low energy effective action on the Coulomb branch parameterized by the v.e.v.'s $\langle \Phi_{uv} \rangle = a_u\,\delta_{uv}$ of the adjoint chiral superfields breaking $\mbox{SU}(N) \to \mbox{U}(1)^{N-1}$. From now on we will focus for simplicity on the $\mbox{SU}(2)$ theory broken to $\mathrm{U}(1)$, and thus we will deal with a single chiral superfield $\Phi$ and a single v.e.v. $a$. Up to two derivatives, $\mathcal{N}=2$ supersymmetry constrains the effective action for $\Phi$ to be of the form
\begin{equation}
\label{Seff}
S_{\mbox{\tiny eff}}[\Phi] = \int d^4x \,d^4\theta\, \mathrm{F}(\Phi) + \mathrm{c.c}~,
\end{equation}
where the prepotential $\mathrm{F}(\Phi)$ is a holomorphic homogeneous function of degree two. The main purpose of our discussion is to show how the instanton corrections to the prepotential arise in our string set-up, also in the presence of a non-trivial supergravity background. In particular we will show that the low energy excitations of the D(--1)/D(--1) and D3/D(--1) open strings in the $\mathbb{Z}_2$ orbifold exactly account for the ADHM instanton moduli of the $\mathcal{N}=2$ theory, and that the integration measure on the moduli space is recovered from disk amplitudes. Moreover, we show that the $\mathcal{N}=2$ super-instanton solution for the fields appearing in \eq{Pm1} is obtained by computing one-point open string amplitudes on mixed disks. The fact that the instanton center and its super-partners decouple from the D3/D(--1) and D(--1)/D(--1) interactions shows that these moduli actually play the role of the $\mathcal{N}=2$ superspace coordinates; therefore the instanton induced prepotential $\mathrm{F}(\Phi)$ may be identified with the ``centered partition function'' of D-instantons coupled to $\Phi$~\cite{Fucito:1996ua,Hollowood:2002ds}. This identification opens the way to several generalizations. In particular, one may extend the above procedure to include also non-perturbative gravitational terms in the effective action by computing the instanton partition function in a non-trivial $\mathcal{N}=2$ supergravity background. In our context, this amounts to computing the D(--1)/D(--1) and D3/D(--1) open string interactions in the presence of a closed string background, and to obtaining the deformed integration measure on the instanton moduli space from mixed disk diagrams involving both open and closed string vertices.
In particular we will consider an $\mathcal{N}=2$ graviphoton background with self-dual field strength $\mathcal{F}^+$ and determine the instanton induced prepotential $\mathrm{F}(\Phi, W)$, where $W$ is the Weyl superfield whose first component is related to the graviphoton field strength $\mathcal{F}^+$. In this way we may obtain from mixed disk amplitudes the gravitational F-terms of the form $(R^+)^2\,({\cal F}^+)^{2h-2}$, where $R^+$ is the self-dual Riemann curvature tensor, which have been previously computed~\cite{Antoniadis:1993ze} from topological string amplitudes at genus $h$. Therefore, the application of localization methods to the instanton calculus shows an interesting relation~\cite{Nekrasov:2002qd,Losev:2003py} with the topological string which is worth further investigation.

\section{The D3/D(--1) system}
As we mentioned above, the $k$ instanton sector of a four-dimensional SYM theory with gauge group $\mathrm{SU}(N)$ can be described by a bound state of $N$ D$3$ and $k$ D(--1) branes~\cite{Witten:1995im} of fractional type in the space ${\mathbb{R}^4} \times \mathbb{M}^6$. The amount of supersymmetry possessed by the SYM theory depends on the features of the internal six-dimensional space $\mathbb{M}^6$. If we want to describe ${\cal N}=2$ gauge theories, we can choose $\mathbb{M}^6$ to be the orbifold $\mathbb{C} \times{\mathbb{C}^2/\mathbb{Z}_2}$. In the D3/D(--1) system the string coordinates $X^{\cal M}$ and $\psi^{\cal M}$ (${\cal M}=1,\ldots,10$) obey different boundary conditions on the two types of branes. Specifically, on the D(--1) branes we have Dirichlet boundary conditions in all directions, while on the D3 branes the longitudinal fields $X^\mu$ and $\psi^\mu$ ($\mu=1,2,3,4$) satisfy Neumann boundary conditions, and the transverse fields $X^a$ and $\psi^a$ ($a=5,\ldots,10$) obey Dirichlet boundary conditions. The presence of the space-filling D3 branes and the orbifold projection break the 10-dimensional Euclidean invariance group ${\mbox{SO}(10)}$ into $\mbox{SO}(4)\!\times\! \mbox{SO}(2)\!\times\! \mbox{SO}(4)_{\rm int}$, and therefore the 10-dimensional (anti-chiral) spin fields $S^{\dot{\mathcal{A}}}$ decompose as follows,
\begin{equation}
\label{sdec}
S^{\dot{\mathcal{A}}}\! \to\! (S_{\alpha} S^{-} S_A, S_{\alpha} S^{+} S^{\dot A}, S^{\dot\alpha} S^{-} S^{\dot A}, S^{\dot\alpha} S^{+} S_A)~,
\end{equation}
where $S_\alpha$ ($S^{\dot\alpha}$) are $\mathrm{SO}(4)$ Weyl spinors of positive (negative) chirality, $S_A$ ($S^{\dot A}$) are $\mathrm{SO}(4)_{\rm int}$ Weyl spinors of positive (negative) chirality and $S^\pm$ are $\mathrm{SO}(2)$ spin fields with weight $\pm \frac 12$. The chiral spin fields $S_A$ are even under the orbifold generator, while the anti-chiral ones $S^{\dot A}$ are odd. Since we consider a single type of fractional D3-branes, the Chan-Paton factors for the open string excitations are invariant under the orbifold. The orbifold projection therefore reduces the massless spectrum to the bosonic states which arise by acting on the vacuum with the even fields $\Psi= (\psi_5+{\rm i}\psi_6)/\sqrt{2}$ and $\Psi^{\dagger}= (\psi_5-{\rm i}\psi_6)/\sqrt{2}$, and to the fermionic states associated with the invariant spin fields $S_{\alpha} S^{-} S_A$ or $S^{\dot\alpha} S^{+} S_A$. For our present purposes it is enough to specify the spectrum of excitations of the open strings with at least one end-point on the D-instantons, which, as explained in Ref.~\cite{Billo:2002hm}, describe the ADHM instanton moduli.
Let us first consider the strings that have both ends on the D($-1$) branes and therefore describe the neutral moduli: in the NS sector the physical excitations are $a_\mu$, $\chi$ and $\chi^{\dagger}$, whose corresponding vertex operators are \begin{equation} \label{vertA} V_a(z)={a^\mu}\,\psi_{\mu}(z) \,{\rm e}^{-\phi(z)}~,~~ V_\chi(z)={\chi^{\dagger}}\,\Psi(z)\,{\rm e}^{-\phi(z)}~,~~ V_{\chi^{\dagger}}(z)={\chi}\,\Psi^{\dagger}(z)\,{\rm e}^{-\phi(z)} \end{equation} where $\phi$ is the chiral boson of the superghost bosonization. In the R sector we find eight fermionic moduli, which are conventionally denoted by $M^{\alpha A}$ and $\lambda_{\dot\alpha A}$ and correspond to the following vertex operators \begin{equation} \label{vertM'} V_{M}(z)\,=\, M^{\alpha A}\, S_{\alpha}(z)S^-(z) S_A(z)\,{\rm e}^{-\frac{1}{2}\phi(z)}~, ~~ V_{\lambda}(z)\,=\, {{\lambda_{\dot\alpha A}}}\,S^{\dot\alpha}(z) S^+(z)S^A(z) \,{\rm e}^{-\frac{1}{2}\phi(z)} ~. \end{equation} Let us now consider the open strings that start on a D3 brane and end on a D($-1$) brane, or vice-versa; these describe the charged moduli. These strings are characterized by the fact that the four directions along the D3 branes have mixed boundary conditions. Thus, in the NS sector the four fields $\psi^\mu$ have integer-mode expansions with zero-modes that represent the $\mathrm{SO}(4)$ Clifford algebra, and the corresponding physical excitations are organized into two bosonic Weyl spinors of $\mathrm{SO}(4)$. These are denoted by $w_{\dot\alpha}$ and $\bar w_{\dot\alpha}$, respectively, and are described by the following vertex operators \begin{equation} \label{vertexw} V_w(z) \,=\,{w}_{\dot\alpha}\, \Delta(z)\, S^{\dot\alpha}(z) \,{\rm e}^{-\phi(z)}~,~~ V_{\bar w}(z) \,=\,{\bar w}_{\dot\alpha}\, \bar\Delta(z)\, S^{\dot\alpha}(z)\, {\rm e}^{-\phi(z)}~. \end{equation} Here $\Delta(z)$ and $\bar\Delta(z)$ are the bosonic twist and anti-twist fields with conformal dimension $1/4$, which change the boundary conditions of the $X^\mu$ coordinates from Neumann to Dirichlet and vice-versa by introducing a cut in the world-sheet. In the R sector the fields $\psi^\mu$ have, instead, half-integer mode expansions, so that there are fermionic zero-modes only in the common transverse directions. Thus, the physical excitations of this sector form two fermionic Weyl spinors of ${\rm SO}(4)_{\rm int}$, which are denoted by $\mu^A$ and $\bar \mu^A$, respectively, and correspond to the following vertex operators \begin{equation} \label{vertexmu} V_\mu(z) \,=\,{\mu}^{A}\, \Delta(z)\,S^-(z)S_{A}(z)\, {\rm e}^{-\frac{1}{2}\phi(z)}~~,~~ V_{\bar\mu}(z) \,=\,{{\bar \mu}}^{A}\, \bar\Delta(z)\,S^-(z)S_{A}(z)\, {\rm e}^{-\frac{1}{2}\phi(z)}~.
\end{equation} A systematic analysis~\cite{Billo:2002hm} shows that, in the limit $\alpha'\to 0$, the scattering amplitudes involving the above vertex operators give rise to the following ``action'' \begin{eqnarray} {S_{\rm mod}} = & {\mbox{tr}_k}&\Big\{ -2\,[\chi^{\dagger},a'_\mu][\chi,{a'}^\mu] + \chi^{\dagger}{\bar w}_{\dot\alpha} w^{\dot\alpha}\chi + \chi{\bar w}_{\dot\alpha} w^{\dot\alpha} \chi^{\dagger} \nonumber\\ &&\!\!\!\!\!\!\!\!\!\!-{\mbox{i}}\, \frac{\sqrt 2}{4}\,M^{\alpha A}\epsilon_{AB}[\chi^{\dagger},M_{\alpha}^{B}] + {\mbox{i}}\, \frac{\sqrt 2}{2}\,{\bar \mu}^A \epsilon_{AB} \mu^B\chi^{\dagger} \\ &&\!\!\!\!\!\!\!\!\!\!-{\mbox{i}} D_c \big({w_{\dot\alpha}(\tau^c)^{\dot\alpha}_{\dot\beta} \bar{w}_{\dot\beta} +{\mbox{i}} \bar\eta_{\mu\nu}^c \big[{a'}^\mu,{a'}^\nu\big]}\big) + {\mbox{i}} {\lambda}^{\dot\alpha}_{\,A}\big( {\bar{\mu}^A{w}_{\dot\alpha}+ \bar{w}_{\dot\alpha}{\mu}^A + \big[a'_{\alpha\dot\alpha},{M'}^{\alpha A}\big]\big)}\! \Big\}~, \nonumber \label{smod} \end{eqnarray} where ${\rm tr}_k$ means the trace over ${\mathrm U}(k)$, $\bar\eta^c$ ($c=1,2,3$) are the anti-self dual 't Hooft symbols, and $\tau^c$ are the Pauli matrices. In (\ref{smod}) there also appear three auxiliary fields $D_c$, whose string representation is provided by the following vertex operators~\cite{Billo:2002hm} \begin{equation} \label{vertaux} V_D(z) \,=\, \frac{1}{2} D_c\,\bar\eta_{\mu\nu}^c\,\psi^\nu(z) \psi^\mu(z)~. \end{equation} As is well known~\cite{Dorey:2002ik}, by simply taking ${\rm e}^{-S_{\rm mod}}$ one obtains the measure on the instanton moduli space, while by varying $S_{\rm mod}$ with respect to $D_c$ and ${\lambda}^{\dot\alpha}_{~A}$ one finds the bosonic and fermionic ADHM constraints. Notice that $S_{\rm mod}$ depends neither on the instanton center $x_0^{\mu}$ nor on its super-partners $\theta^{\alpha A}$, which are, respectively, the $\mathrm{U}(1)$ components of $a^{\mu}$ and $M^{\alpha A}$, namely \begin{equation} \begin{aligned} {a'}^\mu &= x_0^\mu\,\mbox{1\!\negmedspace1}_{[k]\times[k]} + y^\mu_c\,T^c~~, \\ {M}^{\alpha A}&=\theta^{\alpha A}\,\mbox{1\!\negmedspace1}_{[k]\times[k]} + {\zeta}^{\alpha A}_c\,T^c~. \end{aligned} \label{xtheta} \end{equation} The moduli $x_0^{\mu}$ and $\theta^{\alpha A}$ actually play the role of the $\mathcal{N}=2$ superspace coordinates, while the remaining moduli, collectively denoted by ${\widehat{\cal M}_{(k)}}$, are the so-called ``centered moduli''. Using this decomposition, the $k$-instanton partition function can then be written as \begin{equation} Z_{(k)}= \int d^4x_0\, d^4\theta \,{\widehat Z_{(k)}} \label{z} \end{equation} where \begin{equation} {\widehat Z_{(k)}}= \int d{\widehat{\cal M}_{(k)}}\, {\rm e}^{-\frac{8\pi^2k}{g^2}-S_{\rm mod}({\widehat{\cal M}_{(k)}})} \label{zk} \end{equation} is the centered partition function. It is worth pointing out that among the ``centered moduli'' $\widehat{\cal M}_{(k)}$ there is the singlet part of the anti-chiral fermions $\lambda_{\dot\alpha A}$, which is associated with the supersymmetries that are preserved both by the D3 and by the D(--1) branes\footnote{This is to be contrasted with the $\theta^{\alpha A}$ defined in (\ref{xtheta}), which are associated with the supersymmetries preserved by the D3 but broken by the D(--1) branes.}. Thus, despite the suggestive notation of Eq.
(\ref{z}), one may naively think that the full D-instanton partition function cannot yield an F-term in the effective action, {\it i.e.} an integral on half superspace, due to the presence of the anti-chiral $\lambda_{\dot\alpha A}$'s among the integration variables. Actually, this is not true since the $\lambda_{\dot\alpha A}$'s, including their singlet part, do couple to other instanton moduli (see the last terms in Eq. (\ref{smod})), and their integration correctly enforces the fermionic ADHM constraints on the moduli space. Therefore, the instanton partition function (\ref{z}) does indeed yield non-perturbative F-terms. Things are very different instead for the exotic instantons that have been recently considered in the literature~\cite{recent}. In this case, due to the different structure of the charged moduli, the $\lambda_{\dot\alpha A}$'s do not couple to anything, and in order to get a non-vanishing result they have to be removed from the spectrum, for example with an orientifold projection. \section{The instanton solution from mixed disks} The construction of the ADHM instanton moduli and of their integration measure in terms of open strings given in the previous section clearly shows that gauge theory instantons are described by systems of D3/D($-1)$ branes. There is, however, even more convincing evidence in favor of this identification, namely the fact that the mixed disks of the D3/D($-1)$ brane system are the source for the instanton background of the super Yang-Mills theory, and that the classical instanton profile can be obtained from open string amplitudes. For simplicity we will discuss only the case of instanton number $k=1$ in an $\mathrm{SU}(2)$ gauge theory, but no substantial changes occur in our analysis if one considers higher values of $k$ and other gauge groups (see Ref.~\cite{Billo:2002hm} for these extensions). Let us then consider the emission of the $\mathrm{SU}(2)$ gauge vector field $A_\mu^c$ from a mixed disk. The simplest diagram which can contribute to this process contains two boundary changing operators $V_{\bar w}$ and $V_{w}$ and no other moduli, and is shown in Fig. \ref{fig:md2}. \begin{figure}[t] \begin{center} \psfrag{mu}{ $A_\mu^c$} \psfrag{I}{$ $} \psfrag{p}{\small $p$} \psfrag{w}{\small $\bar w$} \psfrag{wb}{\small $w$} \includegraphics[width=0.31\textwidth]{fig1.eps} \end{center} \caption{The mixed disk that describes the emission of a gauge vector field $A_\mu^c$ with momentum $p$, represented by the outgoing wavy line.}\label{fig:md2} \end{figure} \noindent The amplitude associated with this diagram is \begin{equation} \label{dia1} \left\langle {\cal V}_{A^c_\mu(-p)}\right\rangle_{\rm mixed~disk} \,\equiv\, \Big\langle\hskip -5pt\Big\langle V_{\bar w}\,{\cal V}_{A^c_\mu(-p)}\,V_{w} \Big\rangle\hskip -5pt\Big\rangle \,=\, {\cal A}^c_\mu(p ;{\bar w, w}) \end{equation} where ${\cal V}_{A^c_\mu(-p)}$ is the gluon vertex operator with {outgoing} momentum $p$, stripped of its polarization, namely \begin{equation} \label{vert2} {\cal V}_{A^c_\mu(-p)} = 2{\mbox{i}}\, T^c\left( \partial X_\mu \,-\, {\mbox{i}} \,p\cdot \psi\, \psi_\mu\right)\, \mathrm{e}^{-{\mbox{i}} p\cdot X(z)} \end{equation} where $T^c$ is the adjoint $\mathrm{SU}(2)$ generator. Note that the amplitude (\ref{dia1}) carries the structure and the quantum numbers of an emitted gauge vector field of $\mathrm{SU}(2)$.
The evaluation of the amplitude (\ref{dia1}) is quite straightforward and the result is~\cite{Billo:2002hm} \begin{equation} \label{ampl} {\cal A}^c_\mu(p ;{\bar w, w}) = {\rm i}\rho^2\, p^\nu\,\bar\eta^c_{\nu\mu} \,{\rm e}^{-{\rm i} p\cdot x_0} \end{equation} where we have defined $\bar w^{\dot\alpha} w_{\dot\alpha}=2\rho^2$. By taking the Fourier transform of (\ref{ampl}), after inserting a free propagator, we obtain \begin{equation} {\cal A}_\mu^c(x) \,=\,\int\frac{d^4p}{(2\pi)^2}\,\frac{1}{p^2}\,{\cal A}^c_\mu(p ;{\bar w, w}) \,{\rm e}^{{\rm i} p\cdot x} \,=\, \frac{2\rho^2\,\bar\eta^c_{\mu\nu}\,(x-x_0)^\nu}{(x-x_0)^4}~~, \label{solution} \end{equation} where we have used the configuration-space form of the massless propagator, $\int \frac{d^4p}{(2\pi)^2}\,\frac{{\rm e}^{{\rm i} p\cdot x}}{p^2} = \frac{1}{x^2}$, so that the factor $p^\nu$ in (\ref{ampl}) simply becomes a derivative $-{\rm i}\,\partial^\nu$ acting on $1/(x-x_0)^2$. Eq. (\ref{solution}) is the leading term in the large distance expansion ({\it i.e.} $|x-x_0|\gg\rho$) of the $\mathrm{SU}(2)$ instanton with center $x_0$ and size $\rho$ in the \emph{singular gauge}, namely \begin{equation} {\cal A}_\mu^c(x) = \frac{2\rho^2 \,\bar\eta^c_{\mu\nu}(x - x_0)^\nu}{ (x - x_0)^2 \Big[(x-x_0)^2 + \rho^2\Big]}\simeq \frac{2\rho^2 \,\bar\eta^c_{\mu\nu}\,(x - x_0)^\nu}{ (x - x_0)^4}\left(1 - {\frac{\rho^2}{(x-x_0)^2}} + \ldots\right)~. \label{solution1} \end{equation} This result explicitly shows that mixed disk diagrams, like that of Fig. \ref{fig:md2}, are the source for the classical gauge instanton. Note that the amplitude (\ref{dia1}) is a 3-point function from the point of view of the two dimensional conformal field theory on the string world sheet, but is a 1-point function from the point of view of the four-dimensional gauge theory on the D3 branes. In fact, the two boundary changing operators $V_{\bar w}$ and $V_{w}$ that appear in (\ref{dia1}) just describe non-dynamical parameters on which the background depends. Furthermore, the fact that the gluon field (\ref{solution}) is in the singular gauge is not surprising, because in our set-up the gauge instanton is produced by a D$(-1)$ brane, which is a point-like object inside the D3 brane world-volume. Thus it is natural that the gauge connection exhibits a singularity at the location $x_0$ of the D-instanton. An obvious question at this point is whether the subleading terms in the large distance expansion (\ref{solution1}) also have a direct interpretation in string theory. Since such terms contain higher powers of $\rho^2\sim \bar w^{\dot\alpha} w_{\dot\alpha}$, one expects that they are associated with mixed disks with more insertions of boundary changing operators. This expectation has been explicitly confirmed in Ref.~\cite{Billo:2002hm}, so that one can conclude that mixed disks with the emission of a gauge vector field do indeed reproduce the complete $k=1$ instanton solution. \section{Deformed $\mathcal{N}=2$ instanton calculus} \label{sec:n2} In this section we analyze the instanton moduli space of $\mathcal{N}=2$ gauge theories in a non-trivial supergravity background. In particular we turn on a (self-dual) field strength for the graviphoton of the $\mathcal{N}=2$ supergravity multiplet and see how it modifies the instanton moduli action. This graviphoton background breaks Lorentz invariance in space-time (leaving the metric flat), but it allows one to perform instanton calculations explicitly and to establish a direct correspondence with the localization techniques that have been recently discussed in the literature~\cite{Nekrasov:2002qd,Flume:2002az,Losev:2003py}.
In order to systematically incorporate the gravitational background in the instanton action, let us first discuss how to include the interactions among the instanton moduli and the gauge fields. Then, let us consider all correlators involving {D3/D3} fields, and in particular the scalar ${\phi}$, in the presence of {$k$ D-instantons}. It turns out~\cite{Green:1997tv,Green:2000ke,Billo:2002hm} that the dominant contribution to the $n$-point function $\langle{\phi_1}\ldots {\phi_n}\rangle$ comes from {$n$ one-point} amplitudes on disks with moduli insertions. The result can therefore be encoded in extra moduli-dependent vertices for {$\phi$'s}, {\it i.e.} in {extra terms} in the moduli action containing such {one-point} functions \begin{equation} \mathcal{S}_{\rm mod}({\phi};{\mathcal{M}_{(k)}}) = {\phi}({x}) J_\phi({\widehat{\mathcal{M}}_{(k)}}) + \mathcal{S}_{\rm mod}({\widehat{\mathcal{M}}_{(k)}})~, \label{smod1} \end{equation} where {$x$} is the instanton center (previously denoted by $x_0$) and ${\phi}({x}) J_\phi({\widehat{\mathcal{M}}_{(k)}})$ is given by the disk diagrams with boundary (partly) on the D(--1)'s describing the emission of a $\phi$. To determine the complete action $\mathcal{S}_{\rm mod}({\phi};{\mathcal{M}_{(k)}})$ we have to systematically compute all mixed disks with a scalar {$\phi$} emitted from the D3 boundary. Other non-zero diagrams involving the instanton moduli and the super-partners of $\phi$ can be obtained using the Ward identities of the supersymmetries that are broken by the D(--1) branes. Therefore, the complete superfield-dependent moduli action $\mathcal{S}_{\rm mod}({\Phi};{\mathcal{M}_{(k)}})$ can be obtained from (\ref{smod1}) by simply letting ${\phi(x)} \rightarrow {\Phi}({x},\theta)$, with $\Phi(x,\theta)$ defined in \eq{Pm1}. Let us now extend this argument to the supergravity background we want to include. The field content of {$\mathcal{N}=2$ supergravity}, namely the metric $h_{\mu\nu}$, the gravitini $\psi_\mu^{\alpha A}$ and the graviphoton $C_\mu$, can be organized into a {chiral Weyl multiplet} \begin{equation} {W^+_{\mu\nu}(x,\theta)}= {\mathcal{F}_{\mu\nu}^+(x)} + \theta {\chi_{\mu\nu}^+(x)}+\frac{1}{2} \,\theta\sigma^{\lambda\rho} \theta\,{R^+_{\mu\nu\lambda\rho}(x)} +\cdots \label{weyl} \end{equation} where the self-dual tensor ${\mathcal{F}_{\mu\nu}^+(x)}$ can be identified on-shell with the graviphoton field strength, $R^+_{\mu\nu\lambda\rho}(x)$ is the self-dual Riemann curvature tensor and $\chi_{\mu\nu}^+$ is the gravitino field strength. All these fields belong to the massless sector of {type IIB strings} on ${\mathbb{R}^{4}}\times \mathbb{C}\times {\mathbb{C}^2/\mathbb{Z}_2}$. In particular, the graviphoton vertex is given by% \footnote{A {different} R-R field, with a {similar} structure, will be useful: \begin{equation*} {V_{\bar{\mathcal{F}}}}(z,\bar z) \! = \! \frac{1}{4\pi} {\bar{\mathcal{F}}^{\alpha\beta \dot A\dot B}}(p) \Big[S_\alpha(z)S^+(z)S^{\dot A}(z)\mathrm{e}^{-\frac{1}{2}\varphi(z)} {S}_\beta(\bar z)S^+(\bar z){S}^{\dot B}(\bar z)\mathrm{e}^{-\frac{1}{2}{\varphi}(\bar z)}\Big]\mathrm{e}^{{\mbox{i}} p\cdot X(z,\bar z)}~. \label{Vbarf} \end{equation*} } \begin{equation} {V_{\mathcal{F}}}(z,\bar z) \!=\!
\frac{1}{4\pi}{\mathcal{F}^{\alpha\beta AB}}(p) \Big[S_\alpha(z)S^-(z)S_A(z)\mathrm{e}^{-\frac{1}{2}\varphi(z)} {S}_\beta(\bar z)S^-(\bar z){S}_B(\bar z)\mathrm{e}^{-\frac{1}{2}{\varphi}(\bar z)}\Big]\mathrm{e}^{{\mbox{i}} p\cdot X(z,\bar z)} \end{equation} where the identification between left and right movers appropriate for disks has already been taken into account. The bi-spinor graviphoton polarization is given by \begin{equation} {\mathcal{F}^{(\alpha\beta) [AB]}} = \frac{\sqrt 2}{4}\, {\mathcal{F}_{\mu\nu}^+}\big(\sigma^{\mu\nu})^{\alpha\beta}\,\epsilon^{AB} \end{equation} and corresponds to an R-R 3-form $\mathcal{F}_{\mu\nu z}$ with one index in the $\mathbb{C}$ internal direction. To determine the contribution of the graviphoton to the field-dependent moduli action we have to consider disk amplitudes with open string moduli vertices on the boundary and closed string graviphoton vertices in the interior, which survive in the field theory limit $\alpha'\to 0$. \begin{figure}[t] \begin{center} \psfrag{f}{\small $\bar{\mathcal{F}}^+$} \psfrag{mm}{\small $M$} \psfrag{m}{\small $M$} \includegraphics[width=0.18\textwidth]{fig3.eps} \end{center} \caption{Mixed disk describing the coupling among the instanton moduli and a closed string field.}\label{fig:3} \end{figure} It turns out that very few diagrams (like the one represented in Fig. \ref{fig:3}) contribute in this limit. They can be easily evaluated; for instance, for the diagram in Fig. \ref{fig:3} one finds \begin{equation} \Big\langle\hskip -5pt\Big\langle {V_{M} V_{M}}{V_{\bar{\mathcal F}}} \Big\rangle\hskip -5pt\Big\rangle =\frac{1}{4\sqrt 2}\mbox{tr}_k\Big\{{M^{\alpha A}M^{\beta B}} {\bar{\mathcal{F}}^+_{\mu\nu}} \Big\}(\sigma^{\mu\nu})_{\alpha\beta}\epsilon_{AB}~. \label{mmbare} \end{equation} Other diagrams, connected by the broken supersymmetries, have the effect of promoting the dependence of the moduli action to the full Weyl multiplet, {\it i.e.} ${\mathcal{F}^+_{\mu\nu}} \to {W^+_{\mu\nu}}(x,\theta)$. In this way the superfield-dependent moduli action $\mathcal{S}_{\rm mod}({\Phi},{W^+};{\widehat{\mathcal{M}}_{(k)}})$ is obtained from the previous results. Integrating over the moduli, one gets the effective action, and hence the prepotential at instanton number $k$, namely \begin{equation} \label{effacW} \begin{aligned} S_{\rm eff}^{(k)}[{\Phi},{W^+}] &=\int d^4x \, d^4\theta\,\, d{\widehat{\mathcal{M}}_{(k)}}\, \mathrm{e}^{-\frac{8\pi^2 k}{g^2} - \mathcal{S}_{\rm mod}({\Phi},{W^+};{\widehat{\mathcal{M}}_{(k)}})} \\ &=\int d^4x \, d^4\theta\,\,\, \mathrm{F}^{(k)}({\Phi},{W^+}) ~. \end{aligned} \end{equation} Since {$\Phi(x,\theta)$} and {$W_{\mu\nu}^+(x,\theta)$} are constant with respect to the integration variables ${\widehat{\mathcal{M}}_{(k)}}$, we can actually compute $\mathrm{F}^{(k)}$ by reducing them to constant values, {\it i.e.} ${\Phi}(x,\theta) \to {a}$ and ${W^+_{\mu\nu}}(x,\theta) \to {f_{\mu\nu}}$.
In this case the prepotential becomes just a function of the scalar and graviphoton v.e.v.'s and is determined by a ``deformed'' moduli action depending on $a, {\bar a}, f, {\bar f}$: \begin{eqnarray} \label{defmodac} &&\hskip -0.2cm \mathcal{S}_{\rm mod}({a,\bar a};{f,\bar f};{\widehat{\mathcal{M}}_{(k)}}) = - \mbox{tr}_k\Big\{ \big([\chi^{\dagger},a'_{\alpha\dot\beta}]+2{\bar f_c} (\tau^c a')_{\alpha\dot\beta}\big) \big([\chi,{a'}^{\dot\beta\alpha}]+2{f_{c}}(a'\tau^c)^{\dot\beta \alpha}\big) \nonumber \\ &&\hskip -0.2cm -\big(\chi^{\dagger}{\bar w}_{\dot\alpha}-{\bar w}_{\dot\alpha}\,{\bar a}\big) \big( w^{\dot\alpha}\chi- {a}\,w^{\dot\alpha}\big) -\big(\chi{\bar w}_{\dot\alpha} -{\bar w}_{\dot\alpha}\,{a}\big) \big(w^{\dot\alpha}\chi^{\dagger} -{\bar a}\,w^{\dot\alpha}\big) \Big\} \\ &&\hskip -0.2cm +{\mbox{i}}\, \frac{\sqrt 2}{2}\, \mbox{tr}_k\Big\{{\bar \mu}^A \epsilon_{AB} \big( \mu^B\chi^{\dagger} -{\bar a}\,\mu^B\big) -\frac{1}{2}\,M^{\alpha A}\epsilon_{AB}\big([\chi^{\dagger},M_{\alpha}^{B}] +2\,{\bar f_c}\, (\tau^c)_{\alpha\beta}M^{\beta B}\big)\Big\} \nonumber \\ &&\hskip -0.2cm +\, \mbox{tr}_k\Big\{-{\mbox{i}} D_c \big({w_{\dot\alpha}(\tau^c)^{\dot\alpha}_{\dot\beta} \bar{w}_{\dot\beta} +{\mbox{i}} \bar\eta_{\mu\nu}^c \big[{a'}^\mu,{a'}^\nu\big]}\big) + {\mbox{i}} {\lambda}^{\dot\alpha}_{\,A}\big( {\bar{\mu}^A{w}_{\dot\alpha}+ \bar{w}_{\dot\alpha}{\mu}^A + \big[a'_{\alpha\dot\alpha},{M'}^{\alpha A}\big]\big)}\Big\}~. \nonumber \end{eqnarray} Notice that the ADHM constraints, appearing in the last line of (\ref{defmodac}), are not modified by the graviphoton background. In the action (\ref{defmodac}) the v.e.v.'s ${a},{f}$ and ${\bar a},{\bar f}$ are not on the same footing. Indeed, one can write \begin{equation} \mathcal{S}_{\rm mod}({a,\bar a};{f,\bar f};{\widehat{\mathcal{M}}_{(k)}}) = {Q}\,\Xi \end{equation} where {$Q$} is the scalar component of the twisted supercharges, {\it i.e.} \begin{equation} \label{Qscalar} {Q} \equiv \frac{1}{2}\,\epsilon_{\dot\alpha\dot\beta}\,Q^{\dot\alpha\dot\beta}~, \end{equation} where the topological twist acts as $Q^{\dot\alpha B}\stackrel{\mbox{\tiny top. twist}}{\longrightarrow} Q^{\dot\alpha\dot\beta}$. It turns out that the parameters $\bar a$, $\bar f$ appear in $\mathcal{S}_{\rm mod}$ only through the gauge fermion $\Xi$, and thus the instanton partition function and the prepotential $\mathrm{F}^{(k)}$ in \eq{effacW} are in fact independent of $\bar a$, $\bar f$, because the variation with respect to these parameters is {$Q$-exact}. From the explicit expression of $\mathcal{S}_{\rm mod}({a,0};{f,0})$ the general form of the prepotential $\mathrm{F}^{(k)}({a};{f})$ can be easily deduced; reinstating the superfields, it reads \begin{equation} \label{Fkpw} \mathrm{F}^{(k)}({\Phi}, {W^+}) = \sum_{h=0}^\infty {c_{k,h}} \, {\Phi^2} \left(\frac{\Lambda}{{\Phi}}\right)^{4k}\!\left(\frac{{W^+}} {{\Phi}}\right)^{2h}~. \end{equation} Summing over the instanton sectors we obtain the full non-perturbative prepotential \begin{equation} \label{Fnp} \mathrm{F}_{\mbox{\tiny n.p.}}({\Phi},{W^+}) = \sum_{k=1}^\infty \mathrm{F}^{(k)}({\Phi},{W^+}) = \sum_{{h}=0}^\infty C_{{h}}(\Lambda,{\Phi}) {(W^+)^{2h}}~, \end{equation} where \begin{equation} \label{Chis} C_{{h}}(\Lambda,{\Phi}) = \sum_{k=1}^\infty {c_{k,h}} \,\frac{\Lambda^{4k}}{{\Phi}^{4k+2{h}-2}}~. \end{equation} This gives rise to many different terms in the effective action, which is obtained, see \eq{effacW}, by integrating the prepotential over $d^4x\, d^4\theta$.
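For orientation (this simple check is our addition), note that the $h=0$ terms of \eq{Fkpw} do not involve the graviphoton and reproduce the familiar instanton expansion of the Seiberg--Witten prepotential for $\mathrm{SU}(2)$,
\begin{equation*}
\mathrm{F}^{(k)}({\Phi},0) = c_{k,0}\,\frac{\Lambda^{4k}}{\Phi^{4k-2}}~,
\end{equation*}
while the terms with $h\geq 1$ encode the gravitational couplings discussed below.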
In particular, saturating the $\theta$ integration with four $\theta$'s all coming from ${W^+}$, we get gravitational F-terms in the $\mathcal{N}=2$ effective action involving the curvature tensor and the graviphoton field strength \begin{equation} \label{R2W} \int d^4x \,\,C_{{h}}(\Lambda,{\phi})\,({R^+})^2 ({\mathcal{F}^{+}})^{2h-2}~. \end{equation} The stringy instanton calculus thus accounts for such F-terms, and it gives a way to compute them, since the coefficients ${c_{k,h}}$ can be explicitly determined by performing the integrals over the instanton moduli space. This is a formidable task, which was finally accomplished in Refs.~\cite{Nekrasov:2002qd,Flume:2002az,Losev:2003py}, using a suitable ``deformation'' of the moduli action which localizes the integrals. This localization deformation exactly coincides with \eq{defmodac} if we set \begin{equation} \label{floc} {f_c}=\frac{{\varepsilon}}{2}\,\delta_{3c}~,~~ {\bar f_c} = \frac{{\bar\varepsilon}}{2}\,\delta_{3c}~, \end{equation} (and moreover ${\varepsilon}={\bar \varepsilon}$). As we remarked above, $Z^{(k)}({a};{\varepsilon})$ does not depend on {$\bar\varepsilon$}. However, ${\bar\varepsilon}=0$ is a limiting case and some care is needed. In fact, while $\mathrm{F}^{(k)}({a};{\varepsilon})$ is well-defined, the complete partition function $Z^{(k)}({a};{\varepsilon})$ diverges because of the (super)volume integral $\int d^4x \,d^4\theta$. The presence of {$\bar\varepsilon$} regularizes the superspace integration by a Gaussian term, leading to the following effective rule: \begin{equation} \label{epsirule} \int d^4x\, d^4\theta \to 1/\varepsilon^2~; \end{equation} one can then work with the \emph{full} instanton partition function. Moreover, the $a$ and $\varepsilon,\bar\varepsilon$ deformations completely localize the integration over the moduli space, which can then be evaluated explicitly~\cite{Nekrasov:2002qd,Flume:2002az,Losev:2003py}. With $\bar\varepsilon\not=0$, {\it i.e.} with complete localization, the trivial superposition of several instantons of charges $k_i$ contributes to the sector $k= \sum k_i$; such disconnected configurations, instead, do \emph{not} contribute when $\bar\varepsilon=0$. The partition function computed by localization thus corresponds in this case to the exponential of the non-perturbative prepotential, namely \begin{equation} \label{ZvsF} \begin{aligned} Z({a};{\varepsilon}) &= 1+\sum_{k=1}^\infty Z^{(k)}({a};{\varepsilon}) = \exp\left(\frac{\mathrm{F}_{\mbox{\tiny n.p.}}({a}, {\varepsilon})}{{\varepsilon^2}}\right) = \exp\left(\sum_{k=1}^\infty \frac{\mathrm{F}^{(k)}({a}, {\varepsilon})}{{\varepsilon^2}}\right) \\ & = \exp\left(\sum_{h=0}^\infty \sum_{k=1}^\infty {c_{k,h}} \frac{{\varepsilon^{2h-2}}}{{a^{2h-2}}} \left(\frac{\Lambda}{{a}}\right)^{4k} \right)~. \end{aligned} \end{equation} In conclusion, the computation via localization techniques of the multi-instanton partition function $Z({a};{\varepsilon})$ determines the coefficients ${c_{k,h}}$ which appear in the gravitational F-terms of the $\mathcal{N}=2$ effective action \eq{R2W} via the expression of $C_{{h}}(\Lambda,{\phi})$ given in \eq{Chis}. The very same gravitational F-terms can be extracted in a completely different way by considering topological string amplitudes at genus $h$ on suitable Calabi-Yau manifolds~\cite{Bershadsky:1993cx,Antoniadis:1993ze}. In our computation the role of the genus $h$ Riemann surface is played by a (degenerate) surface with the same Euler number, made of $2h$ disconnected disks instead of $h$ handles.
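To make the relation (\ref{ZvsF}) more explicit, we add here a short algebraic check: expanding the exponential in powers of $\Lambda^4$ (with the trivial sector $Z^{(0)}=1$ included), the first two instanton sectors give
\begin{equation*}
\mathrm{F}^{(1)} = \varepsilon^2\, Z^{(1)}~,\qquad
\mathrm{F}^{(2)} = \varepsilon^2 \Big( Z^{(2)} - \frac{1}{2}\,\big(Z^{(1)}\big)^2 \Big)~,
\end{equation*}
where the subtraction in $\mathrm{F}^{(2)}$ removes precisely the disconnected superposition of two $k=1$ instantons mentioned above.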
The two different roads to determine the $F$-couplings of \eq{R2W} must lead to the same result. This is a very natural way to state the conjecture by N. Nekrasov~ \cite{Nekrasov:2002qd} that the coefficients arising in the $\varepsilon$-expansion of multi-instanton partition functions match those appearing in higher genus topological string amplitudes on Calabi-Yau manifolds. \section*{Acknowledgements} We would like to thank F. Fucito and I. Pesando for many useful discussions. We thank the Galileo Galilei Institute for hospitality and support. This work is partially supported by the European Commission FP6 Programme MRTN-CT-2004-005104, in which A.L. is associated to University of Torino, and by Italian MUR under contract PRIN-2005023102.
\section{Introduction} \label{sec:intro} Understanding the formation and dynamics of complex spatial structures or patterns has been of continuing interest due to the fundamental importance of predicting and controlling system properties and material functions. However, a comprehensive understanding is hindered by the fact that the processes involved are usually nonlinear and nonequilibrium, can span a variety of length and time scales, and are highly influenced by the complex coupling with materials growth and processing conditions. Typical examples include the growth of strained solid films and the formation of nanostructures such as quantum dots or nanowires, which involve the interplay among microscopic crystalline structure, mesoscopic or nanoscale surface patterns, topological defects (e.g., dislocations), and various growth parameters such as temperature, misfit strain, growth rate, and film thickness \cite{re:politi00,re:stangl04,re:huang08}. The system dynamics and evolution are further complicated in alloy samples, due to the additional coupling to the spatial/temporal variation of alloy composition, particularly in the case of phase separation \cite{re:gunton83,re:guyer95}. To address these complex phenomena, a variety of theoretical modeling and simulation methods have been developed, which can be roughly characterized by the level of description that they focus on. At the microscopic level capturing crystalline details, atomistic modeling techniques such as Monte Carlo (MC) or molecular dynamics (MD) have been widely adopted. For example, nanostructure (e.g., islands/pits) formation during strained film epitaxy has been studied via the kinetic MC method incorporating elastic interaction and strain energy \cite{re:nandipati06,re:lung05,re:baskaran10}, while the detailed structure and dynamics of crystal defects like grain boundaries and dislocations have been simulated by MD \cite{re:wolf05,re:derlet09}. However, the small length and time scales addressed by these atomistic methods lead to large computational demands, and hence restrict the system sizes and evolution times that can be accessed. This limitation can be overcome via continuum modeling methods, including the continuum elasticity theory used in strained film growth \cite{re:spencer91,re:guyer95,re:huang02a,re:huang02b,re:huang03a,re:huang03b,re:tu07,re:desai10} and the well-known phase field models that have been applied to a wide range of areas such as crystal growth, nucleation, phase separation, solidification, defect dynamics, etc. \cite{re:elder94,re:elder01,re:kassner01,re:echebarria04,re:granasy06}. These continuum approaches feature coarse-grained, long-wavelength scales and diffusive time dynamics, but are not formulated for the short-wavelength scales associated with microscopic crystalline details. To incorporate the advantages of both types of approaches, and hence be able to simultaneously model crystalline details on length and time scales of experimental relevance, the phase field crystal (PFC) \cite{re:elder02,re:elder04,re:elder07} model and the related amplitude representation \cite{re:goldenfeld05,re:athreya06,re:athreya07,re:chan10,re:yeon10} were developed recently. The PFC model combines the small length scales of the crystalline structure with diffusive time scales by describing the dynamics of the atomic number density field $\rho$, a continuum field variable that is spatially periodic at atomic length scales in the crystalline state \cite{re:elder02,re:elder04}.
To alleviate the limitation imposed by the necessity of describing small length scales, an amplitude representation was developed to describe slowly varying envelope or amplitude functions while maintaining the basic features of crystalline states, particularly elasticity, plasticity and multiple crystal orientations. Both the original PFC model and the corresponding amplitude representation have been extended to binary alloys \cite{re:elder04b,re:elder07,re:elder10}. In the binary case the amplitude representation describes the amplitude and phase of the density field \cite{re:goldenfeld05,re:athreya06,re:athreya07,re:yeon10} and also the concentration profile \cite{re:elder10}, which is assumed to vary on ``slow'' scales compared to the atomic lattice spacing. A wide range of phenomena has been studied via this PFC method for both pure and binary material systems, including solidification \cite{re:elder07,re:elder10,re:teeffelen09}, grain nucleation and growth \cite{re:goldenfeld05,re:backofen07,re:yeon10,re:tegze09}, phase segregation \cite{re:elder07,re:elder10}, quantum dot growth during epitaxy \cite{re:huang08,re:elder10,re:wu09,re:huang10}, surface energy anisotropy \cite{re:wu07,re:majaniemi09}, formation and melting of dislocations and grain boundaries \cite{re:berry06,re:berry08b,re:mellenthin08,re:spatschek10}, commensurate/incommensurate transitions \cite{re:achim06,re:ramos08}, sliding friction \cite{re:achim09,re:ramos10}, and glass formation \cite{re:berry08}. In addition, recent work has been conducted to extend the modeling to incorporate faster time scales associated with mechanical relaxation \cite{re:stefanovic06,re:stefanovic09}, and to develop new efficient computational methods \cite{re:athreya07,re:cheng08,re:hirouchi09,re:tegze09b}. The PFC model can be connected to microscopic theory via the classical density functional theory (DFT) of freezing \cite{re:elder07,re:majaniemi07,re:majaniemi08,re:jaatinen09,re:teeffelen09}. It has been found that the PFC free energy functional can be derived from classical DFT for either pure materials or binary mixtures \cite{re:ramakrishnan79,re:haymet81,re:rick90,re:denton90,re:singh91,re:lowen94}, by approximating the two-point direct correlation function with a truncated Fourier series and expanding the ideal-gas part of the DFT free energy functional in a power series of $\rho$ and $\psi$ (up to 4th order) \cite{re:elder07}. While this connection provides insight into the parameters that enter PFC models, the approximations used are quite drastic and the resulting model is a poor approximation of classical DFT \cite{re:jaatinen09}. A similar connection could be made with the atomic density formulation of Jin and Khachaturyan \cite{re:jin06}, which is similar in form to classical DFT, although the parameters that enter are given a different physical interpretation. The main difficulty in directly simulating classical DFT is that the solutions for $\rho$ are very sharply peaked around the lattice positions (at least in metallic crystals), while simple PFC models predict very smooth sinusoidal profiles. This difference makes numerical simulations of a simple PFC model much easier than those of classical DFT, as the former model's grid spacing can be a factor of ten larger than the latter's, so that in three dimensions a PFC model can simulate systems three orders of magnitude larger than classical DFT with the same memory requirements.
In addition, it has been shown that a simple PFC model can be adjusted to match many material properties, such as the surface energy and its anisotropy, bulk moduli, and the miscibility gap in three-dimensional (3D) bcc iron \cite{re:jaatinen09}, and the velocity of liquid/solid fronts in a two-dimensional (2D) hexagonal crystal of colloids \cite{re:teeffelen09}. Another benefit of PFC modeling is the ability to efficiently simulate microstructure dynamics. At present, PFC dynamics has been largely introduced phenomenologically using time-dependent Ginzburg-Landau type dynamics \cite{re:elder02,re:elder07}. Recent progress includes the derivation of hydrodynamic evolution equations for crystalline solids based on the Poisson bracket formalism and their simplification to PFC equations \cite{re:majaniemi07,re:majaniemi08}. Very recently, research has been conducted to connect PFC-type models with microscopic dynamics (the Smoluchowski equation) via dynamical density functional theory (DDFT) \cite{re:teeffelen09}. These results were also based on a truncation of the DFT free energy at the two-point correlation function, and were restricted to single-component systems. In this paper we provide a systematic derivation of PFC dynamics from DDFT, for both single-component and binary systems that involve the evolution of atomic number density and alloy concentration fields (see Sec. \ref{sec:ddft}). Our derivation includes contributions from three-point direct correlation functions, which have been shown to be important for DFT calculations \cite{re:haymet83,re:curtin88}. The original PFC models can be recovered via the lowest order approximation of our DDFT results, with the PFC parameters connected to quantities involving the DFT correlation functions. Our calculations can be directly extended to incorporate fourth and higher order correlation functions in DFT. To complete the PFC methodology for binary systems, the full amplitude equation formalism is established for a 2D system with hexagonal/triangular crystalline symmetry. It incorporates the effects of species-dependent atomic mobility and average (zeroth-mode) atomic density, which are usually coupled with the dynamics of structural amplitudes and the concentration field during system evolution but are absent in previous studies of binary PFC. As shown in Sec. \ref{sec:ampl}, the standard multiple-scale expansion is first applied to derive the lowest order amplitude equations, followed by a hybrid approach that we develop here to obtain the equations incorporating all orders of the expansion. Furthermore, stochastic amplitude equations are derived for both single-component and binary PFC models, showing the corresponding noise dynamics as well as its coupling due to different atomic mobilities of the system components (see Sec. \ref{sec:noise}). As has been discussed in previous research, the advantage of the amplitude equation representation lies in its large increase of computational efficiency, due to the large length and time scales involved \cite{re:goldenfeld05,re:athreya06,re:yeon10}, and in its amenability to advanced numerical schemes such as the adaptive mesh refinement method \cite{re:athreya07}. Furthermore, these amplitude equations are more amenable to analytic calculations, as shown in recent studies of surface nanostructure formation in strained epitaxial films \cite{re:huang08,re:huang10} as well as in recent results establishing the correspondence between PFC type models and traditional phase field approaches \cite{re:elder10}.
To further illustrate these advantages, in Sec. \ref{sec:appl} we present a sample application of the derived binary PFC amplitude equations to the phenomenon of surface segregation and alloy intermixing. This is of particular importance in material growth (e.g., group IV or III-V semiconductor thin film epitaxy \cite{re:moison89,re:walther01,re:cederberg07,re:walther97,re:denker05,re:gerling01,re:dorin03,re:pearson04}), but rather limited information and understanding are available to date. We focus on both the liquid-solid (crystal) coexistence profile and the coherent growth of strained solid layers, and show the control of intra- and inter-layer diffusion by varying material parameters including the solute expansion coefficient (due to different atomic sizes), the misfit strain in alloy layers, and the mobility difference between alloy components. This study provides an understanding of mass transport mechanisms during material growth and evolution. The dynamic processes of strained layer growth as well as the associated composition overshooting phenomenon are obtained in our calculations in Sec. \ref{sec:appl}. The results are compared to experimental findings of vertical composition segregation or surface enrichment as widely encountered during the growth of various alloy heterostructure systems such as InAs/GaAs, Ge/Si, GaAs/GaSb, InP/InGaAs, etc. \cite{re:moison89,re:walther01,re:cederberg07,re:walther97,re:denker05,re:gerling01,re:dorin03,re:pearson04}. \section{Derivation of PFC dynamics via dynamical density functional theory} \label{sec:ddft} \subsection{Single-component systems} \label{sec:ddft_pure} We start from the DDFT equation governing the evolution of a time-dependent local atomic number density field $\rho({\bm r},t)$, \begin{equation} \frac{\partial \rho({\bm r},t)}{\partial t} = {\bm \nabla} \cdot \left [ M \rho({\bm r},t) {\bm \nabla} \frac{\delta {\cal F}}{\delta \rho} \right ], \label{eq:ddft} \end{equation} which was first proposed phenomenologically \cite{re:evans79,re:dieterich90} and was later derived by various groups via microscopic Brownian dynamics \cite{re:marconi99,re:archer04a,re:marconi08} and via Hamiltonian dynamics and hydrodynamics \cite{re:chan05} (see Ref. \cite{re:teeffelen09} for a brief review). The DDFT equations for binary A/B systems are similar to Eq. (\ref{eq:ddft}), with $\rho({\bm r},t)$ replaced by $\rho_i({\bm r},t)$ ($i=A,B$; see Sec. \ref{sec:ddft_binary} below), and have also been derived recently from Brownian dynamics (the Smoluchowski equation) \cite{re:archer05a}. In Eq. (\ref{eq:ddft}) the mobility is $M=D/k_BT$, where $D$ is the diffusion coefficient and $T$ is the temperature. In classical DFT the free energy functional ${\cal F}$ can be expanded as \cite{re:ramakrishnan79,re:haymet81} \begin{eqnarray} \frac{{\cal F}[\rho]}{k_BT} &=& \int d{\bm r} \left[ \rho \ln (\rho / \rho_l) - \delta \rho \right] - \frac{1}{2!} \int d{\bm r_1} d{\bm r_2} \delta \rho({\bm r_1}) C^{(2)}({\bm r_1}, {\bm r_2}) \delta \rho({\bm r_2}) \nonumber\\ &-&\frac{1}{3!} \int d{\bm r_1} d{\bm r_2} d{\bm r_3} C^{(3)}({\bm r_1}, {\bm r_2}, {\bm r_3}) \delta \rho({\bm r_1}) \delta \rho({\bm r_2}) \delta \rho({\bm r_3}) + \cdots, \label{eq:F_rho_C3} \end{eqnarray} where $\rho_l$ is the reference liquid state density taken at liquid/solid coexistence, $\delta \rho = \rho - \rho_l$, and $C^{(n)}$ is the $n$-point direct correlation function of the liquid phase at $\rho_l$.
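As a side remark (added here for orientation), in terms of the rescaled density $n=(\rho-\rho_l)/\rho_l$ introduced below, the ideal-gas part of Eq. (\ref{eq:F_rho_C3}) reads $\rho_l\left[(1+n)\ln(1+n)-n\right]$ per unit volume, and its small-$n$ Taylor expansion
\begin{equation*}
(1+n)\ln(1+n) - n = \frac{n^2}{2} - \frac{n^3}{6} + \frac{n^4}{12} - \cdots
\end{equation*}
is the origin of the polynomial local terms of the standard PFC free energy mentioned above.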
It is important that the correlation functions are taken from the liquid state to maintain rotational invariance. Details of the correlation functions depend on the specific material systems studied and are usually calculated via various approximations \cite{re:rick90,re:singh91,re:lowen94}. Following the original PFC approach \cite{re:elder07}, the Fourier component of the two-point correlation function $\hat{C}^{(2)}$ is expanded as a power series in the wavenumber $q$ to fit up to its first peak, i.e., \begin{equation} \hat{C}^{(2)}(q)=-\hat{C}_0+\hat{C}_2 q^2-\hat{C}_4 q^4+\cdots, \label{eq:C2} \end{equation} where $\hat{C}_0$, $\hat{C}_2$, and $\hat{C}_4$ are fitting parameters that can be connected to material properties such as the isothermal compressibility of the liquid phase and the bulk modulus and lattice constant of the crystal state \cite{re:elder07,re:jaatinen09}. For the three-point correlation function $C^{(3)}$, its Fourier transform yields $$ C^{(3)}({\bm r_1}, {\bm r_2}, {\bm r_3}) = \frac{1}{(2\pi)^6} \int d{\bm q} d{\bm q'} e^{i{\bm q} \cdot ({\bm r_1}-{\bm r_2})} e^{i{\bm q'} \cdot ({\bm r_2}-{\bm r_3})} \hat{C}^{(3)}({\bm q}, {\bm q'}). $$ The simplest approximation is to keep only the zero wavenumber mode, i.e., \begin{equation} \hat{C}^{(3)}({\bm q},{\bm q'}) \simeq \hat{C}^{(3)}({\bm q}={\bm q'}=0)=-\hat{C}_0^{(3)}, \label{eq:C3} \end{equation} as adopted in the DFT studies of hard spheres \cite{re:haymet83,re:smithline87} and Lennard-Jones mixtures \cite{re:rick89}. This can be justified by previous hard-sphere DFT calculations, which show that nonzero wavenumber components of $\hat{C}^{(3)}$ yield only minor contributions \cite{re:haymet83,re:smithline87}, and that as the order $n$ increases, the oscillatory details of $\hat{C}^{(n)}$ become less and less relevant compared to the zero wavenumber mode \cite{re:zhou00}. Defining the rescaled atomic density field $n=(\rho - \rho_l)/\rho_l$ and using the approximations (\ref{eq:C2}) and (\ref{eq:C3}), the free energy functional (\ref{eq:F_rho_C3}) becomes \begin{equation} \Delta {\cal F}/\rho_l k_B T = \int d {\bm r} \left [ (1+n)\ln(1+n) + \frac{1}{2} B^x n \left ( 2R^2 \nabla^2 + R^4 \nabla^4 \right ) n + \frac{1}{2} B_l' n^2 + \frac{1}{3} \tilde{B} n^3 \right ], \label{eq:F_C3} \end{equation} where $\Delta {\cal F} = {\cal F} [\rho] - {\cal F}[\rho_l]$, and \begin{equation} B_l'=\rho_l \hat{C}_0=B^{\ell}-1, \qquad B^x = \rho_l \hat{C}_2^2 / 4\hat{C}_4, \qquad R=\sqrt{2\hat{C}_4/\hat{C}_2}, \qquad \tilde{B} = \rho_l^2 \hat{C}_0^{(3)}/2. \end{equation} Substituting Eq. (\ref{eq:F_C3}) into the DDFT equation (\ref{eq:ddft}), which can be rewritten as \begin{equation} \frac{\partial n}{\partial t} = M' {\bm \nabla} \cdot \left [ (1+n) {\bm \nabla} \frac{\delta {\cal F}}{\delta n} \right ] \label{eq:ddft_n} \end{equation} (with $M'=M/\rho_l$), we find \cite{re:notes_pfc} \begin{equation} \frac{\partial n}{\partial t} = D \left \{ \nabla^2 \left [ -( B^x - B^{\ell} ) n + B^x \left ( R^2 \nabla^2 + 1 \right )^2 n + \tau n^2 + v n^3 \right ] + B^x {\bm \nabla} \cdot \left [ n {\bm \nabla} \left ( R^2 \nabla^2 + 1 \right )^2 n \right ] \right \}, \label{eq:pfc} \end{equation} where $\tau = -(B^x-B^{\ell}+1)/2+\tilde{B}$, $v=2\tilde{B}/3$, and we have used the relation $M =D/k_B T$. Note that if only the two-point correlation function in the DFT free energy (\ref{eq:F_rho_C3}) were used, it would yield $\tilde{B}=v=0$, and Eq. (\ref{eq:pfc}) would reduce to a form equivalent to the PFC1 model given in Ref. \cite{re:teeffelen09}.
However, this would then be a 2nd-order dynamic equation due to the absence of the $n^3$ term, which, as found in our numerical tests, is more difficult to converge at long times than the full 3rd-order Eq. (\ref{eq:pfc}). It is convenient to rescale Eq. (\ref{eq:pfc}) via a length scale $R$, a time scale $R^2/DB^x$, and $n \rightarrow \sqrt{v/B^x}~ n$, leading to \begin{equation} \frac{\partial n}{\partial t} = \nabla^2 \left [ -\epsilon n + \left ( \nabla^2 + q_0^2 \right )^2 n + g_2 n^2 + n^3 \right ] + g_0 {\bm \nabla} \cdot \left [ n {\bm \nabla} \left ( \nabla^2 + q_0^2 \right )^2 n \right ], \label{eq:pfc_re} \end{equation} where \begin{equation} \epsilon = (B^x - B^{\ell})/B^x, \quad q_0=1, \quad g_2 = \tau / \sqrt{v B^x}, \quad g_0 = \sqrt{B^x / v}. \label{eq:para_pfc} \end{equation} The original PFC equation is recovered by noting that the term ${\bm \nabla} \cdot [ n {\bm \nabla} ( \nabla^2 + q_0^2 )^2 n]$ is of higher order compared to the term $\nabla^2 ( \nabla^2 + q_0^2 )^2 n$. This follows from a simple scaling analysis: $n \sim {\cal O}(\epsilon^{1/2})$ and $( \nabla^2 + q_0^2 )^2 n \sim {\cal O}(\epsilon^{3/2})$ (see also Sec. \ref{sec:multiscale} for more details of the scale expansion). Thus, to lowest order approximation, Eq. (\ref{eq:pfc_re}) can be reduced to the original PFC model equation that has been widely used: \begin{equation} \frac{\partial n}{\partial t} = \nabla^2 \left [ -\epsilon n + \left ( \nabla^2 + q_0^2 \right )^2 n + g_2 n^2 + n^3 \right ]. \end{equation} This derivation procedure can be readily extended to incorporate higher order direct correlation functions of DFT (e.g., four-point, five-point, etc.) and thus to include higher order terms such as $n^4$, $n^5$, ..., in the PFC model. Similarly, these high-order correlation functions can be effectively approximated to lowest order via the zero wavenumber modes, based on recent DFT calculations \cite{re:zhou00}. For example, the contribution ${\cal F}^{(4)}$ to the free energy functional from the four-point correlation function is given by \begin{equation} {\cal F}^{(4)}/k_BT = - \frac{1}{24} \int d{\bm r_1} d{\bm r_2} d{\bm r_3} d{\bm r_4} C^{(4)}({\bm r_1}, {\bm r_2}, {\bm r_3}, {\bm r_4}) \delta \rho({\bm r_1}) \delta \rho({\bm r_2}) \delta \rho({\bm r_3}) \delta \rho({\bm r_4}). \end{equation} Assuming $\hat{C}^{(4)}({\bm q},{\bm q'},{\bm q''}) \simeq \hat{C}^{(4)}({\bm q}={\bm q'}={\bm q''}=0)=-\hat{C}_0^{(4)}$, the free energy functional (\ref{eq:F_C3}) becomes \begin{equation} \Delta {\cal F}/\rho_l k_B T = \int d {\bm r} \left [ (1+n)\ln(1+n) + \frac{1}{2} B^x n \left ( 2R^2 \nabla^2 + R^4 \nabla^4 \right ) n + \frac{1}{2} B_l' n^2 + \frac{1}{3} \tilde{B} n^3 + \frac{1}{4} \tilde{B}_4 n^4 \right ], \label{eq:F_C4} \end{equation} where $\tilde{B}_4 = \rho_l^3 \hat{C}_0^{(4)}/6$. The dynamic equation for $n$ would then be \begin{equation} \frac{\partial n}{\partial t} = D \left \{ \nabla^2 \left [ -( B^x - B^{\ell} ) n + B^x \left ( R^2 \nabla^2 + 1 \right )^2 n + \tau n^2 + v n^3 + u n^4 \right ] + B^x {\bm \nabla} \cdot \left [ n \left ( R^2 \nabla^2 + 1 \right )^2 {\bm \nabla} n \right ] \right \}, \label{eq:pfc_C4} \end{equation} where $v=2\tilde{B}/3 + \tilde{B}_4$ and $u=3\tilde{B}_4/4$. Again, the last term of Eq. (\ref{eq:pfc_C4}) is of higher order and can be neglected in the lowest order approximation.
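As a rough illustration of how the lowest order PFC equation given above can be integrated numerically, the following minimal Python sketch implements a standard semi-implicit pseudospectral scheme, in which the stiff linear operator is treated implicitly in Fourier space and the nonlinear terms explicitly. This sketch is our own illustrative example: the parameter values are hypothetical, and it is not the numerical method used to produce the results discussed here.
\begin{verbatim}
import numpy as np

# Minimal semi-implicit pseudospectral solver for the rescaled
# lowest-order PFC equation
#   dn/dt = lap[ -eps*n + (lap + q0^2)^2 n + g2*n^2 + n^3 ]
# All parameter values are hypothetical, for illustration only.

N, L = 256, 32 * np.pi          # grid points, box size (16 lattice periods)
eps, q0, g2 = 0.1, 1.0, -0.5    # illustrative model parameters
dt, nsteps = 0.5, 2000

k = 2 * np.pi * np.fft.fftfreq(N, d=L / N)   # angular wavenumbers
kx, ky = np.meshgrid(k, k, indexing='ij')
k2 = kx**2 + ky**2
Lk = -k2 * (-eps + (q0**2 - k2)**2)          # linear operator in Fourier space

rng = np.random.default_rng(0)
n = 0.05 * rng.standard_normal((N, N))       # small perturbation about n = 0

for step in range(nsteps):
    nl_hat = np.fft.fft2(g2 * n**2 + n**3)   # nonlinear terms, transformed
    n_hat = np.fft.fft2(n)
    # implicit linear part, explicit nonlinear part:
    n_hat = (n_hat - dt * k2 * nl_hat) / (1.0 - dt * Lk)
    n = np.real(np.fft.ifft2(n_hat))
\end{verbatim}
Since the linear operator is inverted exactly in Fourier space, the scheme remains stable at relatively large time steps, and the $k=0$ mode is left unchanged by the update, consistent with the conserved dynamics of the density field.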
\subsection{Binary systems} \label{sec:ddft_binary} For a binary system with components A and B, the DDFT equations describing the dynamics of the respective atomic density fields $\rho_A$ and $\rho_B$ are given by \cite{re:archer05a} \begin{equation} \frac{\partial \rho_A}{\partial t} = {\bm \nabla} \cdot \left [ M_A \rho_A {\bm \nabla} \frac{\delta {\cal F}}{\delta \rho_A} \right ], \qquad \frac{\partial \rho_B}{\partial t} = {\bm \nabla} \cdot \left [ M_B \rho_B {\bm \nabla} \frac{\delta {\cal F}}{\delta \rho_B} \right ], \label{eq:ddft_AB} \end{equation} where $M_{A(B)}$ is the atomic mobility for species $A$ ($B$). The corresponding classical density functional free energy (hereafter referred to as ``DFT'' for short) is of the form \cite{re:rick90,re:denton90,re:singh91,re:lowen94} \begin{eqnarray} &{\cal F}/k_BT =& \int d{\bm r} \sum_{i=A,B} \left [ \rho_i \ln \frac{\rho_i }{\rho_l^i} - \delta \rho_i \right ] \nonumber\\ && - \sum_{n=2}^{\infty}\frac{1}{n!} \int d{\bm r_1} \cdots d{\bm r_n} \sum_{i,...,j=A, B} C_{i...j}^{(n)}({\bm r_1}, \cdots, {\bm r_n}) \delta \rho_i({\bm r_1}) \cdots \delta \rho_j ({\bm r_n}), \end{eqnarray} where $\rho_l^i$ is the reference liquid state density of component $i$, $\delta \rho_i = \rho_i - \rho_l^i$, and $C_{i...j}^{(n)}$ refers to the $n$-point direct correlation function between components $i,...,j=A, B$. Up to three-point correlation functions, we have \begin{eqnarray} {\cal F}/k_BT &=& \int d{\bm r} \left [ \rho_A \ln(\rho_A/\rho_l^A)-\delta \rho_A +\rho_B \ln(\rho_B/\rho_l^B)-\delta \rho_B \right ] \nonumber\\ &-& \frac{1}{2} \int d{\bm r_1} d{\bm r_2} \left [ \delta \rho_A({\bm r_1}) C_{AA}^{(2)}({\bm r_1}, {\bm r_2}) \delta \rho_A({\bm r_2}) + \delta \rho_B({\bm r_1}) C_{BB}^{(2)}({\bm r_1}, {\bm r_2}) \delta \rho_B({\bm r_2}) + 2 \delta \rho_A({\bm r_1}) C_{AB}^{(2)}({\bm r_1}, {\bm r_2}) \delta \rho_B({\bm r_2}) \right ] \nonumber\\ &-&\frac{1}{6} \int d{\bm r_1} d{\bm r_2} d{\bm r_3} \left [ C_{AAA}^{(3)}({\bm r_1},{\bm r_2}, {\bm r_3}) \delta \rho_A({\bm r_1}) \delta \rho_A({\bm r_2}) \delta \rho_A({\bm r_3}) + C_{BBB}^{(3)}({\bm r_1},{\bm r_2}, {\bm r_3}) \delta \rho_B({\bm r_1}) \delta \rho_B({\bm r_2}) \delta \rho_B({\bm r_3}) \right. \nonumber\\ && \left. + 3C_{AAB}^{(3)}({\bm r_1},{\bm r_2}, {\bm r_3}) \delta \rho_A({\bm r_1}) \delta \rho_A({\bm r_2}) \delta \rho_B({\bm r_3}) + 3C_{ABB}^{(3)}({\bm r_1},{\bm r_2}, {\bm r_3}) \delta \rho_A({\bm r_1}) \delta \rho_B({\bm r_2}) \delta \rho_B({\bm r_3}) \right ]. \label{eq:F_AB} \end{eqnarray} Similar to the single-component case discussed in Sec. \ref{sec:ddft_pure}, the correlation functions $C_{ij}^{(2)} ({\bm r_1}, {\bm r_2})$ and $C_{ijk}^{(3)}({\bm r_1},{\bm r_2}, {\bm r_3})$ ($i,j,k=A,B$) are expanded in Fourier space as \begin{eqnarray} &\hat{C}_{ij}^{(2)}(q)=-\hat{C}_0^{ij}+\hat{C}_2^{ij} q^2-\hat{C}_4^{ij} q^4 +\cdots,& \nonumber\\ &\hat{C}_{ijk}^{(3)}({\bm q},{\bm q'}) \simeq \hat{C}_{ijk}^{(3)} ({\bm q}={\bm q'}=0)=-\hat{C}_0^{ijk}.& \label{eq:C_expan} \end{eqnarray} As in the original binary PFC model, we introduce an atomic density field $n$ and a concentration field $\psi$ via \begin{equation} n = \frac{\rho - \rho_l}{\rho_l} = \frac{\rho_A + \rho_B - \rho_l}{\rho_l}, \qquad \psi = \frac{\rho_A - \rho_B}{\rho} = \frac{\rho_A - \rho_B}{\rho_A + \rho_B}, \label{eq:ndN} \end{equation} where $\rho_l = \rho_l^A + \rho_l^B$, and hence \begin{equation} \rho_A = \frac{\rho_l}{2} (1+n)(1+\psi), \qquad \rho_B = \frac{\rho_l}{2} (1+n)(1-\psi).
\label{eq:rho_AB} \end{equation} Substituting Eqs. (\ref{eq:C_expan})--(\ref{eq:rho_AB}) into (\ref{eq:F_AB}), we can express the free energy functional in terms of $n$ and $\psi$: \begin{eqnarray} &\Delta {\cal F}/\rho_l k_BT =& \int d{\bm r} \left \{ (1+n)\ln(1+n) + \frac{1}{2} (1+n) \left [ (1+\psi) \ln(1+\psi) +(1-\psi) \ln(1-\psi) \right ] \right. \nonumber\\ && + \beta(\psi) n + \frac{1}{2} B_l'(\psi) n^2 + \frac{1}{3} \tilde{B}(\psi) n^3 + \frac{1}{2} \beta_2 \psi^2 + \frac{1}{3} \beta_3 \psi^3 \label{eq:F_ndN} \\ && \left. + \frac{(1+n)}{2} \left ( 2B^x(\psi) R^2 \nabla^2 + B^x(\psi) R^4 \nabla^4 \right ) n + \frac{K}{2} \left | {\bm \nabla} [(1+n)\psi] \right |^2 + \frac{\kappa}{2} \left ( \nabla^2 [(1+n)\psi] \right )^2 \right \}, \nonumber \label{eq:freeC3} \end{eqnarray} where \begin{eqnarray} \beta (\psi) &=& \beta_0 \psi + \beta_1 \psi^2 + \beta_3 \psi^3 \nonumber\\ &=& \frac{\rho_l}{4} \left [ \delta \hat{C}_0 + \frac{\rho_l^B - \rho_l^A}{2} \delta \hat{C}_0^{(3)} \right ] \psi + \left [ \beta_2 + \frac{\rho_l^2}{16} \delta \hat{C}_0^{(3)} \right ] \psi^2 + \frac{\rho_l^2}{16} \Delta \hat{C}_0^{(3)} \psi^3, \nonumber\\ B_l'(\psi) &=& B^{\ell}(\psi) -1 = B_0^{\ell}-1 + B_1^{\ell} \psi + B_2^{\ell} \psi^2 + B_3^{\ell} \psi^3 \nonumber\\ &=& \rho_l \left [ \hat{\bar{C}}_0 + \frac{\rho_l^B-\rho_l^A}{8} \hat{\tilde{C}}_0^{(3)} \right ] + \frac{\rho_l}{2} \left [ \delta \hat{C}_0 + \frac{\rho_l^B-\rho_l^A}{2} \delta \hat{C}_0^{(3)} + \frac{\rho_l}{4} \hat{\tilde{C}}_0^{(3)} \right ] \psi \nonumber\\ &+& \frac{\rho_l}{4} \left [ \Delta \hat{C}_0 + \frac{\rho_l^B-\rho_l^A}{2} \Delta \hat{C}_0^{(3)} + \rho_l \delta \hat{C}_0^{(3)} \right ] \psi^2 + \frac{\rho_l^2}{8} \Delta \hat{C}_0^{(3)} \psi^3, \nonumber\\ \tilde{B}(\psi) &=& \frac{\rho_l^2}{16} \left [ 8\hat{\bar{C}}_0^{(3)} + 3\hat{\tilde{C}}_0^{(3)} \psi + 3\delta \hat{C}_0^{(3)} \psi^2 + \Delta \hat{C}_0^{(3)} \psi^3 \right ] = \tilde{B}_0 + \tilde{B}_1 \psi + \tilde{B}_2 \psi^2 + \beta_3 \psi^3, \nonumber\\ \beta_2 &=& \frac{\rho_l}{4} \left [ \Delta \hat{C}_0 + \frac{\rho_l^B - \rho_l^A}{2} \Delta \hat{C}_0^{(3)} \right ], \qquad \Delta \beta = \beta_1 - \beta_2 = \frac{\rho_l^2}{16} \delta \hat{C}_0^{(3)}, \qquad \tilde{B}_2 = 3\Delta \beta, \nonumber\\ B_1^{\ell} &=& 2 \beta_0 + \frac{2}{3} \tilde{B}_1, \qquad B_2^{\ell} = 4\beta_1 - 3\beta_2, \qquad B_3^{\ell} = 2\beta_3, \nonumber\\ B^x(\psi) &=& \frac{\rho_l \left ( \hat{\bar{C}}_2 + \delta \hat{C}_2 \psi/2 \right )^2}{4\left ( \hat{\bar{C}}_4 + \delta \hat{C}_4 \psi/2 \right )} = \frac{\rho_l \hat{\bar{C}}_2^2}{4\hat{\bar{C}}_4} \left [ 1 + \left ( \frac{\delta \hat{C}_2}{\hat{\bar{C}}_2} - \frac{\delta \hat{C}_4}{2\hat{\bar{C}}_4} \right ) \psi + \cdots \right ] = B_0^x + B_1^x \psi + \cdots, \nonumber\\ R &=& \sqrt{\frac{2 \left ( \hat{\bar{C}}_4 + \delta \hat{C}_4 \psi/2 \right )}{\hat{\bar{C}}_2 + \delta \hat{C}_2 \psi/2}} = \sqrt{\frac{2\hat{\bar{C}}_4}{\hat{\bar{C}}_2}} \left [ 1 + \frac{1}{4} \left ( \frac{\delta \hat{C}_4}{\hat{\bar{C}}_4} - \frac{\delta \hat{C}_2}{\hat{\bar{C}}_2} \right ) \psi + \cdots \right ] = R_0 + R_1 \psi + \cdots, \nonumber\\ B^xR^2 &=& \frac{\rho_l}{2} \left ( \hat{\bar{C}}_2 + \frac{1}{2} \delta \hat{C}_2 \psi \right ) = B_0^xR_0^2 (1+\alpha_2 \psi), \qquad \alpha_2 = \delta \hat{C}_2 / 2\hat{\bar{C}}_2, \nonumber\\ B^xR^4 &=& \rho_l \left ( \hat{\bar{C}}_4 + \frac{1}{2} \delta \hat{C}_4 \psi \right ) = B_0^xR_0^4 (1+\alpha_4 \psi), \qquad \alpha_4 = \delta \hat{C}_4 / 2\hat{\bar{C}}_4, \nonumber\\ K &=& -\frac{\rho_l}{4} \Delta 
\hat{C}_2, \qquad \kappa = \frac{\rho_l}{4} \Delta \hat{C}_4. \label{eq:parameters} \end{eqnarray} In the above formulae, the following has been defined from the correlation functions: \begin{eqnarray} &\bar{C} = \frac{1}{4} \left ( C_{AA}^{(2)} + C_{BB}^{(2)} + 2C_{AB}^{(2)} \right ), \qquad \delta C = C_{AA}^{(2)} - C_{BB}^{(2)}, \qquad \Delta C = C_{AA}^{(2)} + C_{BB}^{(2)} - 2C_{AB}^{(2)}, & \nonumber\\ &\bar{C}^{(3)} = \frac{1}{8} \left ( C_{AAA}^{(3)} + C_{BBB}^{(3)} + 3C_{AAB}^{(3)} + 3C_{ABB}^{(3)} \right ), \qquad \tilde{C}^{(3)} = C_{AAA}^{(3)} - C_{BBB}^{(3)} + C_{AAB}^{(3)} - C_{ABB}^{(3)}, \label{eq:CAB}\\ &\delta C^{(3)} = C_{AAA}^{(3)} + C_{BBB}^{(3)} - C_{AAB}^{(3)} - C_{ABB}^{(3)}, \qquad \Delta C^{(3)} = C_{AAA}^{(3)} - C_{BBB}^{(3)} -3 C_{AAB}^{(3)} + 3 C_{ABB}^{(3)}, \nonumber \end{eqnarray} and the ``$\,\,\hat{}$\,\," in Eq.~(\ref{eq:parameters}) refer to the Fourier coefficients in the expansions of Eq. (\ref{eq:C_expan}) where the numerical subscripts on the coefficients refer to the appropriate power of the expansion. For binary alloys the lattice constant is often approximated by Vegard's law, i.e., $R \simeq R_0 + R_1 \psi = R_0 (1 + \alpha \psi)$. In this expansion, near $\psi=0$ the solute expansion coefficient $\alpha$ is expressed as \begin{equation} \alpha = R_1/R_0 = \frac{1}{2} (\alpha_4 - \alpha_2). \label{eq:alpha} \end{equation} (In the dilute limit (i.e., $\psi \sim \pm 1$) it would be simple to expand $R$ around $\psi \sim \pm 1$ to obtain the solute expansion coefficient as well.) Using the simplification adopted in the original binary PFC \cite{re:elder10}, it is assumed that $B^x \simeq B_0^x$ and $R^2 \simeq R_0^2 (1+2\alpha \psi)$, $R^4 \simeq R_0^4(1+4\alpha \psi)$ via expansion, which corresponds to the assumption of $\alpha_2 \simeq 2\alpha$ and $\alpha_4 \simeq 4\alpha$ as obtained from Eqs. (\ref{eq:parameters}) and (\ref{eq:alpha}). In terms of the above definitions, the time derivatives of the variables $n$ and $\psi$ defined in Eq. (\ref{eq:ndN}) are given by \begin{equation} \frac{\partial n}{\partial t} = \frac{1}{\rho_l} \left ( \frac{\partial \rho_A}{\partial t} + \frac{\partial \rho_B}{\partial t} \right ), \qquad \frac{\partial \psi}{\partial t} = \frac{1}{\rho_l (1+n)} \left [ (1-\psi) \frac{\partial \rho_A}{\partial t} - (1+\psi) \frac{\partial \rho_B}{\partial t} \right ]. \label{eq:ndN_rho} \end{equation} From the DDFT equations (\ref{eq:ddft_AB}), the dynamics for $n$ and $\psi$ thus become \begin{eqnarray} &\partial n / \partial t =& M_1 {\cal D}_1 + M_2 {\cal D}_2, \nonumber\\ &\partial \psi / \partial t =& \frac{1}{1+n} \left [ (M_2-M_1 \psi) {\cal D}_1 + (M_1-M_2 \psi) {\cal D}_2 \right ], \label{eq:pfc_ndN} \end{eqnarray} where \begin{equation} M_1 = \frac{1}{2} k_BT \left ( M_A + M_B \right ), \qquad M_2 = \frac{1}{2} k_BT \left ( M_A - M_B \right ), \end{equation} and \begin{eqnarray} &{\cal D}_1 =& \frac{1}{\rho_l k_BT} \left \{ {\bm \nabla} \cdot \left [ (1+n) {\bm \nabla} \frac{\delta {\cal F}}{\delta n} \right ] - {\bm \nabla} \cdot \left [ ({\bm \nabla} \psi) \frac{\delta {\cal F}}{\delta \psi} \right ] \right \}, \nonumber\\ &{\cal D}_2 =& \frac{1}{\rho_l k_BT} \left \{ {\bm \nabla} \cdot \left [ (1+n) \psi {\bm \nabla} \frac{\delta {\cal F}}{\delta n} \right ] + {\bm \nabla} \cdot \left [ (1+n)(1-\psi^2) {\bm \nabla} \left ( \frac{1}{1+n} \frac{\delta {\cal F}}{\delta \psi} \right ) - (\psi {\bm \nabla} \psi) \frac{\delta {\cal F}}{\delta \psi} \right ] \right \}. 
\label{eq:D_12} \end{eqnarray} Using the free energy functional (\ref{eq:F_ndN}) as well as Eq. (\ref{eq:D_12}), the results of ${\cal D}_1$ and ${\cal D}_2$ (keeping all the terms) are \begin{eqnarray} &{\cal D}_1 =& \nabla^2 \left \{ - \left ( B_0^x-B_0^{\ell} \right ) n + \left ( B_1^{\ell} \psi + B_2^{\ell} \psi^2 \right ) n \right. \nonumber\\ && + \left [ -(B_0^x-B_0^{\ell}+1)/2+\tilde{B}_0 + \left ( B_1^{\ell}/2+\tilde{B}_1 \right ) \psi + \left ( B_2^{\ell}/2+\tilde{B}_2 \right ) \psi^2 \right ] n^2 \nonumber\\ && + \frac{2}{3} \left ( \tilde{B}_0 + \tilde{B}_1 \psi + \tilde{B}_2 \psi^2 \right ) n^3 + \frac{1}{3} B_3^{\ell} (1+n)^3 \psi^3 + \beta_0 \psi + \frac{1}{2} (\beta_1+\Delta \beta) \psi^2 \nonumber\\ && \left. + B_0^x \left ( R_0^2\nabla^2 + 1 \right )^2 n + B_0^x \left ( \alpha_2 R_0^2\nabla^2 + \frac{\alpha_4}{2} R_0^4\nabla^4 \right ) [(1+n)\psi] \right \} \nonumber\\ && + {\bm \nabla} \cdot \left \{ n {\bm \nabla} \left [ B_0^x \left ( R_0^2\nabla^2 + 1 \right )^2 n + B_0^x \left ( \alpha_2 R_0^2\nabla^2 + \frac{\alpha_4}{2} R_0^4\nabla^4 \right ) [(1+n)\psi] \right ] \right \} \nonumber\\ && + {\bm \nabla} \cdot \left \{ (1+n)\psi {\bm \nabla} \left [ B_0^x \left ( \alpha_2 R_0^2\nabla^2 + \frac{\alpha_4}{2} R_0^4\nabla^4 \right ) n + \left ( -K\nabla^2 + \kappa \nabla^4 \right ) [(1+n)\psi] \right ] \right \}, \label{eq:D1} \end{eqnarray} \begin{eqnarray} &{\cal D}_2 =& \nabla^2 \left \{ \beta_0 n + \left ( \beta_0/2 + \tilde{B}_1/3 \right ) n^2 + \frac{2}{9} \tilde{B}_1 n^3 + (1+\beta_2+2\Delta \beta n) (1+n) \psi \right. \nonumber\\ && + \frac{1}{2}(\beta_0+B_3^{\ell}) (1+n)^2 \psi^2 + \frac{1}{3}\tilde{B}_1 n^2 \psi^2 + \frac{2}{3} \Delta \beta (1+n)^3 \psi^3 \nonumber\\ && \left. + B_0^x \left ( \alpha_2 R_0^2\nabla^2 + \frac{\alpha_4}{2} R_0^4\nabla^4 \right ) n + \left ( -K\nabla^2 + \kappa \nabla^4 \right ) [(1+n)\psi] \right \} \nonumber\\ && + {\bm \nabla} \cdot \left \{ n {\bm \nabla} \left [ (1+n)(\beta_2+2\Delta \beta n) \psi + \beta_3 (1+n)^2 \psi^2 + B_0^x \left ( \alpha_2 R_0^2\nabla^2 + \frac{\alpha_4}{2} R_0^4\nabla^4 \right ) n \right. \right. \nonumber\\ && \left. \left. + \left ( -K\nabla^2 + \kappa \nabla^4 \right ) [(1+n)\psi] \right ] \right \} + {\bm \nabla} \cdot \left \{ \psi {\bm \nabla} \left [ \frac{2}{3} \tilde{B}_1 n \psi \right ] \right \} \nonumber\\ && + {\bm \nabla} \cdot \left \{ (1+n)\psi {\bm \nabla} \left [ -(B_0^x-B_0^{\ell}+1) n + \left ( \tilde{B}_0 + \frac{2}{3} \tilde{B}_1 \psi \right ) n^2 + B_0^x \left ( R_0^2\nabla^2 + 1 \right )^2 n \right. \right. \nonumber\\ && \left. \left. + B_0^x \left ( \alpha_2 R_0^2\nabla^2 + \frac{\alpha_4}{2} R_0^4\nabla^4 \right ) [(1+n)\psi] \right ] \right \}. \label{eq:D2} \end{eqnarray} At this point in the derivation it should be noted that no additional approximations beyond those going into the expansions of Eq. (\ref{eq:C_expan}) have been introduced. 
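As a simple consistency check (a sketch, not part of the derivation itself), consider the limit of identical species $A=B$, for which all the difference correlations vanish, $\delta \hat{C}_i = \Delta \hat{C}_i = \hat{\tilde{C}}_0^{(3)} = \delta \hat{C}_0^{(3)} = \Delta \hat{C}_0^{(3)} = 0$, so that $\beta(\psi) = \beta_2 = \beta_3 = K = \kappa = \alpha_2 = \alpha_4 = 0$ by Eq. (\ref{eq:parameters}). Equation (\ref{eq:D1}) then reduces to \begin{equation} {\cal D}_1 = \nabla^2 \left [ -\left ( B_0^x - B_0^{\ell} \right ) n + \left ( \tilde{B}_0 - \frac{B_0^x - B_0^{\ell} + 1}{2} \right ) n^2 + \frac{2}{3} \tilde{B}_0 n^3 + B_0^x \left ( R_0^2 \nabla^2 + 1 \right )^2 n \right ] + {\bm \nabla} \cdot \left [ n {\bm \nabla} B_0^x \left ( R_0^2 \nabla^2 + 1 \right )^2 n \right ], \end{equation} which has the same structure as the single-component result of Sec. \ref{sec:ddft_pure}, while ${\cal D}_2$ retains only terms describing the interdiffusion of the (now indistinguishable) species labels and, since $M_2 = 0$ when $M_A = M_B$, drops out of $\partial n / \partial t$ entirely.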
\subsubsection{Non-dimensional form of model} To simplify the results, the above binary PFC equations can be rescaled by defining a length scale $R_0$, a time scale $R_0^2/(M_1 B_0^x)$, $n \rightarrow \sqrt{v/B_0^x}~ n$, and $\psi \rightarrow \sqrt{v/B_0^x}~ \psi$, yielding \begin{equation} \partial n / \partial t = {\cal D}_1 + m {\cal D}_2, \quad \partial \psi / \partial t = \frac{1}{1 + g_0 n} \left [ (m - g_0 \psi) {\cal D}_1 + (1 - m g_0 \psi) {\cal D}_2 \right ], \label{eq:bpfc_re} \end{equation} where \begin{equation} m=\frac{M_2}{M_1}=\frac{M_A-M_B}{M_A+M_B}, \quad g_0 = \sqrt{\frac{B_0^x}{v}}, \quad v=\frac{2}{3} \tilde{B}_0 = \frac{\rho_l^2}{3} \hat{\bar{C}}_0^{(3)}. \label{eq:m_g0_v} \end{equation} Keeping only terms up to third order in $n$ and $\psi$, the rescaled results for ${\cal D}_1$ and ${\cal D}_2$ are \begin{eqnarray} &{\cal D}_1 =& \nabla^2 \left \{ -\epsilon n + \left ( \nabla^2 + q_0^2 \right )^2 n + \left ( g_1 \psi + g \psi^2 \right ) n + \left ( g_2 + \bar{g}_2 \psi \right ) n^2 + n^3 \right. \nonumber\\ && \left. + \bar{g} \psi + v_1 \psi^2 + u_1 \psi^3 + \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) \left [ (1 + g_0 n) \psi \right ] + g_0 \psi \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) n \right \} \nonumber\\ && + g_0 {\bm \nabla} \cdot \left \{ n {\bm \nabla} \left [ \left ( \nabla^2 + q_0^2 \right )^2 n + \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) \left ( (1 + g_0 n) \psi \right ) \right ] \right \} \nonumber\\ && + g_0 {\bm \nabla} \cdot \left [ \psi {\bm \nabla} \left ( - K_0 \nabla^2 + \kappa_0 \nabla^4 \right ) \left ( (1 + g_0 n) \psi \right ) \right ] - g_0 {\bm \nabla} \cdot \left [ \left ({\bm \nabla} \psi \right ) \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) n \right ] \nonumber\\ && + g_0^2 {\bm \nabla} \cdot \left \{ n \psi {\bm \nabla} \left [ \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) n + \left ( - K_0 \nabla^2 + \kappa_0 \nabla^4 \right ) \psi \right ] \right \}, \label{eq:re_D1} \end{eqnarray} \begin{eqnarray} & {\cal D}_2 =& \nabla^2 \left \{ \bar{g} n + (1+ g_0 n) \left ( \alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) n + \left ( 2v_1 \psi + w_2 \psi^2 \right ) n + \left ( v_2 + g \psi \right ) n^2 + g_3 n^3 \right. \nonumber\\ && \left.
+ w_0 \psi + v_0 \psi^2 + u_0 \psi^3 + \left ( - K_0 \nabla^2 + \kappa_0 \nabla^4 \right ) \left [ (1 + g_0 n) \psi \right ] \right \} \nonumber\\ && + g_0 {\bm \nabla} \cdot \left \{ n {\bm \nabla} \left [ w_0 \psi + \left ( - K_0 \nabla^2 + \kappa_0 \nabla^4 \right ) \left ( (1 + g_0 n) \psi \right ) \right ] \right \} - g_0 {\bm \nabla} \cdot \left [ \left ( {\bm \nabla} n \right ) \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) n \right ] \nonumber\\ && + g_0 {\bm \nabla} \cdot \left \{ \psi {\bm \nabla} \left [ \left ( -\epsilon + \gamma_1 \psi \right ) n + \gamma_2 n^2 + \left ( \nabla^2 + q_0^2 \right )^2 n + \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) \left ( (1 + g_0 n) \psi \right ) \right ] \right \} \nonumber\\ && + g_0^2 {\bm \nabla} \cdot \left \{ n \psi {\bm \nabla} \left [ \left ( \nabla^2 + q_0^2 \right )^2 n + \left (\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4 \right ) \psi \right ] \right \}, \label{eq:re_D2} \end{eqnarray} where the rescaled parameters are \begin{eqnarray} && \epsilon = \frac{B_0^x - B_0^{\ell}}{B_0^x}, \qquad q_0 = 1, \quad g = \frac{B_2^{\ell}}{v}, \quad g_2=\frac{g_0}{2} \left ( \frac{2\tilde{B}_0-1}{B_0^x} - \epsilon \right ), \quad v_1=\left ( \frac{\beta_1+\Delta \beta}{2B_0^x} \right ) g_0, \nonumber\\ &&w_0 = \frac{1+\beta_2}{B_0^x}, \quad u_0 = \frac{2\tilde{B}_2}{9v}, \quad K_0 = \frac{K}{B_0^x R_0^2} = -\frac{\Delta \hat{C}_2}{2\hat{\bar{C}}_2}, \quad \kappa_0 = \frac{\kappa}{B_0^x R_0^4} = \frac{\Delta \hat{C}_4}{4\hat{\bar{C}}_4}, \nonumber\\ && \bar{g}=\frac{\beta_0}{B_0^x}, \qquad g_1=\frac{B_1^{\ell}}{B_0^x} g_0, \qquad \bar{g}_2=\frac{B_1^{\ell} + 2\tilde{B}_1}{2v}, \qquad u_1 = \frac{B_3^{\ell}}{3v}, \nonumber\\ && w_2 = \frac{\beta_0+2B_3^{\ell}}{v}, \qquad v_2 = \left ( \frac{\beta_0/2+\tilde{B}_1/3}{B_0^x} \right ) g_0, \qquad g_3 = \frac{2\tilde{B}_1}{9v}, \nonumber\\ && v_0 = \left ( \frac{\beta_0+B_3^{\ell}}{2B_0^x} \right ) g_0, \qquad \gamma_1 = 3 (g_3 - u_1) / g_0, \qquad \gamma_2 = g_2 - v_1. \label{eq:para_re} \end{eqnarray} Note that from Eqs. (\ref{eq:parameters}) and (\ref{eq:C_expan}), $B_0^x$ can be rewritten as \begin{equation} B_0^x = \frac{\rho_l \hat{\bar{C}}_2^2}{4 \hat{\bar{C}}_4} = \rho_l \left ( \hat{\bar{C}}_0 + \hat{\bar{C}}_{\rm max} \right ), \label{eq:B0x} \end{equation} where $\hat{\bar{C}}_{\rm max}$ is the maximum of the first peak of the two-point correlation function $\hat{\bar{C}}$ in Fourier space. If $|\Delta \rho_l| = |\rho^A_l - \rho^B_l| \ll |\hat{\bar{C}}_0/\hat{\tilde{C}}_0^{(3)}|$, then $B_0^{\ell} \sim 1 + \rho_l \hat{\bar{C}}_0$ from Eq. (\ref{eq:parameters}) and thus \begin{equation} \epsilon = \frac{B_0^x - B_0^{\ell}}{B_0^x} \sim \frac{\rho_l \hat{\bar{C}}_{\rm max} -1}{\rho_l \left ( \hat{\bar{C}}_0 + \hat{\bar{C}}_{\rm max} \right )} \sim \frac{\hat{\bar{C}}_{\rm max}}{\hat{\bar{C}}_0 + \hat{\bar{C}}_{\rm max}}. \end{equation} Usually $\hat{\bar{C}}_{\rm max} \ll \hat{\bar{C}}_0$, particularly close to the melting point $T_m$, and hence $\epsilon$ can be viewed as a small variable (as also used in the amplitude equation expansion given below), proportional to $(T-T_m)/T_m$ as discussed in the original PFC model \cite{re:elder07}. \subsubsection{Simplification of scaled binary model} \label{sec:simplify} The rescaled PFC dynamic equations (\ref{eq:bpfc_re})--(\ref{eq:re_D2}) can be further simplified to a lower-order form via a scale analysis. A simple scale analysis of Eqs.
(\ref{eq:re_D1}) and (\ref{eq:re_D2}) yields $n, \psi \sim {\cal O}(\epsilon^{1/2})$ (e.g., from Eq. (\ref{eq:re_D1}) we have ${\cal O}(\epsilon n) \sim {\cal O}(n^3)$ and ${\cal O}(\psi) \sim {\cal O}(n)$, as is usually assumed). To simplify the results the following approximations are made: (i) Assume that $\{ |\hat{\bar{C}}_0|, |\hat{\bar{C}}_0^{(3)}|, |\delta \hat{C}_0^{(3)}| \} \gg \{ |\delta \hat{C}_0|, |\hat{\tilde{C}}_0^{(3)}|, |\Delta \hat{C}_0^{(3)}| \}$ and $|\rho_l^A - \rho_l^B|\ll|\rho_l^A+\rho_l^B|$. (An example case would be that the zeroth-mode ($q=0$) correlation functions between the same atomic species are of the same order, and are either significantly larger or significantly smaller than those between different ones; see Eq. (\ref{eq:CAB}).) Thus for the rescaled parameters in Eq. (\ref{eq:para_re}), we can estimate (based on the definitions in Eqs. (\ref{eq:parameters}) and (\ref{eq:CAB}), as well as Eqs. (\ref{eq:m_g0_v}) and (\ref{eq:B0x})) that $$ g_0, g, g_2, v_1, u_0 \sim {\cal O}(1) ~ {\rm or} ~ {\cal O}(\epsilon^{1/2}), \qquad \bar{g}, g_1, \bar{g}_2, u_1, w_2, v_2, g_3, v_0, \gamma_1, \gamma_2 \sim {\cal O}(\epsilon) ~{\rm or ~ higher}. $$ (ii) The concentration field $\psi$ is slowly varying in space, and we can keep only the lowest linear gradient terms for $\psi$. (iii) Similar to the single-component case in Sec. \ref{sec:ddft_pure}, it can be argued that in Eqs. (\ref{eq:re_D1}) and (\ref{eq:re_D2}), compared to the first terms $\nabla^2 \{ \cdots \}$, all other terms ($g_0 {\bm \nabla} \cdot \{ \cdots \}$) are of higher order. (iv) For linear terms in $n$, only $[-\epsilon + (\nabla^2 + q_0^2)^2] n$ is kept, which leads to the periodic crystal structure in solid phases, while the other term $(\alpha_2 \nabla^2 + \frac{\alpha_4}{2} \nabla^4) n$ is neglected; this corresponds to ignoring the $n\psi$ related terms in the free energy functional (\ref{eq:F_ndN}) owing to the much larger length scales of the $\psi$ field \cite{re:elder07}. (v) It is assumed that $\alpha_2 \simeq \alpha_4/2 \simeq 2 \alpha$ (see the discussions below Eq. (\ref{eq:alpha})). To lowest order, ${\cal O}(\epsilon^{3/2})$, the above simplifications reduce the PFC equations (\ref{eq:bpfc_re}), (\ref{eq:re_D1}), and (\ref{eq:re_D2}) to \begin{equation} \partial n / \partial t = {\cal D}_1 + m {\cal D}_2, \quad \partial \psi / \partial t = m {\cal D}_1 + {\cal D}_2, \label{eq:pfc_npsi} \end{equation} where \begin{eqnarray} &{\cal D}_1 = \nabla^2 \left \{ \left ( - \epsilon + g \psi^2 \right ) n + \left ( \nabla^2 + q_0^2 \right )^2 n + g_2 n^2 + n^3 + v_1 \psi^2 + \etaa \left [ \psi \left ( \nabla^2 + \nabla^4 \right ) n + \left ( \nabla^2 + \nabla^4 \right ) (n \psi) \right ] \right \}, & \nonumber\\ &{\cal D}_2 = \nabla^2 \left [ \etaa n \left ( \nabla^2 + \nabla^4 \right ) n + \left ( w_0 + 2v_1 n + g n^2 \right ) \psi + u_0 \psi^3 - K_0 \nabla^2 \psi \right ],& \label{eq:D12} \end{eqnarray} with $\alpha_0 = g_0 \alpha$ the rescaled solute expansion coefficient. Equations (\ref{eq:pfc_npsi}) and (\ref{eq:D12}) recover the original binary PFC model with conserved dynamics for both the $n$ and $\psi$ fields \cite{re:elder07,re:elder10}, except for the $v_1$ terms ($v_1 \psi^2$ and $2v_1 n \psi$), which account for additional coupling between the atomic density and concentration fields (i.e., between the small-scale crystalline structure and the ``slow'' concentration field). This can also be seen via rewriting Eq.
(\ref{eq:D12}) through an effective potential (or free energy functional) ${\cal F}_{\rm eff}$: \begin{eqnarray} &{\cal D}_1 = \nabla^2 \frac{\delta {\cal F}_{\rm eff}}{\delta n}, \qquad {\cal D}_2 = \nabla^2 \frac{\delta {\cal F}_{\rm eff}}{\delta \psi},& \label{eq:D12_eff} \\ &{\cal F}_{\rm eff} = \int d{\bm r} \left \{ - \frac{1}{2} \epsilon n^2 + \frac{1}{2} n \left ( \nabla^2 + q_0^2 \right )^2 n + \frac{1}{3} g_2 n^3 + \frac{1}{4} n^4 + \etaa n \left ( \nabla^2 + \nabla^4 \right ) (n \psi) \right.& \nonumber\\ &\left. + \frac{1}{2} (w_0 + 2v_1 n + g n^2) \psi^2 + \frac{1}{4} u_0 \psi^4 + \frac{1}{2} K_0 \left | {\bm \nabla} \psi \right |^2 \right \}.& \label{eq:F_eff} \end{eqnarray} In the rest of this work, all results, including the corresponding amplitude equation formalism, noise dynamics, and the related applications, are based on the simplified PFC dynamic equations (\ref{eq:pfc_npsi}) and (\ref{eq:D12}). The above results can also be derived and verified through two other alternative methods, as given in Appendix \ref{append:altern}. Furthermore, to include higher-order terms (e.g., $n^4$, $\psi^4$, ...) in both the free energy functional and the dynamic equations, we need to consider higher-order direct correlation functions (4-point, 5-point, etc.) as shown in the single-component case (Sec. \ref{sec:ddft_pure}), with similar derivation steps. \section{Amplitude equation formalism for binary PFC model} \label{sec:ampl} As discussed in Sec. \ref{sec:intro}, the PFC methodology includes model equations governing the dynamics of density and concentration fields as given above. This section will examine the long wavelength and time limits of the alloy PFC model by deriving its corresponding amplitude equations, which emerge after coarse-graining the model using a multiple-scale analysis. The amplitude representation for single-component PFC models has been well established \cite{re:goldenfeld05,re:athreya06,re:athreya07,re:chan10,re:yeon10}, while for binary systems the corresponding amplitude equations have been derived very recently, for both 2D hexagonal/triangular and 3D bcc and fcc crystalline structures \cite{re:elder10,re:spatschek10}. Here we focus on the 2D amplitude equations for the binary PFC model with hexagonal lattice structure, yielding a complete formulation incorporating the effects of different mobilities between alloy components and dynamic variation of the average atomic density, both of which are missing in the previous binary alloy amplitude formulation \cite{re:elder10}. It is straightforward to extend this calculation to 3D bcc or fcc structures. The derivation process involves two steps: the standard multiple scale expansion \cite{re:cross93} for lowest order amplitude equations (Sec. \ref{sec:multiscale}), and a new hybrid approach (combining results of multiple scale approach and the idea of ``Quick and Dirty'' renormalization-group (RG) method developed by Goldenfeld \textit{et al.} \cite{re:goldenfeld05,re:athreya06}) for full order amplitude equations (see Sec. \ref{sec:hybrid}). To apply the multiple scale analysis, the rescaled PFC equations (\ref{eq:pfc_npsi}) and (\ref{eq:D12}) are used. 
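Before carrying out the expansion, it is worth recording one property of the simplified dynamics that the amplitude formulation should inherit (a brief check, assuming periodic or no-flux boundary conditions): writing $\mu_n = \delta {\cal F}_{\rm eff}/\delta n$ and $\mu_{\psi} = \delta {\cal F}_{\rm eff}/\delta \psi$, Eqs. (\ref{eq:pfc_npsi}) and (\ref{eq:D12_eff}) give \begin{equation} \frac{d {\cal F}_{\rm eff}}{dt} = \int d{\bm r} \left ( \mu_n \frac{\partial n}{\partial t} + \mu_{\psi} \frac{\partial \psi}{\partial t} \right ) = - \int d{\bm r} \left ( \left | {\bm \nabla} \mu_n \right |^2 + 2m {\bm \nabla} \mu_n \cdot {\bm \nabla} \mu_{\psi} + \left | {\bm \nabla} \mu_{\psi} \right |^2 \right ) \leq 0, \end{equation} since $|m| = |M_A - M_B|/(M_A + M_B) \leq 1$ renders the quadratic form positive semidefinite; i.e., the effective free energy is non-increasing under the simplified dynamics.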
\subsection{Multiple scale expansion: Lowest order amplitude equations} \label{sec:multiscale} Following the standard procedure of multiple scale approach \cite{re:cross93}, in the limit of small $\epsilon$ (i.e., high temperature) we can separate ``slow'' spatial and temporal scales $(X=\epsilon^{1/2}x, Y=\epsilon^{1/2}y, T=\epsilon t)$ for structural profile/amplitudes from ``fast'' scales of the underlying crystalline lattice. Substituting \begin{equation} \partial_x \rightarrow \partial_x + \epsilon^{1/2} \partial_X, \qquad \partial_y \rightarrow \partial_y + \epsilon^{1/2} \partial_Y, \qquad \partial_t \rightarrow \epsilon \partial_T, \label{eq:deriv_xyt} \end{equation} and the expansions \begin{eqnarray} & n = \epsilon^{1/2} n^{(1/2)} + \epsilon n^{(1)} + \epsilon^{3/2} n^{(3/2)} + \epsilon^2 n^{(2)} + \cdots, & \nonumber\\ & \psi = \epsilon^{1/2} \psi^{(1/2)} + \epsilon \psi^{(1)} + \epsilon^{3/2} \psi^{(3/2)} + \epsilon^2 \psi^{(2)} + \cdots, & \end{eqnarray} into the PFC equations (\ref{eq:pfc_npsi}) and (\ref{eq:D12}), we can obtain the corresponding equations at each order of $\epsilon^{1/2}$. For simplicity, assume $m, \alpha_0, g, u_0, K_0 \sim {\cal O}(1)$, $g_2, v_1 \sim {\cal O}(\epsilon^{1/2})$, and $w_0 \sim {\cal O}(\epsilon)$ (as also assumed in Sec. \ref{sec:simplify} for model simplification). To ${\cal O}(\epsilon^{1/2})$ and ${\cal O}(\epsilon)$ we have \begin{equation} \nabla^2 \left [ {\cal L}_0 n^{(i)} - m K_0 \nabla^2 \psi^{(i)} \right ] =0, \qquad \nabla^2 \left [ m {\cal L}_0 n^{(i)} - K_0 \nabla^2 \psi^{(i)} \right ] =0, \end{equation} where $i=1/2$ or $1$, and ${\cal L}_0 = (\nabla^2 +q_0^2)^2$. This leads to $(1-m^2) \nabla^2 {\cal L}_0 n^{(i)} = 0$ and $(1-m^2) \nabla^4 \psi^{(i)} = 0$, with solutions \begin{equation} n^{(i)} = n_0^{(i)}(X,Y,T) + \sum_{j=1}^{3} \A_j^{(i)}(X,Y,T) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.}, \qquad \psi^{(i)} = \psi_0^{(i)}(X,Y,T), \label{eq:npsi_1} \end{equation} where ${\bm q}_j^0$ represent the three reciprocal lattice vectors for 2D hexagonal/triangular structure: ${\bm q_1^0} = -q_0 ( \sqrt{3} \hat{x}/2 + \hat{y}/2 )$, ${\bm q_2^0} = q_0 \hat{y}$, ${\bm q_3^0} = q_0 (\sqrt{3} \hat{x}/2 - \hat{y}/2 )$. $A_j$ are the slowly varying complex amplitudes of the modes ${\bm q}_j^0$, while $n_0$ and $\psi_0$ refer to the real amplitudes of the zero wavenumber neutral mode as a result of order parameter conservation \cite{re:matthews00}. 
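Two elementary properties of these wave vectors, noted here for orientation, are used repeatedly below. First, they are resonant, ${\bm q}_1^0 + {\bm q}_2^0 + {\bm q}_3^0 = 0$, which is what permits the quadratic couplings of the form ${A_k}^* {A_l}^*$ appearing at the next order. Second, for constant real amplitudes $\A_j^{(i)} = A$ the density solution (\ref{eq:npsi_1}) reduces to the familiar one-mode triangular profile \begin{equation} n^{(i)} = n_0^{(i)} + 2A \left [ 2 \cos \left ( \frac{\sqrt{3}}{2} q_0 x \right ) \cos \left ( \frac{1}{2} q_0 y \right ) + \cos \left ( q_0 y \right ) \right ]. \end{equation}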
Expanding to ${\cal O}(\epsilon^{3/2})$ yields (with ${\bm \nabla}_s = (\partial_X, \partial_Y)$, ${\bm \nabla} \cdot {\bm \nabla}_s = \partial_x \partial_X + \partial_y \partial_Y$, and $\nabla_s^2 = \partial_X^2 + \partial_Y^2$) \begin{eqnarray} \nabla^2 {\cal L}_0 n^{(3/2)} - m K_0 \nabla^4 \psi^{(3/2)} &=& \partial_T n^{(1/2)} + \left [ \nabla^2 - \nabla^2 \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right )^2 - q_0^4 \nabla_s^2 \right ] n^{(1/2)} - \nabla^2 \left [ g_2 {n^{(1/2)}}^2 + {n^{(1/2)}}^3 \right ] \nonumber\\ &-& g {\psi^{(1/2)}}^2 \nabla^2 n^{(1/2)} + \etaa \nabla^2 \left [ \psi^{(1/2)} \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) n^{(1/2)} + \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) \left ( \psi^{(1/2)} n^{(1/2)} \right ) \right ] \nonumber\\ &+& m \left \{ \etaa \nabla^2 \left [ n^{(1/2)} \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) n^{(1/2)} \right ] - g \psi^{(1/2)} \nabla^2 {n^{(1/2)}}^2 - 2v_1 \psi^{(1/2)} \nabla^2 n^{(1/2)} \right \}, \nonumber\\ m \nabla^2 {\cal L}_0 n^{(3/2)} - K_0 \nabla^4 \psi^{(3/2)} &=& m \left \{ \left [ \nabla^2 - \nabla^2 \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right )^2 - q_0^4 \nabla_s^2 \right ] n^{(1/2)} - \nabla^2 \left [ g_2 {n^{(1/2)}}^2 + {n^{(1/2)}}^3 \right ] \right. \nonumber\\ &-& \left. g {\psi^{(1/2)}}^2 \nabla^2 n^{(1/2)} + \etaa \nabla^2 \left [ \psi^{(1/2)} \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) n^{(1/2)} + \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) \left ( \psi^{(1/2)} n^{(1/2)} \right ) \right ] \right \} \nonumber\\ &+& \partial_T \psi^{(1/2)} + \etaa \nabla^2 \left [ n^{(1/2)} \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) n^{(1/2)} \right ] - g \psi^{(1/2)} \nabla^2 {n^{(1/2)}}^2 - 2v_1 \psi^{(1/2)} \nabla^2 n^{(1/2)}, \nonumber \end{eqnarray} which is equivalent to \begin{eqnarray} (1-m^2) \nabla^2 {\cal L}_0 n^{(3/2)} &=& \partial_T n^{(1/2)} - m \partial_T \psi^{(1/2)} + (1-m^2) \left \{ \left [ \nabla^2 - \nabla^2 \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right )^2 - q_0^4 \nabla_s^2 \right ] n^{(1/2)} - g {\psi^{(1/2)}}^2 \nabla^2 n^{(1/2)} \right. \nonumber\\ &-& \left. \nabla^2 \left [ g_2 {n^{(1/2)}}^2 + {n^{(1/2)}}^3 \right ] + \etaa \nabla^2 \left [ \psi^{(1/2)} \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) n^{(1/2)} + \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) \left ( \psi^{(1/2)} n^{(1/2)} \right ) \right ] \right \}, \nonumber\\ (1-m^2) K_0 \nabla^4 \psi^{(3/2)} &=& m \partial_T n^{(1/2)} - \partial_T \psi^{(1/2)} \nonumber\\ &+& (1-m^2) \left \{ - \etaa \nabla^2 \left [ n^{(1/2)} \left ( 2 {\bm \nabla} \cdot {\bm \nabla}_s \right ) n^{(1/2)} \right ] + g \psi^{(1/2)} \nabla^2 {n^{(1/2)}}^2 + 2v_1 \psi^{(1/2)} \nabla^2 n^{(1/2)} \right \}. \label{eq:expan32} \end{eqnarray} As shown in Eq. (\ref{eq:npsi_1}), the zero eigenvectors of operators $\nabla^2 {\cal L}_0$ and $\nabla^4$ are $(e^{\pm i {\bm q}_j^0 \cdot {\bm r}}, 1)$ and $1$ (of the 0th mode), respectively. Using the Fredholm theory or solvability condition \cite{re:cross93} in Eq. (\ref{eq:expan32}), we can derive the lowest order amplitude equations as (with $j=1,2,3$) \begin{eqnarray} & \partial A_j^{(1/2)} / \partial t = - (1-m^2) q_0^2 \left \{ \left [ -1 + \left ( 2i {\bm q}_j^0 \cdot {\bm \nabla}_s \right )^2 \right ] A_j^{(1/2)} + \left [ 3 {n_0^{(1/2)}}^2 + 2g_2 n_0^{(1/2)} + g {\psi_0^{(1/2)}}^2 \right ] A_j^{(1/2)} \right. 
& \nonumber\\ & + 3 A_j^{(1/2)} \left [ \left | A_j^{(1/2)} \right |^2 + 2 \sum_{k,l \neq j}^{k<l} \left ( \left | A_k^{(1/2)} \right |^2 + \left | A_l^{(1/2)} \right |^2 \right ) \right ] + \left ( 6 n_0^{(1/2)} + 2g_2 \right ) \sum_{k,l \neq j}^{k<l} {A_k^{(1/2)}}^* {A_l^{(1/2)}}^* & \nonumber\\ & \left. - \etaa \left [ \psi_0^{(1/2)} \left ( 2i {\bm q}_j^0 \cdot {\bm \nabla}_s \right ) A_j^{(1/2)} + \left ( 2i {\bm q}_j^0 \cdot {\bm \nabla}_s \right ) \left ( \psi_0^{(1/2)} A_j^{(1/2)} \right ) \right ] \right \}, & \label{eq:Aj_} \\ & \partial n_0^{(1/2)} / \partial t = q_0^4 \nabla_s^2 n_0^{(1/2)}, & \label{eq:n0_} \\ & \partial \psi_0^{(1/2)} / \partial t = m q_0^4 \nabla_s^2 \psi_0^{(1/2)}. & \label{eq:psi0_} \end{eqnarray} Using the scaling relation $A_j = \epsilon^{1/2} A_j^{(1/2)}$, $n_0 = \epsilon^{1/2} n_0^{(1/2)}$, and $\psi_0 = \epsilon^{1/2} \psi_0^{(1/2)}$, we can then obtain the corresponding amplitude equations in the unscaled units $(x,y,t)$. It is noted that the direct solutions to Eq. (\ref{eq:expan32}) have the form \begin{eqnarray} & n^{(3/2)} = n_0^{(3/2)}(X,Y,T) + \sum\limits_{j=1}^{3} \A_j^{(3/2)}(X,Y,T) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + {\rm higher~ harmonics},& \label{eq:n_32}\\ & \psi^{(3/2)} = \psi_0^{(3/2)}(X,Y,T) + \sum\limits_{j=1}^{3} \psi_j^{(3/2)}(X,Y,T) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + {\rm higher~ harmonics}.& \label{eq:psi_32} \end{eqnarray} Compared to Eq. (\ref{eq:npsi_1}) for the ${\cal O}(\epsilon^{1/2})$ and ${\cal O}(\epsilon)$ solutions, one finds that the complex amplitudes $\psi_j$ corresponding to the periodic modes of the concentration field in the substitutional binary alloys considered here are generally of order $\epsilon$ higher than $A_j$, $n_0$, and $\psi_0$. For systems in which a sublattice ordering occurs (such as B2 or B32 ordering in bcc crystals), $\psi_j$ would be of the same order as these other fields. To describe sublattice ordering a different free energy functional from the one given in Eq. (\ref{eq:F_eff}) would also be required. Detailed results will be presented elsewhere. \subsection{A hybrid approach: Full order amplitude equations} \label{sec:hybrid} The lowest order amplitude equations (\ref{eq:Aj_})--(\ref{eq:psi0_}) derived above are not sufficient to describe the evolution of binary systems; e.g., Eq. (\ref{eq:Aj_}) for $A_j$ is not rotationally invariant, and Eqs. (\ref{eq:n0_}) and (\ref{eq:psi0_}) for $n_0$ and $\psi_0$ are just diffusion equations and would lead to a steady state solution of constant $n_0$ and $\psi_0$ values at long enough times. We thus need higher order amplitude equations, which in principle can be derived by extending the multiple scale process described above to higher order expansions. However, the procedure is complicated and tedious. In the following we use, instead, a simplified approach combining the above steps of multiple scale expansion and the idea of the ``Quick and Dirty'' RG method \cite{re:goldenfeld05,re:athreya06}. The first step is the standard multiple scale expansion given in Sec. \ref{sec:multiscale}, starting from the scale separation Eq. (\ref{eq:deriv_xyt}). From the solution forms of Eqs.
(\ref{eq:npsi_1}), (\ref{eq:n_32}) and (\ref{eq:psi_32}), we know that to all orders of $\epsilon$ the solutions for the $n$ and $\psi$ fields can be written as \begin{eqnarray} & n = n_0(X,Y,T) + \sum\limits_{j=1}^{3} \A_j(X,Y,T) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + {\rm higher~ harmonics},& \label{eq:n_expan}\\ & \psi = \psi_0(X,Y,T) + \sum\limits_{j=1}^{3} \psi_j(X,Y,T) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + {\rm higher~ harmonics},& \label{eq:psi_expan} \end{eqnarray} with $(X,Y,T)$ the slow scales. Thus, based on the separation between ``fast'' and ``slow'' scales, the following full-order expansions can be obtained: \begin{eqnarray} \nabla^2 n &\rightarrow& \epsilon \nabla_s^2 n_0 + \sum_{j=1}^{3} ({\cal L}_j^s \A_j) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + \{\cdots\}, \nonumber\\ (\nabla^2+q_0^2)^2 n &\rightarrow& \left ( \epsilon \nabla_s^2 + q_0^2 \right )^2 n_0 + \sum_{j=1}^{3} ({{\cal G}_j^s}^2 \A_j) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + \{\cdots\}, \nonumber\\ (\nabla^2 + \nabla^4) (n \psi) &\rightarrow& \left ( \epsilon \nabla_s^2 + \epsilon^2 \nabla_s^4 \right ) \left ( n_0 \psi_0 + \sum_{j=1}^3 \A_j \psi_j^* + {\rm c.c.} \right ) \nonumber\\ &+& \sum_{j=1}^3 \left[ {\cal L}_j^s {\cal G}_j^s \left ( \psi_0 \A_j + n_0 \psi_j + \sum_{k,l \neq j}^{k<l} \A_k^* \psi_l^* \right )\right] e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + \{\cdots\}, \nonumber\\ n^2 &\rightarrow& n_0^2 + 2 \sum_{j=1}^3 |\A_j|^2 + \sum_{j=1}^3 \left ( 2n_0 \A_j + 2 \sum_{k,l \neq j}^{k<l} \A_k^* \A_l^* \right ) e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + \{\cdots\}, \nonumber\\ n^3 &\rightarrow& n_0^3 + 6n_0 \sum_{j=1}^3 |\A_j|^2 + 6 \left ( \prod_{j=1}^3 \A_j + {\rm c.c.} \right ) \nonumber\\ &+& \sum_{j=1}^3 \left \{ 3(n_0^2 + |\A_j|^2) \A_j + 6 \sum_{k,l \neq j}^{k<l} \left [ n_0 \A_k^* \A_l^* + \A_j \left ( |\A_k|^2 + |\A_l|^2 \right ) \right ] \right \} e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + \{\cdots\}, \nonumber\\ n \psi^2 &\rightarrow& n_0 \psi_0^2 + 2 n_0 \sum_{j=1}^3 |\psi_j|^2 + \sum_{j=1}^3 \left ( 2 \psi_0 \A_j + \sum_{k \neq l \neq j} \A_k^* \psi_l^* \right ) \psi_j^* + {\rm c.c.} + \sum_{j=1}^3 \left [ 2n_0 \left ( \psi_0 \psi_j + \sum_{k,l \neq j}^{k<l} \psi_k^* \psi_l^* \right ) \right. \nonumber\\ &+& \left. \left ( \psi_0^2 + 2 \sum_{k=1}^3 |\psi_k|^2 \right ) \A_j + 2 \psi_0 \sum_{k \neq l \neq j} \A_k^* \psi_l^* + 2 \psi_j \sum_{k \neq j} \left ( \A_k \psi_k^* + {\rm c.c.} \right ) + \A_j^* \psi_j^2 \right ] e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + \{\cdots\}, \nonumber\\ \cdots \cdots && \label{eq:expan} \end{eqnarray} where $\{\cdots\}$ refers to the contributions from higher harmonics and the slow operators are given by \begin{equation} {\cal L}_j^s = \epsilon \nabla_s^2 + \epsilon^{1/2} \left ( 2i {\bm q}_j^0 \cdot {\bm \nabla}_s \right ) - q_0^2, \qquad {\cal G}_j^s = {\cal L}_j^s + q_0^2 = \epsilon \nabla_s^2 + \epsilon^{1/2} \left ( 2i {\bm q}_j^0 \cdot {\bm \nabla}_s \right ).
\label{eq:LGs} \end{equation} Assuming that higher harmonic terms can be neglected, the binary PFC equations (\ref{eq:pfc_npsi}) and (\ref{eq:D12}) are then replaced by \begin{eqnarray} & \epsilon \partial_T n_0 + \epsilon \sum_j \partial_T \A_j e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} = {\cal D}_1^s + m {\cal D}_2^s,& \label{eq:n_s}\\ & \epsilon \partial_T \psi_0 + \epsilon \sum_j \partial_T \psi_j e^{i {\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} = m {\cal D}_1^s + {\cal D}_2^s,& \label{eq:psi_s} \end{eqnarray} where ${\cal D}_1^s$ and ${\cal D}_2^s$ are the corresponding expansion of ${\cal D}_1$ and ${\cal D}_2$, as obtained by substituting Eq. (\ref{eq:expan}) into Eq. (\ref{eq:D12}). Integrating Eqs.~(\ref{eq:n_s}) and (\ref{eq:psi_s}) over the eigenmodes $\int d{\bm r} \{ e^{-i {\bm q}_j^0 \cdot {\bm r}}, 1 \}$, keeping in mind that ``fast'' and ``slow'' scales are separated, and in the final step returning to original unscaled units $(x,y,t)$, we arrive at the following full-order amplitude equations for the binary PFC model: \begin{eqnarray} \partial n_0 / \partial t &=& \nabla^2 \frac{\delta {\cal F}} {\delta n_0} + m \nabla^2 \frac{\delta {\cal F}}{\delta \psi_0}, \label{eq:n0}\\ \partial \A_j / \partial t &=& {\cal L}_j \left ( \frac{\delta {\cal F}}{\delta \A_j^*} + m \frac{\delta {\cal F}} {\delta \psi_j^*} \right ) \simeq - q_0^2 \left ( \frac{\delta {\cal F}}{\delta \A_j^*} + m \frac{\delta {\cal F}} {\delta \psi_j^*} \right ), \label{eq:Aj}\\ \partial \psi_0 / \partial t &=& m \nabla^2 \frac{\delta {\cal F}} {\delta n_0} + \nabla^2 \frac{\delta {\cal F}}{\delta \psi_0}, \label{eq:psi0}\\ \partial \psi_j / \partial t &=& {\cal L}_j \left ( m \frac{\delta {\cal F}}{\delta \A_j^*} + \frac{\delta {\cal F}} {\delta \psi_j^*} \right ) \simeq - q_0^2 \left ( m \frac{\delta {\cal F}}{\delta \A_j^*} + \frac{\delta {\cal F}} {\delta \psi_j^*} \right ), \label{eq:psij} \end{eqnarray} where $j=1,2,3$, and \begin{eqnarray} & {\cal F} = \int d{\bm r} & \left \{ - \frac{1}{2} \epsilon n_0^2 + \frac{1}{2} \left [ \left ( \nabla^2 + q_0^2 \right ) n_0 \right ]^2 + \frac{1}{3} g_2 n_0^3 + \frac{1}{4} n_0^4 + \left ( - \epsilon + 3 n_0^2 + 2 g_2 n_0 + g \psi_0^2 \right ) \sum_{j=1}^3 |\A_j|^2 \right. \nonumber\\ && + \sum_{j=1}^3 \left | {\cal G}_j \A_j \right |^2 + \frac{3}{2} \sum_{j=1}^3 |\A_j|^4 + (6n_0 + 2g_2) \left ( \prod_{j=1}^3 \A_j + {\rm c.c.} \right ) + 6 \sum_{j<k} |\A_j|^2 |\A_k|^2 \nonumber\\ && + g \left [ \frac{1}{2} n_0^2 \psi_0^2 + n_0^2 \sum_{j=1}^3 |\psi_j|^2 + 2 \sum_{j,k=1}^3 |\A_j|^2 |\psi_k|^2 + \sum_{j=1}^3 \left ( 2n_0 \psi_0 \A_j \psi_j^* + \frac{1}{2} \A_j^2 {\psi_j^*}^2 + {\rm c.c.} \right ) \right. \nonumber\\ && \left. \qquad + \sum_{j \neq k} (\A_j \psi_j^* + {\rm c.c.}) (\A_k \psi_k^* + {\rm c.c.}) + \sum_{j \neq k \neq l} ( n_0 \psi_j^* + \psi_0 \A_j^* ) \A_k^* \psi_l^* + {\rm c.c.} \right ] \nonumber\\ && + \etaa \left [ \psi_0 n_0 \left ( \nabla^2 + \nabla^4 \right ) n_0 + \psi_0 \left ( \sum_{j=1}^3 \A_j^* {\cal L}_j {\cal G}_j \A_j + {\rm c.c.} \right ) + n_0 \left ( \nabla^2 + \nabla^4 \right ) \left ( \sum_{j=1}^3 \A_j \psi_j^* + {\rm c.c.} \right ) \right. \nonumber\\ && \left. 
\qquad \quad + n_0 \sum_{j=1}^3 \psi_j^* {\cal L}_j {\cal G}_j \A_j + \sum_{j \neq k \neq l} \A_j \psi_k {\cal L}_l {\cal G}_l \A_l + {\rm c.c.} \right ] \nonumber\\ && + \frac{1}{2} w_0 \psi_0^2 + \frac{1}{2} K_0 \left | \nabla \psi_0 \right |^2 + \frac{1}{4} u_0 \psi_0^4 + \left ( w_0 + 3 u_0 \psi_0^2 \right ) \sum_{j=1}^3 |\psi_j|^2 - \frac{1}{2} K_0 \sum_{j=1}^3 \left ( \psi_j {\cal L}_j^* \psi_j^* + {\rm c.c.} \right ) \nonumber\\ && + u_0 \left [ \frac{3}{2} \sum_{j=1}^3 |\psi_j|^4 + 6 \psi_0 \left ( \prod_{j=1}^3 \psi_j + {\rm c.c.} \right ) + 6 \sum_{j<k} |\psi_j|^2 |\psi_k|^2 \right ] \nonumber\\ && \left. + v_1 \left [ n_0 \psi_0^2 + 2 n_0 \sum_{j=1}^3 |\psi_j|^2 + 2 \psi_0 \left ( \sum_{j=1}^3 A_j \psi_j^* + {\rm c.c.} \right ) + \sum_{j \neq k \neq l} A_j \psi_k \psi_l + {\rm c.c.} \right ] \right \}. \label{eq:F} \end{eqnarray} Corresponding to Eq. (\ref{eq:LGs}), the operators ${\cal L}_j$ and ${\cal G}_j$ (in the original scales) are defined by \begin{equation} {\cal L}_j = \nabla^2 + 2i {\bm q}_j^0 \cdot {\bm \nabla} - q_0^2, \qquad {\cal G}_j = {\cal L}_j + q_0^2 = \nabla^2 + 2i {\bm q}_j^0 \cdot {\bm \nabla}, \label{eq:L_G} \end{equation} and for simplicity, in Eqs. (\ref{eq:Aj})--(\ref{eq:F}) the operator ${\cal L}_j$ can be replaced by $-q_0^2$ in the long wavelength approximation as adopted in Ref. \cite{re:elder10}. As discussed at the end of Sec. \ref{sec:multiscale}, the amplitudes $\psi_j$ are of ${\cal O}(\epsilon)$ higher order than the other fields for the free energy functional considered here. Thus the above amplitude equations can be further simplified by assuming $\psi_j \sim 0$, which leads to \begin{eqnarray} \partial \A_j / \partial t &=& - q_0^2 \frac{\delta {\cal F}}{\delta \A_j^*} - m q_0^2 \left \{ \etaa \left [ \A_j \left ( \nabla^2 + \nabla^4 \right ) n_0 + n_0 {\cal L}_j {\cal G}_j \A_j + \sum_{k \neq l \neq j} \A_k^* {\cal L}_l^* {\cal G}_l^* \A_l^* \right ] \right. \nonumber\\ &+& \left. 2g \psi_0 ( n_0 \A_j + \sum_{k,l \neq j}^{k<l} \A_k^* \A_l^* ) + 2v_1 \psi_0 A_j \right \} \nonumber\\ &=& - q_0^2 \frac{\delta {\cal F}}{\delta \A_j^*} - m q_0^2 \left. \frac{\delta {\cal F}}{\delta \psi_j^*} \right |_{\psi_j=0}. \nonumber \end{eqnarray} The dynamic equations for $n_0$ and $\psi_0$ are still governed by Eqs. (\ref{eq:n0}) and (\ref{eq:psi0}). The amplitude equations can be further simplified by noting from Eq. (\ref{eq:psij}) that $0 \simeq \partial \psi_j / \partial t = -q_0^2 ( m \delta {\cal F} / \delta \A_j^* + \delta {\cal F} / \delta \psi_j^* |_{\psi_j=0})$. Thus, the above dynamic equation for $A_j$ can be further approximated as \begin{equation} \partial \A_j / \partial t \simeq - q_0^2 (1-m^2) \frac{\delta {\cal F}}{\delta \A_j^*}, \label{eq:Aj0} \end{equation} which to lowest order recovers the result of the multiple scale approach given in Eq. (\ref{eq:Aj_}). In the applications that will be examined in Sec. \ref{sec:appl} the simplified amplitude equations (\ref{eq:n0}), (\ref{eq:psi0}), and (\ref{eq:Aj0}) will be used. \section{Noise dynamics and stochastic amplitude equations} \label{sec:noise} In the original PFC model \cite{re:elder02,re:elder04} a conserved noise dynamics has been incorporated. However, in DDFT it has been argued that the dynamic equation governing the density field evolution should be deterministic, and that an additional stochastic noise term added to Eq. (\ref{eq:ddft}) would lead to an artificial double-counting of thermal fluctuations \cite{re:marconi99}.
On the other hand, recent studies \cite{re:archer04b} have shown that deterministic DDFT dynamics governs the ensemble averaged density field $\rho({\bm r},t)$, while if the density field is temporally coarse-grained, as is the assumption in PFC modeling, the corresponding dynamic equation would then be stochastic, but with an (unknown) coarse-grained free energy functional instead of the equilibrium Helmholtz free energy functional used in static DFT. In the current case of PFC modeling, quite drastic approximations have been made to the DFT free energy functional (particularly at the level of the direct correlation functions; see e.g., Eqs. (\ref{eq:C2}), (\ref{eq:C3}), and (\ref{eq:C_expan})), and hence it could be argued that the incorporation of noise terms in the PFC dynamics would be necessary and useful to capture the qualitative effects of fluctuations in phenomena such as homogeneous nucleation. In what follows, noise will be added to the PFC models studied above and the corresponding stochastic amplitude equations will be derived for both single-component and binary systems. \subsection{Single-component PFC} \label{sec:pure} The stochastic DDFT equation for single-component systems is given by Eq. (\ref{eq:ddft}) with a multiplicative noise term ${\bm \nabla} \cdot [ \sqrt{\rho({\bm r},t)} {\bm \zeta}({\bm r},t) ]$ added to the right-hand side, where the noise field ${\bm \zeta}({\bm r},t)$ is determined by (with $\Gamma_0 = 2 k_B T M$) \begin{equation} \langle {\bm \zeta}({\bm r},t) \rangle =0, \qquad \langle \zeta^{\mu}({\bm r},t) \zeta^{\nu}({\bm r'},t') \rangle = \Gamma_0 \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu} \quad (\mu, \nu = x, y, z). \label{eq:noise} \end{equation} The corresponding dynamic equation governing the rescaled density field $n$ is similar to Eq. (\ref{eq:ddft_n}), i.e., $\partial n / \partial t = M' {\bm \nabla} \cdot [ (1+n) {\bm \nabla} {\delta {\cal F}} / {\delta n} ] + {\bm \nabla} \cdot [ \sqrt{(1+n) / \rho_l} ~ {\bm \zeta} ]$. Adopting the lowest order approximation as given in Sec. \ref{sec:ddft_pure}, we can write the rescaled stochastic PFC equation as \begin{equation} \partial n / \partial t = \nabla^2 \left [ -\epsilon n + (\nabla^2 + q_0^2)^2 n + g_2 n^2 +n^3 \right ] + {\bm \nabla} \cdot {\bm \zeta}, \label{eq:pfc_noise} \end{equation} where the rescaled noise ${\bm \zeta}$ is also determined by Eq. (\ref{eq:noise}) but with $\Gamma_0 = 2v/({B^x}^2 R^d \rho_l)$ (where $d$ is the dimensionality). To derive the associated stochastic amplitude equations, we follow the standard multiple scale approach in the limit of small $\epsilon$, which leads to the expansion of the density field $n$ in terms of the zeroth-mode average density $n_0$ and complex amplitudes $A_j$ that vary on the slow scales $(X,Y,T)$; see Eq. (\ref{eq:n_expan}). Effects of external noise can be approximated via a projection procedure used in hydrodynamic analysis \cite{re:graham74,re:hohenberg92}. Based on the fact that thermal noises originate from the fluctuations or random motion of individual atoms/molecules at microscopic scales, we can project ${\bm \zeta}$ onto the base modes given in Eq.
(\ref{eq:n_expan}), i.e., \begin{equation} {\bm \zeta} = {\bm \zeta}_0(X,Y,T)+\sum\limits_{j=1}^{3} {\bm \zeta}_{A_j}(X,Y,T) e^{i{\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.}, \label{eq:zexpan} \end{equation} where \begin{eqnarray} & \langle {\bm \zeta}_0 \rangle = \langle {\bm \zeta}_{A_j} \rangle =0, \quad \langle {\bm \zeta}_{A_i} {\bm \zeta}_{A_j} \rangle = \langle {\bm \zeta}_0 {\bm \zeta}_{A_j} \rangle = \langle {\bm \zeta}_0 {\bm \zeta}_{A_j}^* \rangle =0, & \nonumber\\ & \langle \zeta_0^{\mu} \zeta_0^{\nu} \rangle = \vartheta_0 \Gamma_0 \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu}, \quad \langle \zeta_{A_i}^{\mu} {\zeta_{A_j}^{\nu}}^* \rangle = \vartheta_i \Gamma_0 \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta_{ij} \delta^{\mu\nu}, & \label{eq:zeta_nA} \end{eqnarray} (with $i,j=1,2,3; \mu, \nu =x,y$). Here $\vartheta_i$ ($i=0,1,2,3$) is a constant determining the noise correlation strength, which can be approximated as $\vartheta_i=\vartheta=1/7$ if equal contributions from all seven modes in Eq. (\ref{eq:zexpan}) (i.e., ${\bm \zeta}_0$ and the three ${\bm \zeta}_{A_j}$ together with their complex conjugates) are assumed. Thus the random noise term in Eq. (\ref{eq:pfc_noise}) is given by \begin{equation} {\bm \nabla} \cdot {\bm \zeta} = \sum\limits_{j=1}^{3} i {\bm q}_j^0 \cdot {\bm \zeta}_{A_j} e^{i{\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} + \epsilon^{1/2} \left [ \partial_X \zeta_0^x + \partial_Y \zeta_0^y + \sum\limits_{j=1}^{3} \left ( \partial_X \zeta_{A_j}^x + \partial_Y \zeta_{A_j}^y \right ) e^{i{\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.} \right ]. \label{eq:dzeta} \end{equation} In order to be relevant in the amplitude expansion, it is necessary that ${\bm \nabla} \cdot {\bm \zeta} \sim {\cal O}(\epsilon^{3/2})$, leading to ${\bm \zeta}_{A_j} \sim {\cal O}(\epsilon^{3/2})$ and hence the noise intensity $\Gamma_0 \sim {\cal O}(\epsilon)$. The latter yields ${\bm \zeta}_0 \sim {\cal O}(\epsilon^{3/2})$, as can be deduced from Eq. (\ref{eq:zeta_nA}). Following the procedure of multiple scale expansion and retaining the random force contribution to the lowest order, we can derive the following stochastic amplitude equations: \begin{eqnarray} &\partial A_j / \partial t = - q_0^2 \delta {\cal F} / \delta A_j^* + \zeta_j,& \label{eq:amplA}\\ &\partial n_0 / \partial t = \nabla^2 \delta {\cal F} / \delta n_0 + {\bm \nabla} \cdot {\bm \zeta_0},& \label{eq:ampln0} \end{eqnarray} where ${\cal F}$ is the effective free energy of the single-component amplitude representation (see Refs. \cite{re:huang08,re:yeon10,re:huang10} for the detailed form), which is given by Eq. (\ref{eq:F}) with $\psi_0$ and $\psi_j$ set to 0. Also, $\zeta_j = i {\bm q}_j^0 \cdot {\bm \zeta}_{A_j}$ ($j=1,2,3$) and \begin{eqnarray} & \langle \zeta_j \rangle = \langle {\bm \zeta}_0 \rangle =0, \quad \langle \zeta_i \zeta_j \rangle = \langle {\bm \zeta}_0 \zeta_j \rangle = \langle {\bm \zeta}_0 \zeta_j^* \rangle =0, & \nonumber\\ & \langle \zeta_i \zeta_j^* \rangle = \vartheta_i q_0^2 \Gamma_0 \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta_{ij}, \quad \langle \zeta_0^{\mu} \zeta_0^{\nu} \rangle = \vartheta_0 \Gamma_0 \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu}. & \label{eq:zeta} \end{eqnarray} The noise dynamics is then consistent with the dynamics of the amplitude representation, i.e., nonconserved dynamics for $A_j$ in Eq. (\ref{eq:amplA}) and conserved dynamics for $n_0$ in Eq. (\ref{eq:ampln0}). \subsection{Binary PFC} Similar to the single-component system, based on Eq.
(\ref{eq:ddft_AB}) the stochastic DDFT equations for a binary system can be written as \begin{equation} \frac{\partial \rho_A}{\partial t} = {\bm \nabla} \cdot \left [ M_A \rho_A {\bm \nabla} \frac{\delta {\cal F}}{\delta \rho_A} + \sqrt{\rho_A} {\bm \zeta_A} \right ], \qquad \frac{\partial \rho_B}{\partial t} = {\bm \nabla} \cdot \left [ M_B \rho_B {\bm \nabla} \frac{\delta {\cal F}}{\delta \rho_B} + \sqrt{\rho_B} {\bm \zeta_B} \right ], \label{eq:ddft_ABs} \end{equation} where for the noises $(\alpha, \beta = A, B; \mu, \nu = x, y, z; \Gamma_{\alpha} = 2 k_B T M_{\alpha})$, \begin{equation} \langle {\bm \zeta}_{\alpha}({\bm r},t) \rangle =0, \qquad \langle \zeta_{\alpha}^{\mu}({\bm r},t) \zeta_{\beta}^{\nu} ({\bm r'},t') \rangle = \Gamma_{\alpha} \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta_{\alpha \beta} \delta^{\mu \nu}. \label{eq:noise_AB} \end{equation} From Eqs. (\ref{eq:ndN}) and (\ref{eq:pfc_ndN}) the dynamic equations for the $n$ and $\psi$ fields can be rewritten as \begin{eqnarray} &\partial n / \partial t =& M_1 {\cal D}_1 + M_2 {\cal D}_2+ {\bm \nabla} \cdot \left [ \sqrt{1+n} \left ( \sqrt{1+\psi} {\bm \zeta_A} + \sqrt{1-\psi} {\bm \zeta_B} \right ) \right ], \nonumber\\ &\partial \psi / \partial t =& \frac{1}{1+n} \left \{ (M_2-M_1 \psi) {\cal D}_1 + (M_1-M_2 \psi) {\cal D}_2 \right. \label{eq:npsi}\\ && \left. + (1-\psi) {\bm \nabla} \cdot \left [ \sqrt{(1+n)(1+\psi)} {\bm \zeta_A} \right ] - (1+\psi) {\bm \nabla} \cdot \left [ \sqrt{(1+n)(1-\psi)} {\bm \zeta_B} \right ] \right \}, \nonumber \end{eqnarray} where we have rescaled ${\bm \zeta_{A(B)}} \rightarrow {\bm \zeta_{A(B)}}/\sqrt{2\rho_l}$. Following the procedure given in Sec. \ref{sec:ddft_binary} and retaining only the lowest order noise terms, we can derive the rescaled stochastic binary PFC equations as \begin{equation} \partial n / \partial t = {\cal D}_1 + m {\cal D}_2 + {\bm \nabla} \cdot {\bm \zeta}_n, \quad \partial \psi / \partial t = m {\cal D}_1 + {\cal D}_2 + {\bm \nabla} \cdot {\bm \zeta}_{\psi}, \label{eq:bpfc} \end{equation} where the expressions of ${\cal D}_1$ and ${\cal D}_2$ have been given in Eqs. (\ref{eq:D12})--(\ref{eq:F_eff}). The noise terms are defined by \begin{equation} {\bm \zeta}_n = {\bm \zeta}_A + {\bm \zeta}_B, \qquad {\bm \zeta}_{\psi} = {\bm \zeta}_A - {\bm \zeta}_B, \end{equation} where ${\bm \zeta}_{A(B)}$ also obeys Eq. (\ref{eq:noise_AB}), although with $\Gamma_{\alpha}=k_BTM_{\alpha}v/(M_1{B_0^x}^2R^d\rho_l)$ due to the rescaling, and \begin{eqnarray} & \langle {\bm \zeta}_n \rangle = \langle {\bm \zeta}_{\psi} \rangle =0, \quad \langle \zeta_n^{\mu} \zeta_{\psi}^{\nu} \rangle = (\Gamma_A-\Gamma_B) \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu},& \nonumber\\ & \langle \zeta_n^{\mu} \zeta_n^{\nu} \rangle = \langle \zeta_{\psi}^{\mu} \zeta_{\psi}^{\nu} \rangle = (\Gamma_A+\Gamma_B) \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu},& \label{eq:zeta_npsi} \end{eqnarray} with $\Gamma_A+\Gamma_B = 2v/({B_0^x}^2R^d\rho_l)$ and $\Gamma_A-\Gamma_B = m (\Gamma_A+\Gamma_B) = 2mv/({B_0^x}^2R^d\rho_l)$. Using the multiple scale approach, we can expand the density field $n$ according to Eq. (\ref{eq:n_expan}) while assuming the concentration field to be slowly varying, $\psi = \psi_0(X,Y,T)$ (that is, keeping only the zeroth mode and neglecting the higher-order contributions from $\psi_j$ in Eq. (\ref{eq:psi_expan}), as discussed in Sec. \ref{sec:hybrid}).
Similar to the single-component case, the projection of the noises can be given by \begin{equation} {\bm \zeta}_n = {\bm \zeta}_0(X,Y,T)+\sum\limits_{j=1}^{3} {\bm \zeta}_{A_j}(X,Y,T) e^{i{\bm q}_j^0 \cdot {\bm r}} + {\rm c.c.}, \quad {\bm \zeta}_{\psi} = {\bm \zeta}_{\psi}(X,Y,T). \label{eq:bzexpan} \end{equation} Thus the expression of ${\bm \nabla} \cdot {\bm \zeta}_n$ is the same as Eq. (\ref{eq:dzeta}), while ${\bm \nabla} \cdot {\bm \zeta}_{\psi} = \epsilon^{1/2} (\partial_X \zeta_{\psi}^x + \partial_Y \zeta_{\psi}^y)$. We can also estimate ${\bm \zeta}_{A_j}, {\bm \zeta}_0, {\bm \zeta}_{\psi} \sim {\cal O}(\epsilon^{3/2})$ and $\Gamma_A, \Gamma_B \sim {\cal O}(\epsilon)$. The stochastic amplitude equations for the binary PFC model can then be derived, i.e., \begin{eqnarray} \partial A_j / \partial t &=& - q_0^2 (1-m^2) \frac{\delta {\cal F}} {\delta \A_j^*} + \zeta_j, \label{eq:A_noise} \\ \partial n_0 / \partial t &=& \nabla^2 \frac{\delta {\cal F}} {\delta n_0} + m \nabla^2 \frac{\delta {\cal F}}{\delta \psi_0} + {\bm \nabla} \cdot {\bm \zeta}_0, \label{eq:n0_noise} \\ \partial \psi_0 / \partial t &=& m \nabla^2 \frac{\delta {\cal F}} {\delta n_0} + \nabla^2 \frac{\delta {\cal F}}{\delta \psi_0} + {\bm \nabla} \cdot {\bm \zeta}_{\psi_0}, \label{eq:psi0_noise} \end{eqnarray} where the deterministic parts have been obtained in Sec. \ref{sec:hybrid}; see Eqs. (\ref{eq:n0}), (\ref{eq:psi0}), and (\ref{eq:Aj0}), as well as Eq. (\ref{eq:F}) for the effective potential ${\cal F}$. For the noise terms, $\zeta_j = i {\bm q}_j^0 \cdot {\bm \zeta}_{A_j}$ ($j=1,2,3$), and \begin{eqnarray} & \langle \zeta_j \rangle = \langle {\bm \zeta}_0 \rangle = \langle {\bm \zeta}_{\psi_0} \rangle = 0, \quad \langle \zeta_i \zeta_j \rangle = \langle {\bm \zeta}_0 \zeta_j \rangle = \langle {\bm \zeta}_0 \zeta_j^* \rangle = \langle {\bm \zeta}_{\psi_0} \zeta_j \rangle = \langle {\bm \zeta}_{\psi_0} \zeta_j^* \rangle = 0, & \nonumber\\ & \langle \zeta_i \zeta_j^* \rangle = \vartheta_i q_0^2 (\Gamma_A + \Gamma_B) \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta_{ij}, \quad \langle \zeta_0^{\mu} \zeta_0^{\nu} \rangle = \vartheta_0 (\Gamma_A + \Gamma_B) \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu}, & \label{eq:bzeta}\\ & \langle \zeta_{\psi_0}^{\mu} \zeta_{\psi_0}^{\nu} \rangle = (\Gamma_A + \Gamma_B) \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu}, \quad \langle \zeta_{\psi_0}^{\mu} \zeta_0^{\nu} \rangle = (\Gamma_A - \Gamma_B) \delta ({\bm r} - {\bm r'}) \delta (t-t') \delta^{\mu\nu},& \nonumber \end{eqnarray} with $i,j=1,2,3$ and $\mu, \nu = x,y$. If we assume $\Gamma_A \simeq \Gamma_B$ (for equal mobilities $M_A \simeq M_B$ and $m \simeq 0$), i.e., almost the same noise/fluctuation intensity for the A and B components, we have $\langle \zeta_{\psi_0}^{\mu} \zeta_0^{\nu} \rangle \simeq 0$ and hence all noise terms ($\zeta_j$, ${\bm \zeta}_0$, ${\bm \zeta}_{\psi_0}$) can be treated independently. However, for the case of different mobilities ($M_A \neq M_B$ and $m \neq 0$), we get $\langle \zeta_{\psi_0}^{\mu} \zeta_0^{\nu} \rangle \neq 0$, and hence the noises ${\bm \zeta}_0$ and ${\bm \zeta}_{\psi_0}$ for the zeroth-mode fields $n_0$ and $\psi_0$ are correlated. Similar results can be obtained for the noises ${\bm \zeta}_n$ and ${\bm \zeta}_{\psi}$ in the stochastic PFC equations (\ref{eq:npsi}) and (\ref{eq:zeta_npsi}).
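To make the structure of these noise terms concrete, the following is a minimal numerical sketch (in Python, with hypothetical grid parameters and noise intensities) of how the conserved noise contributions ${\bm \nabla} \cdot {\bm \zeta}_n$ and ${\bm \nabla} \cdot {\bm \zeta}_{\psi}$ in Eq. (\ref{eq:bpfc}) could be sampled on a periodic grid. Building ${\bm \zeta}_n$ and ${\bm \zeta}_{\psi}$ from independent ${\bm \zeta}_A$ and ${\bm \zeta}_B$ automatically reproduces the correlations of Eq. (\ref{eq:zeta_npsi}), including the cross-correlation proportional to $\Gamma_A - \Gamma_B$.
\begin{verbatim}
import numpy as np

# Minimal sketch (not the production code): sampling the conserved
# noise terms of the stochastic binary PFC equations on a periodic
# 2D grid. Grid parameters and noise intensities are hypothetical,
# chosen only for illustration.
N, dx, dt = 256, np.pi / 4, 0.5
Gamma_A, Gamma_B = 1.0e-4, 0.5e-4   # illustrative noise intensities

k = 2 * np.pi * np.fft.fftfreq(N, d=dx)   # angular wavenumbers
kx, ky = np.meshgrid(k, k, indexing="ij")

def white_vector_field(gamma):
    """Gaussian vector field with <z^mu z^nu> = gamma delta(r - r')
    delta(t - t') delta^{mu nu}, discretized as independent Gaussians
    of variance gamma/(dx^2 dt) per grid point and component."""
    return np.sqrt(gamma / (dx**2 * dt)) * np.random.randn(2, N, N)

def divergence(v):
    """Spectral divergence of a two-component vector field."""
    return np.real(np.fft.ifft2(1j * kx * np.fft.fft2(v[0])
                                + 1j * ky * np.fft.fft2(v[1])))

zeta_A = white_vector_field(Gamma_A)   # independent noises for A, B
zeta_B = white_vector_field(Gamma_B)
div_zeta_n = divergence(zeta_A + zeta_B)     # enters dn/dt
div_zeta_psi = divergence(zeta_A - zeta_B)   # enters dpsi/dt
\end{verbatim}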
\section{Applications in Alloy Heterostructures} \label{sec:appl} As discussed in the introduction, the PFC model and the corresponding amplitude equations have been applied to the study of a wide variety of phenomena involved in material processing and microstructure evolution. In this section we will illustrate how the amplitude equations derived in the preceding sections can be employed to examine the effects of surface segregation and alloy intermixing. Alloy intermixing is known to play an important role in the growth and processing of material heterostructures, affecting morphological and compositional profiles as well as the associated optoelectronic properties and functionality of the sample. Recent intensive studies on thin film epitaxy and atomic deposition have shown the important effects of intermixing on nanostructure self-assembly. Typical examples include InAs(InGaAs)/GaAs(001) \cite{re:moison89,re:walther01,re:cederberg07} or Ge(SiGe)/Si(001) \cite{re:walther97,re:denker05} heteroepitaxy, which has been investigated extensively (particularly the intermixing-caused alloying of wetting layers and quantum dots), and the interlayer diffusion in semiconductor multilayers or superlattices such as InP/InGaAs \cite{re:gerling01}, GaAs/GaSb \cite{re:dorin03}, and GaAs/InAs \cite{re:pearson04}. An important phenomenon in these epitaxial layers is surface segregation, in which one of the film components becomes enriched at a surface or interface region. This has been observed in a variety of material systems including III-V and II-VI semiconductor heterostructures \cite{re:moison89,re:walther01,re:cederberg07,re:walther97,re:denker05,re:gerling01,re:dorin03,re:pearson04}. To address these complicated phenomena and effects, the basic processes and mechanisms of intra- and inter-layer diffusion at nearly-planar interfaces, as well as their coupling with material processing and growth parameters, need to be clarified. In light of the above observations, the focus of this section is on heterostructures with a nearly-planar interface, for both lattice-matched and strained epitaxial layers. For layers stressed due to lattice mismatch, the configurations studied here are metastable in nature, and our results will be used for further studies of the associated later-stage nanostructure evolution (e.g., quantum dots), which will be presented elsewhere. For such a film geometry with a planar interface, it can be assumed that both morphological and compositional profiles along the lateral direction are approximately uniform or homogeneous (at least metastably), and hence these structural profiles vary only along the direction ($y$) normal to the interface. An advantage of the amplitude equation representation of the PFC model developed above is that the system of interest can then be mapped onto an effective one-dimensional (1D) description, as will be shown below. \subsection{Effective 1D model system with elasticity} To address the elasticity incorporated in the amplitude equation formalism, it is useful to note that the structural amplitudes can be written as \begin{equation} A_j = A_j' e^{i {\bm q}_j^0 \cdot {\bm u}} \quad (j=1,2,3), \end{equation} where for the 2D hexagonal structure ${\bm q}_j^0$ are the three basic wave vectors given in Sec. \ref{sec:ampl} and ${\bm u} = \delta_0 (x \hat{x} + y \hat{y})$ describes the bulk compression or dilation. The effective free energy ${\cal F}$ in Eq.
(\ref{eq:F}) can be rewritten as (neglecting the higher order contributions from $\psi_j$ and approximating ${\cal L}_j \simeq -q_0^2$) \begin{eqnarray} & {\cal F} = \int d{\bm r} & \left \{ - \frac{1}{2} \epsilon n_0^2 + \frac{1}{2} \left [ \left ( \nabla^2 + q_0^2 \right ) n_0 \right ]^2 + \frac{1}{3} g_2 n_0^3 + \frac{1}{4} n_0^4 + \left ( - \epsilon + 3 n_0^2 + 2 g_2 n_0 + g \psi_0^2 \right ) \sum_j |\A_j'|^2 \right. \nonumber\\ && + \sum_j \left | {\cal G}_j' \A_j' \right |^2 + \frac{3}{2} \sum_j |\A_j'|^4 + (6n_0 + 2g_2) \left ( \prod_j \A_j' + {\rm c.c.} \right ) + 6 \sum_{j<k} |\A_j'|^2 |\A_k'|^2 \nonumber\\ && + \frac{1}{2} w_0 \psi_0^2 + \frac{1}{2} K_0 \left | \nabla \psi_0 \right |^2 + \frac{1}{4} u_0 \psi_0^4 + \frac{1}{2} g n_0^2 \psi_0^2 + v_1 n_0 \psi_0^2 \nonumber\\ && \left. + \etaa \left [ \psi_0 n_0 \left ( \nabla^2 + \nabla^4 \right ) n_0 - q_0^2 \psi_0 \left ( \sum_j {\A_j'}^* {\cal G}_j' \A_j' + {\rm c.c.} \right ) \right ] \right \}, \label{eq:F'} \end{eqnarray} where \begin{equation} {\cal G}_j' = \nabla^2 + 2i \left ({\bm \delta}_j + {\bm q}_j^0 \right ) \cdot {\bm \nabla} - |{\bm \delta}_j|^2 - 2 {\bm q}_j^0 \cdot {\bm \delta}_j, \end{equation} with ${\bm \delta}_1 = -\delta_x \hat{x} - \delta_y \hat{y}/2$, ${\bm \delta}_2 = \delta_y \hat{y}$, ${\bm \delta}_3 = \delta_x \hat{x} - \delta_y \hat{y}/2$, $\delta_x = \sqrt{3} q_0 \delta_0 /2$, and $\delta_y = q_0 \delta_0$. The corresponding dynamic equations for $A_j'$, $n_0$, and $\psi_0$ are still governed by Eqs. (\ref{eq:A_noise})--(\ref{eq:psi0_noise}), although with $A_j$ replaced by $A_j'$. In mechanical equilibrium, we can assume that $A_j' \simeq A$, i.e., $A_j \simeq A \exp(i {\bm q}_j^0 \cdot {\bm u})$, where $A$ is a constant. Minimizing the effective free energy ${\cal F}$ with respect to both $\delta_0$ and $A$ yields the equilibrium value $\delta_0^{\rm eq} = -1 + \sqrt{1-2\alpha_0\psi_0} \simeq - \alpha_0 \psi_0$ to lowest order. This leads to the equilibrium wave number $q_{\rm eq} = (1+\delta_0^{\rm eq}) q_0 = \sqrt{1-2\alpha_0\psi_0} ~q_0$ (where $\alpha_0$ is the rescaled solute expansion coefficient defined in Sec. \ref{sec:ddft_binary}), and the equilibrium amplitude \begin{equation} A = \frac{1}{15} \left \{ -(3n_0+g_2) + \sqrt{(3n_0+g_2)^2 - 15 \left [ -\epsilon + q_0^4 (\delta_0^2 + 2\delta_0)(\delta_0^2 + 2\delta_0+ 4\alpha_0 \psi_0) + n_0 (3n_0+2g_2) + g\psi_0^2 \right ]} \right \}. \label{eq:A_eq} \end{equation} The elastic constants (rescaled) are then given by $C_{11}=C_{22}=9A^2$, $C_{12}=C_{44}=C_{11}/3=3A^2$, and Young's modulus $E=8A^2$ \cite{re:elder04,re:elder07,re:elder10}. For the dynamics of a heterostructure configuration with a nearly-planar interface (either liquid-solid or solid-solid), we can assume that $A_j'(x,y,t) \simeq A_j^0(y,t)$, $n_0(x,y,t) \simeq n_0^0(y,t)$, and $\psi_0(x,y,t) \simeq \psi_0^0(y,t)$, resulting in an effective 1D description of the system.
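Before turning to the 1D dynamics, the following minimal Python sketch evaluates the equilibrium relations just derived: $\delta_0^{\rm eq}$, $q_{\rm eq}$, the amplitude of Eq. (\ref{eq:A_eq}), and the resulting rescaled elastic constants. The bulk values of $n_0$ and $\psi_0$ used below are hypothetical placeholders (in practice they are fixed by the phase diagram), and the remaining parameter values are of the type used later in this section; the sketch is purely illustrative.
\begin{verbatim}
import numpy as np

# Minimal sketch: equilibrium lattice distortion, one-mode amplitude
# (Eq. A_eq), and rescaled elastic constants. n0 and psi0 below are
# hypothetical placeholders.
eps, alpha0, q0 = 0.02, 0.3, 1.0
g, g2 = -1.8, -0.6
n0, psi0 = 0.0, -0.4                   # hypothetical bulk values

delta0 = -1.0 + np.sqrt(1.0 - 2.0 * alpha0 * psi0)   # delta_0^eq
q_eq = (1.0 + delta0) * q0                           # equilibrium q

b = 3.0 * n0 + g2
bracket = (-eps
           + q0**4 * (delta0**2 + 2.0 * delta0)
                   * (delta0**2 + 2.0 * delta0 + 4.0 * alpha0 * psi0)
           + n0 * (3.0 * n0 + 2.0 * g2) + g * psi0**2)
A = (-b + np.sqrt(b**2 - 15.0 * bracket)) / 15.0     # Eq. (A_eq)

C11 = 9.0 * A**2          # = C22
C12 = C11 / 3.0           # = C44
E = 8.0 * A**2            # Young's modulus
print(f"delta0_eq = {delta0:.4f}, q_eq = {q_eq:.4f}, A = {A:.4f}")
print(f"C11 = {C11:.4f}, C12 = C44 = {C12:.4f}, E = {E:.4f}")
\end{verbatim}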
The dynamics of the amplitude equations then become \begin{eqnarray} \partial n_0^0 / \partial t &=& \partial_y^2 \frac{\delta {\cal F}} {\delta n_0^0} + m \partial_y^2 \frac{\delta {\cal F}}{\delta \psi_0^0}, \label{eq:n0_1D} \\ \partial \psi_0^0 / \partial t &=& m \partial_y^2 \frac{\delta {\cal F}} {\delta n_0^0} + \partial_y^2 \frac{\delta {\cal F}}{\delta \psi_0^0}, \label{eq:psi0_1D} \\ \partial A_j^0 / \partial t &=& - q_0^2 (1-m^2) \frac{\delta {\cal F}}{\delta {\A_j^0}^*}, \label{eq:A_1D} \end{eqnarray} where \begin{eqnarray} \frac{\delta {\cal F}}{\delta n_0^0} &=& \left [ -\epsilon + \left ( \partial_y^2 + q_0^2 \right )^2 \right ] n_0^0 + g_2 {n_0^0}^2 + {n_0^0}^3 + (6n_0^0+2g_2) \sum_j |A_j^0|^2 + 6 \left (\prod_j A_j^0 + {\rm c.c.} \right ) + (g n_0^0 + v_1) {\psi_0^0}^2 \nonumber\\ &&+ 2\alpha_0 \left [ \psi_0^0 \left ( \partial_y^2 + \partial_y^4 \right ) n_0^0 + \left ( \partial_y^2 + \partial_y^4 \right ) \left ( n_0^0 \psi_0^0 \right ) \right ], \label{eq:dF_dn0} \\ \frac{\delta {\cal F}}{\delta \psi_0^0} &=& (w_0-K_0\partial_y^2) \psi_0^0 + u_0 {\psi_0^0}^3 + g \left ( {n_0^0}^2 + 2 \sum_j |A_j^0|^2 \right ) \psi_0^0 + 2v_1 n_0^0 \psi_0^0 \nonumber\\ &&+ 2 \alpha_0\left [ n_0^0 \left ( \partial_y^2 + \partial_y^4 \right ) n_0^0 - q_0^2 \sum_j \left ( {A_j^0}^* {\cal G}_j^0 A_j^0 + {\rm c.c.} \right ) \right ], \label{eq:dF_dpsi0} \\ \frac{\delta {\cal F}}{\delta {\A_j^0}^*} &=& \left [ -\epsilon + {{\cal G}_j^0}^2 + 2g_2 n_0^0 + 3{n_0^0}^2 + g{\psi_0^0}^2 \right ] \A_j^0 + 3 \A_j^0 \left [ |\A_j^0|^2 + 2 \sum_{k,l \neq j}^{k<l} \left ( |A_k^0|^2 + |A_l^0|^2 \right ) \right ] \nonumber\\ && + (6n_0^0 + 2g_2) \prod_{k \neq j} {A_k^0}^* - 2\alpha_0 q_0^2 \left [ \psi_0^0 {\cal G}_j^0 A_j^0 + {\cal G}_j^0 \left ( \psi_0^0 A_j^0 \right ) \right ], \label{eq:dF_dAj} \end{eqnarray} with \begin{equation} {\cal G}_j^0 = \partial_y^2 + 2i \left (\delta_{jy} + q_{jy}^0 \right ) \partial_y - |{\bm \delta}_j|^2 - 2 {\bm q}_j^0 \cdot {\bm \delta}_j. \end{equation} For coherent strained alloy layers, which are of great interest in materials growth, the solid layer is strained with respect to a substrate and subjected to an epitaxial condition $q_x=q_x^{\rm sub}=(\sqrt{3}/2) q_0 (1+\delta_0^{\rm sub})$ (with ``sub'' referring to the substrate). The wavenumber $q_y$ along the vertical or layer growth direction $y$ is determined by the lattice elastic relaxation (or Poisson relaxation in continuum elasticity theory). The system is thus governed by the above amplitude equations (\ref{eq:n0_1D})--(\ref{eq:dF_dAj}), but with $\delta_0$ fixed by the corresponding elasticity quantity $\delta_0^{\rm sub}$ of the substrate (and thus $\delta_x=\sqrt{3} q_0 \delta_0^{\rm sub} /2$ and $\delta_y=q_0 \delta_0^{\rm sub}$). The vertical strain relaxation (Poisson relaxation) can be determined from the phase of complex amplitudes $A_j^0$. Furthermore, the misfit strain $\varepsilon_m$ of such a solid layer is given by \begin{equation} \varepsilon_m = \frac{R_{\rm eq} - R}{R} = \frac{q_x}{q_{x,\rm eq}}-1 = \frac{\delta_0 - \delta_0^{\rm eq}} {1+\delta_0^{\rm eq}}, \label{eq:misfit} \end{equation} where $R$ and $q_x$ are lateral lattice spacing and wavenumber of the strained layer, and $R_{\rm eq}$, $q_{x,\rm eq}$, and $\delta_0^{\rm eq}$ are for the corresponding stress-free, equilibrium bulk state. For the systems studied here the model parameters are chosen such that no phase separation or spinodal decomposition can occur in the bulk of each solid or liquid region. 
The corresponding conditions on the parameters that assure this are derived via a linear stability analysis of the amplitude equations. Following standard procedures, we substitute the expansion $n_0=\bar{n}_0+\hat{n}_0$, $\psi_0=\bar{\psi}_0+\hat{\psi}_0$, and $A_j=\bar{A}_j+\hat{A}_j$ into Eqs. (\ref{eq:n0}), (\ref{eq:psi0}) and (\ref{eq:Aj0}), obtain the linearized evolution equations for the perturbed quantities $\hat{n}_0$, $\hat{\psi}_0$, and $\hat{A}_j$, and calculate the associated perturbation growth rates. The corresponding results are complicated due to the coupling between the evolution equations of all three perturbed quantities. To estimate the conditions for phase separation, here we simply assume that $\hat{n}_0, \hat{A}_j \sim 0$, and study only the stability of the concentration field. To first order in $\hat{\psi}_0$ we have \begin{equation} \partial \hat{\psi}_0 / \partial t \simeq \nabla^2 \left \{ -K_0 \nabla^2 + w_0 + 3u_0 \bar{\psi}_0^2 + g \bar{n}_0^2 + 2v_1 \bar{n}_0 + 2g \sum_j |\bar{A}_j|^2 + m \left [ \etaa \bar{n}_0 (\nabla^2+\nabla^4) + 2g\bar{n}_0\bar{\psi}_0 + 2v_1 \bar{\psi}_0 \right ] \right \} \hat{\psi}_0. \label{eq:psi0_lin} \end{equation} In Fourier space, the perturbation growth rate $\sigma(q)$ is then given by \begin{equation} \sigma = -q^2 \left [ 2 m \alpha_0 \bar{n}_0 q^4 + (K_0 - 2m\alpha_0 \bar{n}_0) q^2 + w_{\rm eff} \right ], \end{equation} where \begin{equation} w_{\rm eff} = w_0 + 3u_0 \bar{\psi}_0^2 + g \bar{n}_0^2 + 2v_1 \bar{n}_0 + 2g \sum_j |\bar{A}_j|^2 + 2m (g\bar{n}_0+v_1) \bar{\psi}_0. \label{eq:w_eff} \end{equation} If $w_{\rm eff}<0$, an instability of the homogeneous alloy occurs, leading to spinodal decomposition or phase separation of alloy components. The characteristic wave number (for maximum perturbation growth rate) is then given by $q_{\rm max}^2 = [ \sqrt{(K_0 - 2m\alpha_0\bar{n}_0)^2 - 6m\alpha_0\bar{n}_0 w_{\rm eff}} - (K_0 - 2m\alpha_0\bar{n}_0) ] / (6m\alpha_0\bar{n}_0)$ if $m, \alpha_0, \bar{n}_0 \neq 0$, or $q_{\rm max}^2 = - w_{\rm eff}/(2K_0)$ if one of $m, \alpha_0, \bar{n}_0 =0$. For the heterostructural systems presented below and the parameters chosen, the condition $w_{\rm eff}>0$ is always satisfied in the bulk phases, keeping the concentration profile homogeneous within each layer. Concentration heterogeneity may still occur across the system configuration; this, however, is due to the effect of interfaces or to composition overshooting, a phenomenon caused by alloy intermixing that will be discussed in detail below. \subsection{Results: Equilibrium profiles and layer growth} Equations (\ref{eq:n0_1D})--(\ref{eq:dF_dAj}) were solved numerically using a pseudospectral method and an exponential propagation scheme for time integration of stiff equations \cite{re:friesner89,re:cross94b}. Results of the corresponding morphological and compositional 1D profiles are shown in Figs. \ref{fig:ssl_P}--\ref{fig:sl_f_m}, for two types of configurations: liquid-solid-solid and liquid-solid coexistence or growth. For the simulations shown here we choose a time step $\Delta t =1$, a value this large being possible due to the numerical scheme used; the numerical grid spacing is $\Delta y = \lambda_0/8$ (where $\lambda_0 = 2\pi /q_0$).
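To make the time stepping concrete, the following is a minimal sketch of this type of scheme, illustrated on a simplified Cahn-Hilliard-like stand-in for Eq. (\ref{eq:psi0_1D}) rather than on the full coupled amplitude equations; NumPy is assumed, first-order exponential time differencing stands in for the exponential propagation scheme of Refs. \cite{re:friesner89,re:cross94b}, and all names and parameter values are illustrative only. \begin{verbatim}
import numpy as np

# One-dimensional pseudospectral solver with first-order exponential
# time differencing (ETD1) for a stiff conserved equation of the form
#   du/dt = d^2/dy^2 ( w0*u - K0*d^2u/dy^2 + u0*u^3 ),
# a simplified stand-in for the psi_0 dynamics.
Ny = 2048
q0 = 1.0
dy = (2.0*np.pi/q0)/8.0        # grid spacing lambda_0/8, as in the text
dt = 1.0                       # large step tolerated by the stiff solver
w0, K0, u0 = 0.088, 1.0, 4.0   # illustrative parameter values

q = 2.0*np.fft.fftfreq(Ny, d=dy)*np.pi   # Fourier wavenumbers
L = -q**2*(w0 + K0*q**2)       # stiff linear part, diagonal in q space
E = np.exp(L*dt)               # exact propagator of the linear part
Lsafe = np.where(L != 0.0, L, 1.0)
W = np.where(L != 0.0, (E - 1.0)/Lsafe, dt)  # (e^{L dt}-1)/L; q=0 -> dt

def etd1_step(u):
    # exact linear propagation plus explicit treatment of the nonlinearity
    Nhat = -q**2*np.fft.fft(u0*u**3)
    return np.real(np.fft.ifft(E*np.fft.fft(u) + W*Nhat))

u = 1e-3*np.random.default_rng(0).standard_normal(Ny)  # small initial noise
for _ in range(1000):
    u = etd1_step(u)
\end{verbatim} The point of the exponential treatment is that the stiff linear operator, diagonal in Fourier space, is propagated exactly, which is what permits time steps as large as $\Delta t = 1$.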
To emulate a liquid-solid (or liquid-solid-solid) heterostructure and apply periodic boundary conditions in the numerical calculation, the initial configuration is set as two (or four) symmetric interfaces located at $y=L_y/4$ and $3L_y/4$ (or $y=L_y/6$, $L_y/3$, $2L_y/3$ and $5L_y/6$), separating different liquid or solid regions. These interfaces need to be set sufficiently far apart from each other to avoid any interface coupling and the artifacts of finite size effects. For results shown below we choose the 1D system size perpendicular to the interfaces as $L_y=2048 \Delta y$, with similar results obtained in calculations up to $L_y=8192 \Delta y$. Also, the parameters used in the amplitude equations are based on the phase diagrams given in Ref. \cite{re:elder10} showing liquid-solid and solid-solid coexistence, i.e., $(g,g_2,u_0,K_0,v_1) =(-1.8,-0.6,4,1,0)$, $w_0=0.008$ or $0.088$, $\alpha_0=0.3$ or $0$, and $\epsilon=\pm 0.02$. \subsubsection{Liquid-solid and liquid-solid-solid coexistence} The equilibrium profile for a liquid-solid(I)-solid(II) coexistence is given in Fig. \ref{fig:ssl_P} (with time corresponding to $t=2 \times 10^7$). To obtain the liquid-solid-solid coexistence, we use $\epsilon=0.02$, $\alpha_0=0.3$, and $w_0=0.008$ (from the eutectic phase diagram in Ref. \cite{re:elder10}), set the initial length ratio of liquid:solid(I):solid(II) as 1/3:1/3:1/3, and let all of $\psi_0^0$, $A_j^0$ and $n_0^0$ evolve with time until a stationary state is reached. Solid II is treated as a substrate (unstrained), and hence in the amplitude equations (\ref{eq:n0_1D})--(\ref{eq:dF_dAj}) we set $\delta_0=\delta_0^{\rm II}=-1 + \sqrt{1-2\alpha_0\psi_0^{\rm II}}$. Due to nonzero solute expansion coefficient $\alpha_0$, i.e., different atomic sizes of A and B alloy components, solid I is strained (with misfit $\varepsilon_m$ with respect to the substrate (solid II) being $14.9\%$ for the parameters of Fig. \ref{fig:ssl_P}). This is consistent with the numerical results in Fig. \ref{fig:ssl_P}b, showing zero phase of amplitudes $A_j$ within unstrained solid II and a linear dependence of phase on position $y$ in the bulk of solid I. For comparison, the magnitude of lattice misfit between III-V or II-VI layers is around 0 to $5\%$ (e.g., $\varepsilon_m=4.2\%$ for Ge/Si and less for Si$_x$Ge$_{1-x}$/Si$_y$Ge$_{1-y}$), while the lattice mismatch for III-V Nitride heteroepitaxial films or III-V/Si heterostructures could reach 10\% or more (e.g., $\varepsilon_m=11.5\%$ for InAs/Si). \begin{figure} \centerline{ \includegraphics[height=3.3in]{fig1a.eps} \hskip 5pt \includegraphics[height=3.3in]{fig1b.eps}} \caption{Liquid-solid-solid coexistence profile calculated from the amplitude equations, as characterized by (a) the composition field $\psi_0$, amplitudes $|A_j|$, and the average density field $n_0$, and (b) the phases of amplitudes $A_j$. The parameters are set as $\epsilon=0.02$, $\alpha_0=0.3$, $(g,g_2,u_0,K_0,w_0) =(-1.8,-0.6,4,1,0.008)$, and the time at $t=2\times 10^7$. Solid I is strained with respect to the substrate solid II.} \label{fig:ssl_P} \end{figure} For a liquid-solid heterostructural configuration, to determine the coexistence state we choose similar parameters except for $w_0=0.088$, $\epsilon=-0.02$, and initially $\psi_0=0$ in the whole system. This corresponds to the single solid phase region (no solid-solid coexistence, only liquid-solid) in the phase diagram \cite{re:elder10}. 
To make the solid strained, we set $\delta_0=0.05$ as given by an external condition (i.e., a substrate), and thus from Eq. (\ref{eq:misfit}) the misfit strain in the solid here is about $5\%$. The results for $\alpha_0=0$ and $0.3$ are given in Figs. \ref{fig:slPeq}a and \ref{fig:slPeq}b respectively, including the equilibrium profiles (up to $t=2\times 10^7$) and the process of time evolution. As expected, for $\alpha_0=0$ (equal atomic size of alloy components) the concentration field $\psi_0$ remains at 0 at all times, as seen in Fig. \ref{fig:slPeq}a. However, for $\alpha_0=0.3$ the initial $\psi_0=0$ profile splits at the liquid-solid interface (see Fig. \ref{fig:slPeq}b). For the parameters used here, $\alpha_0>0$ (with the size of atom A larger than that of atom B) and misfit $\varepsilon_m>0$ (compressed solid), and thus the solid prefers to accommodate more of the smaller B atoms (with $\psi_0<0$), leading to a ``dip'' on the solid side of the compositional interface; due to the conservation law on the field $\psi_0$, a ``bump'' of $\psi_0>0$ (more of the larger A atoms) appears on the other side via layer interdiffusion or alloy intermixing. As a result of atomic diffusion, such a ``dip'' and ``bump'' will spread out into the bulk phases as time increases, leading to a positive/negative $\psi_0$ equilibrium profile of liquid-solid coexistence, as shown in Fig. \ref{fig:slPeq}b. \begin{figure} \centerline{ \includegraphics[height=3.3in]{fig2a.eps} \hskip 5pt \includegraphics[height=3.3in]{fig2b.eps}} \caption{Liquid-solid coexistence profiles, for $\alpha_0=0$ (a) and $0.3$ (b). The other parameters are the same as in Fig. \ref{fig:ssl_P}, except for $\epsilon=-0.02$, $w_0=0.088$, and initially $\psi_0=0$. The solid region has a misfit strain of around $5\%$ due to $\delta_0=0.05$ set in the amplitude equations.} \label{fig:slPeq} \end{figure} It is interesting to note that the non-homogeneous compositional profile can also be found in liquid-solid heterostructures with nonzero $\alpha_0$ and no misfit strain (i.e., $\delta_0=0$, $\psi_0=0$, and $\alpha_0=0.3$, as in Fig. \ref{fig:sl0eq}). A slight enrichment of the larger A atoms is observed on the surface of the unstrained solid, appearing as a ``peak'' (with $\psi_0 \sim 1.5 \times 10^{-4}$) at the compositional interface in Fig. \ref{fig:sl0eq}. Note that this phenomenon of weak surface segregation persists in the equilibrium or stationary configuration (as tested up to $t=10^7$), and is caused by unequal atomic sizes of alloy components. Due to the conservation of the $\psi_0$ field and the appearance of the concentration ``peak'' at the interface, the bulk values of the concentration field $\psi_0$ in both liquid and solid regions deviate from the 0 value in the corresponding phase diagram, as mediated by the alloy diffusion process. We find that this deviation is a result of a finite-size effect: the deviation decreases with increasing system size, as confirmed in our simulations of $L_y=1024 \Delta y$, $2048 \Delta y$ and $8192 \Delta y$. Thus in the thermodynamic limit (with $L_y \rightarrow \infty$) $\psi_0=0$ is expected in the liquid and solid bulks, consistent with the equilibrium phase diagram for unstrained systems. On the other hand, the effect of surface enrichment would be preserved, as we have observed in simulations of various system sizes. \begin{figure} \centerline{ \includegraphics[height=3.3in]{fig3.eps}} \caption{Liquid-solid coexistence profiles for an unstrained solid layer (with $\delta_0=0$) and $\alpha_0=0$ and $0.3$.
The other parameters are the same as in Fig. \ref{fig:slPeq}. In the lower panel the $|A_j|$ and $n_0$ profiles overlap for $\alpha_0=0$ and $0.3$.} \label{fig:sl0eq} \end{figure} \subsubsection{Coherent strained layer growth and front motion} To simulate the process of strained layer growth encountered in most experiments, we start from a liquid-solid(strained) coexisting configuration and let the liquid solidify, leading to a growing front of the strained solid layer (as shown in Fig. \ref{fig:sl_f}). The initial condition is set as the liquid-solid coexistence profiles given in Fig. \ref{fig:slPeq}, with only $n_0$ in the liquid changed to $n_0^{\rm liq}=-0.0021$ to initialize the solidification and growth, while all other fields (including the concentration $\psi_0$ and amplitudes $A_j$) are kept the same as in the coexistence condition. The growth rate of the strained layer can be controlled by the choice of $n_0^{\rm liq}$ in the liquid, i.e., its deviation from the equilibrium or coexistence value. A boundary condition of constant flux is kept in the liquid region (at a distance $100 \Delta y$ beyond the moving interface). The growth process is shown in Fig. \ref{fig:sl_f}, for equal mobility $M_A=M_B$, $5\%$ misfit strain for the solid layer, and up to $t=10^6$. The liquid-solid front moves smoothly for both $\alpha_0=0$ and $0.3$, as seen from the amplitude and $n_0$ profiles in the figure. For $\alpha_0=0$, the concentration $\psi_0$ in both liquid and solid layers remains uniform at the initial value 0, as in the equilibrium state. However, the results for $\alpha_0=0.3$ show a phenomenon of composition overshooting at the growth front of the strained solid (see Fig. \ref{fig:sl_f}b). This overshooting effect manifests itself as an increase of $\psi_0$ (i.e., more A or fewer B atoms) around the interface, resulting in the phenomenon of surface enrichment: the A atoms (of larger atomic size for $\alpha_0>0$) segregate on the compressively strained solid surface. As time increases, this variation of alloy concentration propagates into the bulk of the solid layer as a result of atomic diffusion (note that the concentration of the liquid bulk remains unchanged due to the constant flux boundary condition). \begin{figure} \centerline{ \includegraphics[height=3.3in]{fig4a.eps} \hskip 5pt \includegraphics[height=3.3in]{fig4b.eps}} \caption{Growth of a strained solid layer from a liquid-solid initial configuration, with $\alpha_0=0$ (a) and $0.3$ (b) and equal mobility $M_A=M_B$. The parameters are the same as the corresponding liquid-solid coexistence state given in Fig. \ref{fig:slPeq}, except for $n_0=-0.0021$ in the liquid region where a constant flux boundary condition is set up.} \label{fig:sl_f} \end{figure} Figure \ref{fig:sl_f_m} shows that the mobility disparity between different alloy components plays an important role in this overshooting effect. Atoms with larger mobility will accumulate on the surface, even with $\alpha_0=0$. As seen in the concentration profile of Fig. \ref{fig:sl_f_m}a, a peak of larger (or smaller) $\psi_0$ appears around the liquid-solid interface for $M_A > M_B$ (or $M_A < M_B$), while no overshooting is observed in the case of equal mobility. For nonzero $\alpha_0$ (Fig. \ref{fig:sl_f_m}b), the effect of surface enrichment of A atoms is enhanced when $M_A> M_B$, while for $M_A < M_B$ B-atom enrichment is observed at large enough times. Another effect of the mobility difference presented in Fig. \ref{fig:sl_f_m} is the change of the solid layer growth rate, or front speed.
For a large disparity of atomic mobility between the A and B components, one of the components moves much more slowly than the other and thus hinders the atomic diffusion process. This leads to a slower motion of the interface, as seen in Fig. \ref{fig:sl_f_m}. Thus we can expect that in the limit of $M_A/M_B \gg 1$ (or $M_A/M_B \ll 1$), B (or A) atoms would be almost immobile compared to A (or B) and hence would pin the interface location, resulting in a frozen front. This has been incorporated in the amplitude equations developed above: when $m=\pm 1$ (with $m=(M_A-M_B)/(M_A+M_B)$ as defined in Eq. (\ref{eq:m_g0_v})), Eq. (\ref{eq:Aj0}) yields $dA_j/dt=0$, a frozen amplitude profile. Furthermore, the concentration profile is symmetric with respect to the sign of $m$ (i.e., $M_A/M_B>1$ vs. $<1$) for $\alpha_0=0$, as shown in Fig. \ref{fig:sl_f_m}a for $M_A/M_B=100$ and $10^{-2}$, which yield the same front speed and the same $A_j$ and $n_0$ profiles. The situation for nonzero $\alpha_0$ (different atomic sizes) is more complicated. In our calculations of Fig. \ref{fig:sl_f_m}b with $\alpha_0=0.3$ and $5\%$ compressive misfit, the liquid-solid coexisting profile yields $\psi_0>0$ (A-rich) in the liquid region and $<0$ (B-rich) in the solid layer (see also Fig. \ref{fig:slPeq}b). When $M_A=100 M_B$, the segregation of fast A atoms around the interface would tend to hinder the growth of the B-rich solid layer, while for $M_A=M_B/100$ the accumulation of fast B atoms will naturally be accompanied by the expansion of the solid region, resulting in faster solid growth. \begin{figure} \centerline{ \includegraphics[height=3.3in]{fig5a.eps} \hskip 5pt \includegraphics[height=3.3in]{fig5b.eps}} \caption{Growth of a strained solid layer from a liquid-solid initial configuration, with $\alpha_0=0$ (a) and $0.3$ (b), mobilities $M_A=M_B$, $M_A=100 M_B$, and $M_A=M_B/100$, and time $t=10^6$. Other parameters are the same as those in Fig. \ref{fig:sl_f}. In (a) the $|A_j|$ and $n_0$ profiles for $M_A=100 M_B$ and $M_A=M_B/100$ overlap.} \label{fig:sl_f_m} \end{figure} The composition overshooting effect presented here and the associated surface enrichment phenomenon can be viewed as a result of an interface intermixing process via atomic interdiffusion and mass transport of alloy components, appearing as vertical phase separation or segregation in the liquid-solid interface region. Such a process of vertical separation has also been found in 2D simulations of binary PFC equations \cite{re:elder07}, where the component of greater size or larger mobility was found to accumulate near the undulated solid surface in a liquid/substrate epitaxial system. Importantly, the results shown here are consistent with recent experimental observations of surface or interface segregation phenomena in alloy heterostructures, particularly in semiconductor epitaxial layers. Most experiments focus on III-V or group IV heteroepitaxial films, with typical systems including InGaAs/GaAs(001) (with In enrichment or segregation \cite{re:moison89,re:walther01,re:cederberg07}), Ge(SiGe)/Si(001) (with Ge segregation \cite{re:walther97,re:denker05}), and multilayers or superlattices of InP/InGaAs (with excess InAs at the interface \cite{re:gerling01}), GaAs/GaSb (with Sb segregation and Sb-As exchange and intermixing \cite{re:dorin03}), GaAs/InAs (with In segregation \cite{re:pearson04}), etc.
In these experimental systems the segregation or enrichment effect involves the coupling of several factors: different atomic sizes (nonzero $\alpha_0$), misfit strain, and unequal mobilities of the alloy components (e.g., $M_{\rm Ge} > M_{\rm Si}$ and $M_{\rm In} > M_{\rm Ga}$), each of which has been identified in our analysis above. \section{Conclusions} In this paper we have furthered the development of the phase-field-crystal methodology by systematically deriving the PFC dynamic model equations from dynamical density functional theory (DDFT) and completing the derivation of the corresponding amplitude equation formalism. A truncation of the DFT free energy functional up to three-point direct correlation functions has been used, and the dynamics derived from DDFT has been further simplified through lowest order approximations via a simple scale analysis to obtain the PFC equations, for both single-component and binary alloy systems. For the binary PFC model, the corresponding amplitude equations (both deterministic and stochastic) have been established via a hybrid multiple-scale approach, which describe large-scale or ``slow'' dynamics of structural and compositional profiles based on the underlying crystalline state. Compared to other recent developments, which have mainly focused on the evolution of complex structural amplitudes and the concentration field, this work presents results that incorporate the additional effects of the mobility difference between alloy components, the coupling to the zero-mode average atomic density, and noise dynamics. Although the amplitude equations that we derive are for the 2D hexagonal crystalline state, they can be extended to 3D bcc or fcc structures by following a procedure similar to the one developed here and adopting the corresponding basic wavevectors (see also Ref. \cite{re:elder10}). This amplitude equation formalism for binary PFC has been applied to identifying the mechanisms and parameter coupling during the process of surface segregation and alloy intermixing. Both liquid-solid and liquid-solid-solid epitaxial heterostructures have been examined, including morphological and compositional profiles. We find that the effect of concentration segregation on the solid surface is controlled by material parameters such as the disparity of atomic size and mobility between different alloy components and the misfit strain in solid layers. In the cases of nonzero solute expansion coefficient or unequal atomic mobility, an effect of composition overshooting around the liquid-solid interface is obtained during strained layer growth, corresponding to vertical phase separation or segregation in the interface region. These results are consistent with recent experimental findings in heteroepitaxial systems, particularly the phenomenon of surface or interface segregation appearing as the enrichment of one of the alloy species compared to the bulk phase. This sample application of the amplitude equation formalism developed here has further illustrated the features and advantages of the PFC methodology, particularly in terms of modeling and understanding complex material phenomena involving spatial and temporal scales of experimental relevance. \begin{acknowledgments} Z.-F.H. acknowledges support from the National Science Foundation (NSF) under Grant No. CAREER DMR-0845264. K.R.E. acknowledges support from NSF under Grant No. DMR-0906676. N.P. acknowledges support from the Natural Sciences and Engineering Research Council of Canada. \end{acknowledgments}
\section{Introduction} Critical phenomena in complex networks have been attracting considerable interest~\cite{Dorogovtsev07}. Complex networks are characterized by a so-called small-world property~\cite{WS98}: the number of neighbors of a node increases exponentially with the distance from it. Owing to this property, it is believed that critical phenomena in complex networks belong to the mean-field universality class. Nevertheless, structural heterogeneity leads to rich behavior. For example, in scale-free~(SF) networks having a power-law degree distribution $P(k)\sim k^{-\gamma}$~\cite{BA99}, mean-field critical exponents may vary with the degree exponent $\gamma$~\cite{Dorogovtsev07}. Recent studies raise an important issue concerning the finite-size-scaling~(FSS) theory in complex networks~\cite{Hong07,Castellano08}. A scale-free network with $N$ nodes has a maximum cutoff $k_{\max}$ in degree. In most cases without any constraint, the cutoff scales as $k_{\max} \sim N^{1/(\gamma-1)}$, which is determined by the condition $\sum_{k>k_{\max}}P(k) = 1/N$. This is called the natural cutoff. One may impose a forced cutoff \begin{equation}\label{k_max} k_{\max} = N^{1/\omega} \end{equation} with the cutoff exponent $\omega > \gamma-1$. In taking the thermodynamic limit, one should take the limits $N\rightarrow \infty$ and $k_{\max}\rightarrow \infty$ simultaneously. This may give rise to an intricate finite-size effect~\cite{Castellano08}. Hong, Ha, and Park~\cite{Hong07} developed an FSS theory based on the single-parameter scaling hypothesis. Their theory predicts the values of the FSS exponents in the Ising model (including the more general equilibrium $\phi^n$ theory) and the contact process~(CP), respectively. The CP is a reaction-diffusion model describing epidemic spreading, which exhibits a prototypical nonequilibrium phase transition from an inactive phase into an active phase~\cite{Hin00}. It has been suggested that the FSS exponents depend only on the degree exponent $\gamma$, regardless of the cutoff, provided it is not too strong $(\omega<\gamma)$. Note that this condition includes networks with the natural cutoff $(\omega=\gamma-1)$ as well as networks with a weak forced cutoff $(\gamma-1<\omega<\gamma)$. These results were confirmed numerically in the static model~\cite{Goh01} having the natural cutoff and the uncorrelated configuration model~(UCM)~\cite{Catanzaro05}. Castellano and Pastor-Satorras~\cite{Castellano08} considered the CP in the so-called random neighbor (annealed) network. Links are not fixed but fluctuate in this annealed network. At each time step, neighbors of a node are chosen independently and randomly according to the degree distribution. It contrasts with a network where links are fixed permanently in time once they are formed. In order to stress the distinction, the former network will be referred to as an {\em annealed} network, while the latter as a {\em quenched} network. From the analysis of the survival probability at the critical point, they found that the dynamic exponent characterizing the relaxation time depends not only on $\gamma$ but also on $\omega$ when $\omega>\gamma-1$ (all networks with a forced cutoff) and $2<\gamma<3$. In particular, it has been shown that there are two different characteristic time scales (and correspondingly two order-parameter scales), which make single-parameter scaling impossible. From the relaxation time scaling, the order parameter in the quasi-steady state also scales with $N$ with an exponent depending on both $\gamma$ and $\omega$.
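As an aside, the natural-cutoff scaling invoked above follows from a one-line estimate, approximating the tail sum by an integral for $P(k)=a k^{-\gamma}$: \begin{equation} \sum_{k>k_{\max}} P(k) \simeq \int_{k_{\max}}^{\infty} a\, k^{-\gamma}\, dk = \frac{a}{\gamma-1}\, k_{\max}^{-(\gamma-1)} = \frac{1}{N} \quad \Longrightarrow \quad k_{\max} \sim N^{1/(\gamma-1)} . \end{equation}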
At first glance, the results of Refs.~\cite{Hong07} and \cite{Castellano08} seem incompatible (single-parameter versus two-parameter scaling, and cutoff-independent versus cutoff-dependent scaling) when $\gamma-1<\omega<\gamma$ (weak forced cutoff) and $2<\gamma<3$ (highly heterogeneous regime). However, this is not the case. The FSS theory of Ref.~\cite{Hong07} concerns a quenched scale-free network, while that of Ref.~\cite{Castellano08} concerns an annealed scale-free network. Quenched disorder in linking topology generates local correlations through quenched links between nodes, which are responsible for the shift of the phase transition point and its disorder fluctuations. Therefore, one cannot rule out the possibility that the disorder fluctuations near the phase transition point wipe out or at least significantly alter the cutoff-dependent scaling regime; see Sec.~VI. In this paper, we present a full FSS theory governing the critical and off-critical scaling behaviors of the CP in annealed networks. In Sec.~II, an annealed network is introduced without any sampling disorder and the heterogeneous mean field theory is briefly reviewed for the CP. The critical dynamics is analyzed in Sec.~III, while the off-critical scaling is investigated in Sec.~IV. In Sec.~V, we discuss the effect of sampling disorder in annealed networks and its self-averaging property. Finally, we summarize our results along with a brief discussion on the effect of linking disorder in quenched networks. \section{CP in annealed networks} We consider annealed scale-free networks with the degree distribution $P(k)=a k^{-\gamma}$ for $k_{\min}\leq k\leq k_{\max}$ with a normalization constant $a$ and $P(k)=0$ elsewhere. The maximum degree $k_{\max}$ scales with network size $N$ as in Eq.~(\ref{k_max}) and the minimum degree $k_{\min}$ is an $O(1)$ constant. Since neighbors of each node need not be specified, an annealed network is realized by choosing a degree sequence $\{k_1,\cdots, k_N\}$ only. There are two different ways of choosing the degree sequence. One may assign degree $k$ to $N_k$ nodes deterministically in such a way that $\sum_{k'\geq k} N_{k'} = \mbox{int}[N \sum_{k'\geq k}P(k')]$ for all $k$ in decreasing order starting from $k_{\max}$, where $\mbox{int}[x]$ is the integer part of $x$. One may easily show that the maximum degree realized using this assignment algorithm is of the same order in $N$ as the given $k_{\max}$ when $\omega\ge\gamma-1$. Alternatively, one may draw $N$ values of $k$ probabilistically in accordance with the probability distribution $P(k)$. The probabilistic method yields an ensemble of different samples, which makes an ensemble average necessary. We mainly consider the annealed network realized by the deterministic method. Sample-to-sample fluctuations in the ensemble generated by the probabilistic method will be discussed in Sec.~V. The CP on the annealed SF network is defined as follows. Each node is either occupied by a particle or empty. A particle on a node is annihilated with probability $p$ or branches one offspring to its {\em neighbor}, if empty, with probability $(1-p)$. At each time step, a neighbor of a node is selected among all other nodes with probabilities proportional to their degree. Since a node is coupled only probabilistically with all other nodes, the mean field theory becomes exact in the annealed network. Let $n(t)$ be the number of particles at time $t$.
Following Ref.~\cite{Castellano08} in a quasistatic approximation for large $t$, it increases by 1 with probability \begin{equation}\label{eq:w+} w_+ = p \lambda \rho \sum_k \frac{ k P(k) }{\langle k \rangle} \frac{1}{1+ \lambda \rho k/ \langle k \rangle} , \end{equation} or decreases by 1 with probability \begin{equation}\label{eq:w-} w_- = p \rho \end{equation} after a time step $\Delta t = 1/N$. Here $\rho = n/N$ is the particle density, $\lambda = (1-p)/p$, and $\langle k \rangle$ is the mean degree. The transition probability $w_+$ contains a nontrivial $\rho$-dependence. When the thermodynamic limit is taken first~\cite{Hong07} or the density is high~($\rho \gg 1/k_{\max}$) in finite networks~\cite{Castellano08}, one may arrive at a singular expansion \begin{equation}\label{w+high} w_+ / p = \lambda \rho - c \rho^{\theta-1} + \cdots \end{equation} with a constant $c$ and \begin{equation} \theta = \min\{\gamma,3\} . \end{equation} When the density is low~($\rho \ll 1/k_{\max}$) in finite networks, one can expand the denominator in Eq.~(\ref{eq:w+}) to obtain \begin{equation}\label{w+low} w_+ / p = \lambda \rho - \lambda^2 g \rho^2 + \cdots \end{equation} where $ g = {\langle k^2\rangle}/{\langle k \rangle^2 }$ with $\langle k^n\rangle\equiv \sum_{k} k^n P(k)$. Note that $g$ is an $O(1)$ constant for $\gamma>3$, while it scales as $g \sim k_{\max}^{3-\gamma} \sim N^{(3-\gamma)/\omega}$ for $2<\gamma<3$. The scaling behavior can be rewritten as \begin{equation}\label{g_s} g \sim k_{\max}^{3-\theta} \sim N^{(3-\theta)/\omega} , \end{equation} for general $\gamma (\neq 3)$ and $\omega\ge \gamma-1$. At $\gamma=3$, $g\sim \log N$. As the stochastic fluctuation $(\Delta \rho)/\rho$ (multiplicative diffusive noise) becomes negligible in the $N\rightarrow \infty$ limit, one can write the rate equation for the average particle density in the continuum limit as \begin{equation}\label{rate_eq} \frac{d\rho}{dt} = w_+ - w_- . \end{equation} It is clear that the system undergoes an absorbing phase transition at $p=p_c = 1/2$~($\lambda_c = 1$) at all values of $\gamma>2$ in the thermodynamic limit. The particle density near the critical point scales as $\rho \sim (\lambda-\lambda_c)^\beta$ with the order parameter exponent $\beta = {1}/(\theta-2)$~\cite{Hong07}. At $\gamma=3$, an additional logarithmic correction appears as $\rho\sim (\lambda-\lambda_c)/|\log (\lambda-\lambda_c)|$. \section{Critical dynamics}\label{CD} We consider the CP at the critical point ($p=1/2$ or $\lambda=1$). One may regard the particle number $n$ $(0\le n\le N)$ as the coordinate of a one-dimensional random walker~\cite{Castellano08}. At each time step $\Delta t = 1/N$, the walker jumps to the right with probability $w_+$ or to the left with probability $w_-$, or does not move with probability $1-(w_+ + w_-)$. The walker is bounded by an absorbing wall at $n=0$ and a reflecting wall at $n=N$. Upon reaching the absorbing wall, it is trapped there forever. It turns out that an event-driven dynamics is useful. In this dynamics, the walker always makes a jump at each time step $\Delta \tau=1$ to the right or left with probabilities \begin{equation} \tilde{w}_{\pm} = \frac{w_{\pm}}{w_+ + w_-} . \end{equation} This is equivalent to the original problem if one rescales the time with the relation \begin{equation}\label{t_tau} dt = \frac{1}{N}\frac{d\tau}{w_+ + w_-} . \end{equation} \subsection{Defect dynamics} It is interesting to study how particles spread starting from a localized seed.
Dynamics initiated from a single particle is called the defect dynamics~\cite{HKPP98,Hin00}; thus the initial condition is $n(0)=n_0=1$. Quantities of interest are the survival probability $P_{s}(t)$, the probability that the system is still active at time $t$, and $n_{s}(t)$, the number of particles averaged over surviving samples. At the critical point, they exhibit power-law scalings \begin{equation} P_s(t) \sim t^{-\delta} \ \ \mbox{and} \ \ n_s(t) \sim t^{\tilde\eta} \end{equation} for $t<t_c(N)$ with the relaxation time scaling as \begin{equation} t_c \sim N^{\bar z} . \end{equation} At $t= t_c$, the system starts to feel its finite size and $n_s(t)$ saturates. For $t>t_c$, $P_s(t)$ decays exponentially. The critical exponents $\delta$, ${\tilde \eta}$, and ${\bar z}$ are universal. Note that ${\tilde\eta}=\delta+\eta$ where $\eta$ is the particle number growing exponent for all samples. Initially, $\rho_0=n_0/N$ is so small (much smaller than $1/k_{\max}$) that one can always use the expansion in Eq.~(\ref{w+low}) for $w_+$. We will confirm that this is valid throughout the defect dynamics. Then, in the event-driven dynamics, the jumping probability for the walker at site $n$ is given by $$ \tilde{w}_+ = \frac{1 - g \rho}{2 - g\rho} \ \ \mbox{and} \ \ \tilde{w}_- = \frac{1}{2 - g\rho} $$ for small $\rho$. This shows that the walker performs biased walks toward the absorbing wall with the drift velocity \begin{equation} v_{drift} \equiv \frac{dn}{d\tau}=\tilde{w}_+ - \tilde{w}_- = - \frac{g\rho}{2-g\rho} . \end{equation} The bias is negligible~($v_{drift}/\tilde{w}_\pm \ll 1$) during the initial stage since $g \rho \ll 1$. Hence, for sufficiently small $\tau$, it suffices to consider the unbiased random walk motion in the presence of the absorbing wall at $n=0$. The effect of the absorbing wall can be taken into account by using the image method~\cite{Fisher84}. This yields that the survival probability decays as \begin{equation}\label{P_tau} P_{s}(\tau) \simeq n_0 (\pi \tau/2)^{-1/2} \end{equation} and that the surviving walker spreads out diffusively as \begin{equation}\label{n_tau} n_s(\tau) \simeq \sqrt{\pi\tau/2} . \end{equation} The diffusion velocity for the surviving walkers is given by \begin{equation} v_{diffuse} \equiv n_s(\tau)/\tau \simeq \sqrt{\pi /(2 \tau)} \simeq \pi /(2 n_s) . \end{equation} As $\tau$ increases, the diffusion velocity becomes smaller while the bias becomes stronger. The walker reaches a stationary state when the diffusion velocity and the drift velocity are balanced. The condition $v_{diffuse} \sim |v_{drift}|$ yields that the walker reaches the stationary state at position \begin{equation}\label{n_sc} n_s^\infty \sim \sqrt{N/g} \end{equation} and at time \begin{equation}\label{tau_c} \tau_c \sim {N}/{g} . \end{equation} This result is self-consistent with the underlying assumptions that $\rho k_{\max}\ll 1$ and $g\rho\ll 1$. The time scales $t$ and $\tau$ are related through Eq.~(\ref{t_tau}). Using Eqs.~(\ref{n_tau}) and (\ref{t_tau}), one finds that \begin{equation} t \simeq \int^\tau \frac{d\tau'}{ n(\tau')} \sim \sqrt{\tau} . \end{equation} Therefore we conclude that \begin{eqnarray} &&P_{s}(t) \sim n_0 t^{-1},\label{defect_P}\\ &&n_s(t) \sim t ,\label{defect_ns}\\ &&t_c \sim \sqrt{{N}/{g}} , \label{defect_tc} \end{eqnarray} which leads to \begin{eqnarray} \delta &=& 1 ,\label{delta}\\ \tilde{\eta} &=& 1 \quad(\eta=0),\label{eta}\\ {\bar z} &=& (1-(3-\theta)/\omega)/2 .
\label{z_d} \end{eqnarray} The results for $\delta$ and ${\bar z}$ coincide with those of Ref.~\cite{Castellano08}. At $\gamma=3$, $t_c\sim n_s^\infty\sim (N/\log N)^{1/2}$. \subsection{Static dynamics} The static dynamics starts with the initial condition that all nodes are occupied, $n_0=N$ ($\rho_0=1$). We consider the scaling behavior of $\rho_s$, the particle density averaged over surviving samples. The rate equation~(\ref{rate_eq}) takes a different form depending on the particle density. When $\rho k_{\max} \gg 1$, it becomes $d\rho/dt = -c\rho^{\theta-1}/2$, which yields \begin{equation}\label{rho_t_1} \rho_s (t) \sim t^{-1/(\theta-2)} . \end{equation} If the density becomes sufficiently small such that $\rho k_{\max}\ll 1$, then the rate equation should be replaced by $d\rho/dt = -g \rho^2/2$, which yields the solution \begin{equation}\label{rho_t_2} \rho_s (t)\sim (gt)^{-1} . \end{equation} The crossover between the two regimes takes place at time \begin{equation}\label{z_*} t_* \sim g^{(\theta-2)/(3-\theta)}\sim N^{{\bar z}_*} ~\mbox{with}~{\bar z}_* = (\theta-2)/\omega . \end{equation} At this crossover time scale $t=t_*$, the system starts to feel the finite upper bound $k_{\max}$ on the degree. The density at the crossover is given by \begin{equation}\label{alpha*} \rho_* \sim g^{-1/(3-\theta)} \sim N^{-\alpha_*} ~\mbox{with}~ \alpha_* = 1/\omega . \end{equation} \begin{figure}[t] \includegraphics[width=\columnwidth]{fig1.eps} \caption{(a) Schematic plot of $\rho_s$ vs. $t$ on a log-log scale at the critical point in the annealed networks with $2<\gamma<3$ and $\omega>\gamma-1$. The solid~(dashed) line corresponds to the static~(defect) dynamics. (b) Schematic plot of $\rho_s$ vs. $\varepsilon$ under the same conditions. }\label{fig1} \end{figure} Finally, the system reaches the stationary state. From Eq.~(\ref{n_sc}), the particle density at the stationary state is given by \begin{equation}\label{alpha} \rho_s^\infty \sim \sqrt{1/(gN)}\sim N^{-\alpha} ~\mbox{with}~ \alpha = (1+(3-\theta)/\omega)/2. \end{equation} The saturation time $t_c$ determined from $(gt_c)^{-1} \sim N^{-\alpha}$ has the same scaling behavior as the relaxation time in the defect dynamics (see Eqs.~(\ref{defect_tc}) and (\ref{z_d})). This means that finite systems reach the stationary state at the same time scale, irrespective of the initial conditions. A few remarks are in order. For $\omega > \gamma-1$ and $\gamma<3~(\theta=\gamma)$, there exist two distinct $N$-dependent time scales $t_* \sim N^{{\bar z}_*}$ and $t_c\sim N^{\bar z}$ with ${\bar z}_*<{\bar z}$. The former comes into play due to the finiteness of the maximum degree $k_{\max}\sim N^{1/\omega}$, while the latter is the time scale to reach the stationary state in finite networks. This implies that finite-size effects in the annealed SF networks depend on the limiting procedure by which $N$ and $k_{\max}$ are taken to infinity. For $\gamma>3~(\theta=3)$, the distinction between the first regime (Eq.~(\ref{rho_t_1})) and the second regime~(Eq.~(\ref{rho_t_2})) disappears. The particle density decays as $\rho_s\sim t^{-1}$ for $t<N^{\bar z}$ with ${\bar z}=1/2$, and then saturates to the stationary state value $\rho_s^\infty \sim N^{-1/2}$. At $\gamma=3$, $\rho_s\sim (t\log t)^{-1}$ for $t<t_c\sim (N/\log N)^{1/2}$ and $\rho_s^\infty\sim (N\log N)^{-1/2}$. The systems with the natural cutoff ($\omega=\gamma-1$) are special. Even for $\gamma<3$, the two time scales $t_*$ and $t_c$ coincide, with ${\bar z}_*={\bar z} = (\gamma-2)/(\gamma-1)$.
This means that the second regime does not exist. The density decays as $\rho_s \sim t^{-1/(\gamma-2)}$ for $t<N^{\bar z}$, and then saturates to $\rho_s^\infty \sim N^{-1/(\gamma-1)}$. The defect and static dynamics at criticality are illustrated schematically in Fig.~\ref{fig1}(a). \subsection{Numerical simulations} We have performed numerical simulations in the annealed SF networks to confirm the analytic results. In the defect simulations, the survival probability is expected to scale as \begin{equation}\label{p_scale} P_s(t,N) = N^{-{\bar z} \delta} \mathcal{P}(t/N^{\bar z}) . \end{equation} The scaling function behaves as $\mathcal{P}(x) \sim x^{-\delta }$ as $x\rightarrow 0$ and decays exponentially as $x\rightarrow \infty$. The particle number averaged over surviving samples is expected to scale as \begin{equation}\label{n_scale} n_s(t,N) = N^{{\bar z} \tilde\eta } \mathcal{N}(t/N^{\bar z}) . \end{equation} The scaling function behaves as $\mathcal{N}(x)\sim x^{\tilde\eta}$ as $x\rightarrow 0$ and converges to a constant as $x\rightarrow \infty$. \begin{figure}[t] \includegraphics*[width=\columnwidth]{fig2.eps} \caption{(Color online) (a) Scaling plot for the survival probability. (b) Scaling plot for the number of particles averaged over surviving samples. The upper, middle, and lower sets of data correspond to the annealed SF networks with $\gamma=2.5$ and $\omega=1.5$, $2.0$, and $3.0$, respectively. For readability, each data set is scaled down by a constant factor. The dashed lines are guides to the eyes having slope $-1$ in (a) and $1$ in (b).}\label{fig2} \end{figure} Figure~\ref{fig2} shows the scaling plots according to the scaling form in Eqs.~(\ref{p_scale}) and (\ref{n_scale}) with the exponent values in Eqs.~(\ref{delta}), (\ref{eta}), and (\ref{z_d}). The annealed networks of size $N=10^3,\cdots,10^6$ were generated with the deterministic method. The plotted data were obtained by averaging over $10^6$ runs. The nice data collapse confirms the validity of the analytic result. \begin{figure}[t] \includegraphics*[width=\columnwidth]{fig3.eps} \caption{(Color online) (a) Critical density decay at $\gamma=2.5$ and $\omega=2.5$. The network sizes are $N=10^3,\cdots,10^6$. Effective exponent $q$ versus $t/N^{{\bar z}_*}$ at $\gamma=2.5$ and (b)~$\omega=2.0$, (c)~$2.5$, (d)~$3.0$.} \label{fig3} \end{figure} We have also performed the static simulations. Numerical data at the critical point obtained in networks with $\gamma=2.5$ and $\omega=2.5$ are presented in Fig.~\ref{fig3}(a). Unlike in the schematic plot in Fig.~\ref{fig1}, the crossover between two regimes with $\rho_s \sim t^{-1/(\gamma-2)}$ and $\rho_s \sim t^{-1}$, respectively, is not prominent. Moreover, the decay exponents seem to deviate from the expected values significantly. In order to understand the origin of the discrepancy, we have performed a local slope analysis. As an estimate for the density decay exponent, we define an effective exponent $q(t) \equiv - \ln(\rho_s(t)/\rho_s(t/m)) / \ln m$ with a constant $m=4$. The effective exponents measured at $\omega=2.0$, $2.5$, and $3.0$ are plotted in Figs.~\ref{fig3}(b), (c), and (d), respectively, against the scaling variable $ x=t/N^{{\bar z}_*}$. The analytic theory predicts that $q$ should converge to $1/(\gamma-2)=2$~($1$) as $N$ increases for $x< 1$~($x> 1$). The effective exponent plot shows a weak but clear tendency toward the analytic prediction. 
For $x<1$, the effective exponent steadily increases above 1 with network size, but remains much lower than the predicted value 2 even for $N=10^6$. Moreover, there is no appreciable power-law region (flat region for $q$). For $x>1$, the effective exponent overshoots the predicted value 1 up to $N=10^5$, but starts to decrease slightly at $N=10^6$. We also note an appreciable flat region in this case. In order to identify numerically the power-law scaling in the first regime, one would need at least $t_*\sim 10^2$ (two logarithmic decades). With $\gamma=2.5$ and $\omega=2.5$, the system size must be larger than $\sim 10^{10}$, which is beyond current computational capacity. We have also studied the FSS behavior of the stationary-state particle density at criticality. It exhibits a power-law scaling with $N$ as $\rho_s^\infty \sim N^{-\alpha}$. It is found (not shown here) that the numerical result for the exponent $\alpha$ is compatible with the analytic result given in Eq.~(\ref{alpha}). However, a discrepancy becomes noticeable as $\gamma$ becomes smaller and $\omega$ becomes larger, due to strong finite size effects. The degree distribution becomes singular as $\gamma$ approaches $2$. At large $\omega$, the maximum degree $k_{\max}\sim N^{1/\omega}$ grows so slowly that it becomes difficult to observe the asymptotic scaling behavior. \section{Off-critical scaling} For $2<\gamma<3$, the particle density exhibits distinct dynamic characteristics depending on whether $\rho k_{\max}>1$ or $\rho k_{\max}<1$. This causes an interesting cutoff-dependent FSS behavior at the critical point. Such a cutoff dependence disappears far from the critical point. However, in finite systems near the critical point, the cutoff dependence can still survive, leading to anomalous FSS behavior. For $\gamma>3$, the system shows simple normal FSS behavior. Near the critical point at $p=p_c(1-\varepsilon)$, the rate equation for the density, Eq.~(\ref{rate_eq}), reads for $2<\gamma<3$ \begin{eqnarray} d\rho_s/dt &= \varepsilon \rho_s - c' \rho_s^{\gamma-1} \ \ &\mbox{for}\ \rho_s k_{\max}>1, \label{OC1}\\ &= \varepsilon \rho_s - \frac{1}{2} g \rho_s^2 \ \ &\mbox{for}\ \rho_s k_{\max}<1 ,\label{OC2} \end{eqnarray} where $k_{\max}=N^{1/\omega}$, $g=\langle k^2\rangle / \langle k\rangle^2 \sim N^{(3-\gamma)/\omega}$, and $c'=c/2$. By setting $d\rho_s/dt = 0$, one finds that the stationary-state solution is given by \begin{eqnarray} \rho_s^\infty &\sim \varepsilon^{\beta} \ \ &\mbox{for} \ \ \rho_s k_{\max} >1 ,\\ &\simeq 2\varepsilon / g \ \ &\mbox{for} \ \ \rho_s k_{\max} < 1\label{linear} \end{eqnarray} with the bulk order parameter exponent \begin{equation} \beta = 1/(\gamma-2) . \end{equation} This shows that the stationary-state solution also depends on the degree cutoff $k_{\max}$. There is a crossover at $\varepsilon=\varepsilon_*$ with \begin{equation} \label{eq-eps1} \varepsilon_* \sim g^{-(\gamma-2)/(3-\gamma)} \sim N^{-1/\bar\nu_*} \mbox{with}~ 1/\bar\nu_* = \frac{\gamma-2}{\omega} . \end{equation} For $\varepsilon < \varepsilon_*$, the order parameter scaling changes into the $\gamma$-independent ordinary MF linear scaling as in Eq.~(\ref{linear}), although this crossover disappears ($\varepsilon_*\rightarrow 0$) in the thermodynamic limit. When $\varepsilon$ decreases further below $\varepsilon_*$, the system will reach the critical state where the particle density scales as $\rho_s^\infty \sim \sqrt {1/(gN)}$~(see Eq.~(\ref{alpha})).
The critical region in finite systems starts at $\varepsilon = \varepsilon_c$ with \begin{equation}\label{eq-epsc} \varepsilon_c\sim \sqrt{\frac{g}{N}} \sim N^{-1/\bar\nu} \mbox{with}~ 1/\bar\nu= \frac{1-(3-\gamma)/\omega}{2}, \end{equation} where the finite size saturation starts to occur ($2\varepsilon_c/g=\rho_s^\infty$). The off-critical FSS behavior is illustrated in Fig.~\ref{fig1}(b). This scaling theory predicts that there exist two characteristic sizes $N_* \sim \varepsilon^{-\bar\nu_*}$ and $N_c \sim \varepsilon^{-\bar\nu}$ which separate three scaling regimes. In the regime I ($\varepsilon > \varepsilon_*$) where $N_* < N$, the system behaves as in a SF network with infinite $N$ and infinite $k_{\max}$, e.g.~$\rho_s\sim \varepsilon^\beta$. In the regime II ($\varepsilon_c < \varepsilon < \varepsilon_*$) where $N_c < N < N_*$, it behaves as in a SF network with infinite $N$ but with finite $k_{\max}$. The density scales as $\rho_s \simeq 2\varepsilon / g \sim \varepsilon N^{-(3-\gamma)/\omega} $. Finally, it behaves as in a SF network with finite $N$ and $k_{\max}$ in the regime III ($\varepsilon<\varepsilon_c$) where $N < N_c$. The density scales as $\rho_s \sim N^{-\alpha}$ with $\alpha=(1+(3-\gamma)/\omega)/2$ (see Eq.~(\ref{alpha})). At the special case of the natural cutoff with $\omega=\gamma-1$, the regime II disappears and there is a direct crossover from the regime I (no size effect) to the regime III (critical size scaling). For $\gamma>3$ where $\theta=3$ and $g\sim O(1)$, $\rho_s^\infty \sim \varepsilon$ in both the regime I and II, so $\varepsilon_*$ becomes meaningless. Here again we observe a direct crossover from the regime I to the regime III. The FSS scaling behavior in the annealed SF network is sharply contrasted with that in the quenched SF network. While there are two characteristic sizes $N_*$ and $N_c$ that depend explicitly on the degree cutoff in the former (at least for $2<\gamma<3$ and $\omega>\gamma-1$), it has been proposed in the latter through a droplet-excitation (hyperscaling) argument~\cite{Hong07} that there exists a unique cutoff-independent characteristic size $N_q \sim \varepsilon^{-\bar\nu_q}$ with $1/\bar\nu_q = (\gamma-2)/(\gamma-1)$ for $2<\gamma<3$ and $1/\bar\nu_q=1/2$ for $\gamma>3$. It is interesting to notice that the FSS theory in the annealed network coincides with that in the quenched network for $\gamma>3$ and also at the special case with the natural cutoff with $\omega=\gamma-1$ for $2<\gamma<3$. The origin of the discrepancy in the FSS theory between two different networks as well as the relevance/role of the quenched linking disorder have not been fully explored as yet, which awaits a further investigation. \begin{figure} \includegraphics*[width=\columnwidth]{fig4.eps} \caption{(Color online) (a) Scaling plots of $y = \rho_s N^\alpha$ vs. $x =\varepsilon N^{1/\bar\nu}$ at $\gamma=2.5$ and $\omega = 1.5, 2.0, 2.5$, and $3.0$. Network sizes are $N=10^3,\cdots,10^7$. (b) Plots of $y/x$ against $x$. Each data set is shifted vertically by a constant factor to avoid an overlap.}\label{fig4} \end{figure} We have performed extensive simulations in the annealed SF networks to test the off-critical FSS theory. In Fig.~\ref{fig4}, we present a scaling plot of $y \equiv \rho_s N^\alpha$ against a scaling variable $x\equiv \varepsilon N^{1/\bar\nu}$ at $\gamma=2.5$ and $\omega=1.5, 2.0, 2.5$, and $3.0$. 
When $\omega=\gamma-1=1.5$ (natural cutoff), the FSS theory predicts that the quantity $y$ converges to a constant value for $x\ll 1$~(regime III) and scales as $y\sim x^{1/(\gamma-2)}=x^2$ for $x\gg 1$~(regime I). There should be no regime II. The numerical data in Fig.~\ref{fig4} seem to support this two-regime scaling behavior reasonably well. When $\omega>\gamma-1$, we expect three scaling regimes. Numerical data in regimes II and III should collapse onto a single curve, but those in regime I should deviate from it because of the two different characteristic sizes. The numerical data in Fig.~\ref{fig4}(a) show clear evidence of regime III (flat region), but only a weak signature of regimes II (linear-slope region, $y\sim x$) and I (no collapse). Although the signature is not prominent due to strong finite size effects, the existence of the three scaling regimes is evident. In Fig.~\ref{fig4}(b), we present the numerical data in a different style by plotting $y/x$ against $x$, so that regime II can be identified by a flat region. As expected, there is no flat region at $\omega=1.5$ (natural cutoff). As $\omega$ increases, one can clearly see the broadening of the flat region (regime II), which grows with increasing $N$. This behavior is qualitatively consistent with the expected FSS behavior. It is very difficult to observe the regime I scaling even at $N=10^7$, similar to the difficulty encountered in the study of the critical dynamics in Sec.~\ref{CD}. Finally, the off-critical dynamic behavior can be easily derived from the rate equations, (\ref{OC1}) and (\ref{OC2}), in the thermodynamic limit, approaching criticality from the active or the absorbing side: $\rho_s(t)-\rho_s^\infty\sim e^{-t/\tau}$, where the relaxation time scales as $\tau\sim \varepsilon^{-\nu_t}$ with $\nu_t=1$ in all cases. These results are consistent with our previous ones through the relaxation-time relations ${\bar z}=\nu_t/\bar\nu$ and ${\bar z}_*=\nu_t/\bar\nu_*$. \section{Sample-to-sample fluctuations} Suppose that one wants to generate a network of $N$ nodes with a given degree distribution $P(k)$ with a (forced or natural) cutoff. In general, there are two kinds of quenched disorder to be considered. First, one should sample a degree sequence $\{k_1,\cdots,k_N\}$ from $P(k)$ and then choose a way of linking the nodes together to create a network. Disorder can be involved in both processes; we refer to these as sampling disorder and linking disorder, respectively. A {\em quenched} network involves both sampling and linking disorder, in general. An annealed network is free from linking disorder. However, it may still carry sampling disorder. In the numerical studies in the preceding sections, we have sampled the degree sequence deterministically without any disorder. On the other hand, probabilistic sampling of the degree sequence leads to sampling disorder. In this section, we investigate sample-to-sample fluctuations in annealed networks due to the sampling disorder. The quantity of our primary interest is $g\equiv \langle k^2 \rangle / \langle k\rangle^2$. When $N$ values $\{k_1,\cdots,k_N\}$ are drawn probabilistically in accordance with the distribution $P(k)$ for $k_{\min} \le k \le k_{\max}$, a sampled distribution $\tilde P(k)=\sum_{i=1}^N \delta_{k,k_i}/N$ may deviate from the target distribution $P(k)$ due to the finiteness of $N$. The deviation is denoted by $\delta P(k) = \tilde P(k) - P(k)$.
Then, it is straightforward to show that \begin{eqnarray} \left[\delta P(k) \right] &=& 0 \label{correlator1} \\ \left[\delta P(k) \delta P(k') \right] &=& - \frac{P(k)P(k')}{N} + \frac{P(k)}{N} \delta_{k,k'} , \label{correlator2} \end{eqnarray} where $[\cdots]$ denotes the sample (disorder) average. The $n$th moment of the degree of a sample is given by \begin{equation} \langle k^n \rangle \equiv \sum_k k^n \tilde P(k) = \langle n \rangle_0 \left( 1 + \frac{\langle n\rangle_\delta}{\langle n\rangle_0} \right), \end{equation} where we introduce shorthand notations as $\langle n\rangle_0 \equiv \sum_{k}k^n P(k)$ and $\langle n\rangle_\delta \equiv \sum_{k}k^n \delta P(k)$. There is a $1/N$ factor in the correlator in Eq.~(\ref{correlator2}). So, $\langle n\rangle_\delta/\langle n\rangle_0$ can be treated as a small expansion parameter for large $N$. Up to second order, the quantity $g$ of a given sample can be written as $$ g = \frac{\langle 2\rangle_0}{\langle 1\rangle_0^2} \left( 1+\frac{ \langle 2\rangle_\delta}{\langle 2 \rangle_0} - 2 \frac{\langle 1\rangle_\delta}{\langle 1\rangle_0} + 3 \frac{\langle 1\rangle_\delta^2}{\langle 1\rangle_0^2} - 2 \frac{\langle 2\rangle_\delta \langle 1\rangle_\delta}{\langle 2\rangle_0 \langle 1\rangle_0} \right) . $$ The disorder-averaged correlators in Eqs.~(\ref{correlator1}) and (\ref{correlator2}) imply that $\left[ \langle n\rangle_\delta \right]=0$ and that \begin{equation} \left[ \langle m\rangle_\delta \langle n \rangle_\delta \right] = \frac{1}{N} \left( \langle m+n \rangle_0 - \langle m\rangle_0 \langle n\rangle_0 \right) . \end{equation} This allows us to expand systematically $[g]$ and $(\Delta g)^2 \equiv [g^2] - [g]^2$ in powers of $1/N$. After some algebra, we obtain the following result up to the order $1/N$: \begin{equation} [g] = \frac{\langle 2\rangle_0}{\langle 1\rangle_0^2} \left\{ 1 + \frac{1}{N} \left( 3 \frac{\langle 2\rangle_0}{\langle 1\rangle_0} - 2 \frac{\langle 3 \rangle_0}{\langle 1\rangle_0 \langle 2\rangle_0} - 1 \right) \right\} \label{g_1} \end{equation} and \begin{equation} (\Delta g)^2 = \frac{1}{N} \frac{\langle 2\rangle_0^2}{\langle 1\rangle_0^4} \left\{ \frac{\langle 4\rangle_0}{\langle 2\rangle_0^2} - 4 \frac{\langle 3\rangle_0}{\langle 1\rangle_0 \langle 2 \rangle_0} + 4 \frac{\langle 2\rangle_0}{\langle 1\rangle_0^2} -1 \right\} . \label{g_2} \end{equation} Our interest lies in the SF network of $N$ nodes having the degree distribution $P(k) \propto k^{-\gamma}$ in the interval $k_{\min}\le k\le k_{\max}=N^{1/\omega}$ with $\gamma>2$ and $\omega\ge \gamma-1$. The $1/N$ term in Eq.~(\ref{g_1}) is always subleading, so the scaling behavior of $[g]$ is determined by $\langle 2 \rangle_0$, which yields that \begin{equation} [g] = \left\{ \begin{array}{lll} \sim N^{(3-\gamma)/\omega} & \mbox{for}& 2<\gamma<3 \\ [2mm] \sim \log N & \mbox{for}& \gamma=3 \\ [2mm] \sim O(1) &\mbox{for}& \gamma >3 . \end{array} \right. \end{equation} On the other hand, the term $\langle 4\rangle_0 / \langle 2\rangle_0^2$ in the parentheses of Eq.~(\ref{g_2}) makes the leading-order contribution. Hence, we find that the relative variance $R_g = (\Delta g)^2 / [g]^2$ is given by \begin{equation}\label{d_f} R_g = \left\{ \begin{array}{lll} \sim N^{ (\gamma-1)/\omega - 1 } & \mbox{for} & 2<\gamma < 3 \\ [2mm] \sim N^{ 2/\omega - 1 }(\log N)^{-2} & \mbox{for} & \gamma = 3 \\ [2mm] \sim N^{ (5-\gamma)/\omega -1 } & \mbox{for} & 3 < \gamma < 5 \\ [2mm] \sim N^{-1} \log N &\mbox{for}& \gamma =5 \\ [2mm] \sim N^{-1} &\mbox{for}& \gamma >5 .
\end{array} \right. \end{equation} In the theory of disordered systems, the relative variance $R_X$ of an observable $X$ due to quenched disorder is an indicator of the self-averaging property~\cite{Aharony96}. When it vanishes in the thermodynamic limit $N\rightarrow\infty$, such a system is said to be {\em self-averaging}. The self-averaging property implies that an observable measured in a sample with a typical disorder configuration takes the same value as the sample-averaged value in the $N\rightarrow\infty$ limit. A system with $R_X \sim N^{-1}$ is said to be {\em strongly self-averaging}~(SSA). This is the case when the central limit theorem applies. When $R_X \sim N^{-r}$ with $r<1$, such a system is said to be {\em weakly self-averaging}~(WSA). A system with strong or relevant disorder lacks the self-averaging property near criticality. In such a system, $R_X$ converges to a finite value as $N$ increases. The result in Eq.~(\ref{d_f}) reveals the self-averaging property of the annealed SF network under the sampling disorder. First of all, we find that the system with $\gamma>5$ is SSA at all values of the degree cutoff exponent $\omega$. For $\gamma\le 5$, $R_g$ decays more slowly than $N^{-1}$ at all values of $\omega>\gamma-1$, so the system is WSA. Interestingly, the systems lack the self-averaging property when $2< \gamma<3$ and $\omega=\gamma-1$ ($R_g$ approaches a non-zero constant as $N$ increases). Note that the cutoff exponent $\omega=\gamma-1$ corresponds to the natural cutoff. Networks without explicit constraint on the degree also display the cutoff scaling $k_{\max} \sim N^{1/(\gamma-1)}$. In these networks, not only the node-to-node degree fluctuations but also the sample-to-sample degree fluctuations are very strong. \begin{figure} \includegraphics*[width=\columnwidth]{fig5.eps} \caption{(Color online) Probability distribution $P_g(x)$ for $x=g/[g]$ in the annealed networks with $N=10^3,\cdots,10^6$. (a) $\gamma=2.75$ and $\omega=3.0$ (b) $\gamma=2.75$ and $\omega=1.75$.}\label{fig5} \end{figure} We present numerical data showing the (non-)self-averaging property in Fig.~\ref{fig5}. Drawing $N$ values of $k$ from the distribution $P(k)\sim k^{-\gamma}$ in the interval $2\le k\le N^{1/\omega}$, we calculated $g=\langle k^2\rangle / \langle k\rangle^2$. This was repeated $N_S=10^5$ times, from which one can construct the probability distribution function $P_g(x)$ for $x=g/[g]$. Figure~\ref{fig5}(a) shows that the distribution becomes sharper and sharper as $N$ increases. It indicates the self-averaging property at $\gamma=2.75$ and $\omega=3.0$. On the other hand, Fig.~\ref{fig5}(b) shows that the distribution converges to a limiting distribution. It indicates that the system is not self-averaging at $\gamma=2.75$ and $\omega=\gamma-1=1.75$. The strong disorder fluctuation raises an important question. In general, a real complex network is a disordered medium with quenched disorder, for example the sampling and linking disorder mentioned before. Being coupled with dynamic degrees of freedom, the quenched structural disorder may give rise to disorder-relevant critical phenomena. This is a plausible scenario, but has been ignored in most studies. It seems quite a challenging problem to incorporate the quenched disorder into a systematic analysis. In the annealed network considered in this study, the dynamic degrees of freedom are completely decoupled from the sampling disorder (there is no linking disorder).
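A minimal sketch of the probabilistic degree-sampling estimate behind Fig.~\ref{fig5} (NumPy is assumed, the sample count is reduced here for speed, and all names are illustrative): \begin{verbatim}
import numpy as np

# Draw N degrees from P(k) ~ k^{-gamma} on [k_min, N^{1/omega}],
# compute g = <k^2>/<k>^2 per sample, and accumulate its statistics.
def sample_g(N, gamma, omega, kmin=2, rng=np.random.default_rng()):
    kmax = int(round(N**(1.0/omega)))
    k = np.arange(kmin, kmax + 1, dtype=float)
    P = k**(-gamma)
    P /= P.sum()                           # normalized target distribution
    ks = rng.choice(k, size=N, p=P)        # probabilistic degree sequence
    return np.mean(ks**2)/np.mean(ks)**2   # g of this sample

N, gamma, omega, NS = 10**5, 2.75, 1.75, 10**3
g = np.array([sample_g(N, gamma, omega) for _ in range(NS)])
Rg = g.var()/g.mean()**2                   # relative variance R_g
print(Rg)   # stays O(1) at the natural cutoff omega = gamma - 1
\end{verbatim} Repeating this for increasing $N$ and histogramming $x=g/[g]$ reproduces the qualitative behavior of Fig.~\ref{fig5}: the distribution sharpens with $N$ for $\omega=3.0$ but approaches a limiting shape at the natural cutoff $\omega=\gamma-1=1.75$.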
The strong disorder fluctuation raises an important question. In general, a real complex network is a disordered medium with quenched disorder, for example the sampling disorder and the linking disorder mentioned before. Being coupled with the dynamic degrees of freedom, the quenched structural disorder may give rise to disorder-relevant critical phenomena. This is a plausible scenario, but it has been ignored in most studies. It seems quite a challenging problem to incorporate the quenched disorder into a systematic analysis. In the annealed network considered in this study, the dynamic degrees of freedom are completely decoupled from the sampling disorder (there is no linking disorder). Hence, the scaling theory developed here should be valid whether the sampling disorder is self-averaging or not. However, our result still warns that the sample average of any observable involving $g$ is practically meaningless due to its broad distribution, which occurs in the critical region (regimes II and III) in annealed networks with $2< \gamma<3$ and the natural cutoff. \section{Discussion and Summary} We studied the critical behavior of the CP in annealed scale-free networks. For the degree exponent $\gamma>3$, the standard single-parameter FSS is found with various dynamic and static exponents which are independent of the cutoff exponent $\omega$ and also of $\gamma$. For highly heterogeneous networks with $\gamma<3$, there exist two different characteristic time scales, and their associated exponents depend not only on $\gamma$ but also on $\omega$. These results are contrasted with those in quenched scale-free networks, where a single-parameter FSS is found without any cutoff dependence even for $\gamma<3$ if the cutoff is not strong enough ($\omega<\gamma$)~\cite{Hong07,Ha07}. In the special case of $\omega=\gamma-1$ (natural cutoff), these two different FSS forms coincide with each other. Annealed networks may include the sampling disorder, which generates a strong sample-to-sample fluctuation in highly heterogeneous networks with the natural cutoff. In quenched networks, the linking disorder is inherent, which generates the density-density correlation in neighboring nodes through coupling with fluctuating variables. This correlation leads to the shift of the transition point of the CP model~\cite{CPS06,Ha07}. In addition, the linking disorder generates another type of sample-to-sample fluctuations, which cause a spreading of the transition points in finite systems. Hong, Ha, and Park~\cite{Hong07} showed that there exists a characteristic (droplet) size scale diverging as $N_q\sim \varepsilon^{-\bar\nu_q}$ with $1/\bar\nu_q=(\gamma-2)/(\gamma-1)$ for $\gamma<3$. For $N<N_q$ (or equivalently $\varepsilon < \varepsilon_q$ with $\varepsilon_q\sim N^{-(\gamma-2)/(\gamma-1)}$), the system feels the droplet length scale and the finite-size effect is dominant. As $\varepsilon_q > \varepsilon_c$ given in Eq.~(\ref{eq-epsc}), one may expect that the finite-size saturation induced by the droplet length scale comes in earlier (at $\varepsilon=\varepsilon_q$) in quenched networks than in annealed networks. Then, the cutoff dependence of the saturation density may disappear. However, as $\varepsilon_q < \varepsilon_*$ given in Eq.~(\ref{eq-eps1}), the cutoff-dependent density-decaying dynamics comes in before saturation. The linking disorder fluctuation may be responsible for the disappearance of this dynamics in the quenched networks, but this is just a speculation as yet. A full understanding of the FSS behavior in quenched networks needs further investigation. During the final stage of preparing this manuscript, Bogu\~n\'a, Castellano, and Pastor-Satorras posted a preprint on the cond-mat archive~\cite{BCPS08}, the results of which partially overlap with those presented here. \acknowledgments This work was supported by KOSEF grant Acceleration Research (CNRC) (Grant No. R17-2007-073-01001-0). This work was also supported by the Korea Research Foundation grant funded by MEST (Grant No. KRF-2006-003-C00122).
\section{Introduction} Birational geometry of a variety constructed as a GIT quotient is closely related to the variation of GIT. Cox rings are invariants playing an important role in this interaction. It turns out that Cox rings contain geometric, birational and arithmetic information of algebraic varieties. It is well-known that if the Cox ring of an algebraic variety is finitely generated then the variety enjoys ideal properties from the viewpoint of the minimal model program. Such varieties are called Mori dream spaces (cf. \cite{HK}). Recently, Cox rings of various varieties have been studied by many authors, for example, surfaces, moduli spaces of rational curves with marked points, wonderful varieties and log Fano varieties. See \cite{ADHL} and the references therein for more detail. However, it seems that not much is known about Cox rings of varieties of general type. Moreover, there is not much literature discussing Mori dream spaces of general type. Therefore it is interesting to find more examples of Mori dream spaces which are varieties of general type and to investigate their properties. In general, it is hard to compute the effective, nef or semiample cones of a given variety of general type and hence it is difficult to determine whether it is a Mori dream space or not. Therefore we will focus on special classes of algebraic varieties of general type in this paper. Our candidates for Mori dream spaces of general type are minimal surfaces of general type with $p_g=0.$ These surfaces have been studied for a long time and there are many works about them (see \cite{BCP} for a survey). Moreover, it turns out that there are some similarities between del Pezzo surfaces and minimal surfaces of general type with $p_g=0.$ It is well-known that del Pezzo surfaces are Mori dream spaces. Therefore it is natural to ask the following questions. \begin{question} Let $X$ be a minimal surface of general type with $p_g=0.$ When is the effective cone of $X$ a rational polyhedral cone? When are the nef cone and the semiample cone of $X$ the same? When is the Cox ring of $X$ finitely generated? \end{question} In this paper we provide examples of Mori dream surfaces which are minimal surfaces of general type with $p_g=0$ and $2 \leq K^2 \leq 9.$ We also compute their effective cones explicitly. It is easy to provide examples and compute their effective cones when $K^2$ is large, but this task becomes more difficult when $K^2$ gets smaller. Indeed, we use specific structures of our examples to compute the effective cones and to prove that they are Mori dream surfaces. \begin{theorem}\label{Main} The following minimal surfaces of general type with $p_g=0$ are Mori dream surfaces and their effective cones are computed explicitly $($see the subsection in each case$)$. \begin{enumerate} \item Fake projective planes $($all have $K^2=9)$ \item Surfaces isogenous to a higher product of unmixed type $($all have $K^2=8)$ \item Inoue surfaces with $K^2=7$ \item Surfaces with $K^2=7$ constructed by Y.
Chen \item Kulikov surfaces with $K^2=6$ \item Burniat surfaces with $2 \leq K^2 \leq 6$ \item Product-quotient surfaces with $K^2=6, G=D_4 \times \mathbb{Z}/2\mathbb{Z}$ \item A family of Keum-Naie surfaces which are product-quotient surfaces with $K^2=4, G=\mathbb{Z}/4\mathbb{Z} \times \mathbb{Z}/2\mathbb{Z}$ \end{enumerate} \end{theorem} In particular, there is a minimal surface of general type with $p_g=0$ which is a Mori dream space for every $2 \leq K^2 \leq 9.$ On the other hand, we do not know an example of a minimal surface of general type with $p_g=0$ which is not a Mori dream space. It would be an interesting task to determine which minimal surfaces of general type with $p_g=0$ are Mori dream surfaces. \begin{problem} Classify Mori dream spaces among minimal surfaces of general type with $p_g=0$. \end{problem} We believe that an answer to this problem could be an important step toward the classification of surfaces of general type with $p_g=0$, which remains one of the most important and difficult problems in algebraic geometry. It is easy to see that for any Mori dream surface $X$ the bounded negativity conjecture holds, that is, there is a nonnegative number $b_X$ such that $C^2 \geq -b_X$ for any irreducible reduced curve $C$. (For the historical background of the conjecture we refer to \cite{BHKKMSRS}.) For the examples in Theorem \ref{Main}, by refining the explicit computation of effective cones we are able to compute all negative curves explicitly in each case. Here a negative curve means an irreducible reduced curve with negative self-intersection. \begin{theorem}[Negative Curves]\label{main} For each surface in Theorem \ref{Main} all negative curves are computed explicitly. In each case $(1)-(8)$ the number of negative curves and the list of the pairs $(C^2, p_a(C))$ for negative curves $C$ are given as follows. Here $m(C^2, p_a(C))$ means $m$ copies of $(C^2, p_a(C)).$ \begin{enumerate} \item None \item None \item $3: 2(-1,1), (-1,2)$ \item $4: (-1,1), (-1,2), (-1,3), (-4,2)$ \item $6: 6(-1,1)$ \item Burniat surfaces with $2 \leq K^2 \leq 6$ \begin{enumerate} \item $6 : 6(-1,1)$ if $K^2 = 6;$ \item $10 : 9(-1,1), (-4,0)$ if $K^2 = 5;$ \item $16 : 12(-1,1), 4(-4,0)$ if non-nodal with $K^2 = 4;$ \item $13: 10(-1,1), 2(-4,0), (-2,0)$ if nodal with $K^2 = 4;$ \item $15 : 9(-1,1), 3(-4,0), 3(-2,0)$ if $K^2 = 3;$ \item $16: 6(-1,1), 6(-2,0), 4(-4,0)$ if $K^2 = 2;$ \end{enumerate} \item $4: 2(-2, 0), (-1,1), (-1, 2)$ \item $8: 4(-1, 1), 4(-2, 0)$ \end{enumerate} \end{theorem} \bigskip {\bf Notations.} We will work over $\mathbb{C}.$ When $G$ is an abelian group, $G_{\mathbb{R}}$ (resp. $G_{\mathbb{Q}}$) will denote $G \otimes_{\mathbb{Z}} {\mathbb{R}}$ (resp. $G \otimes_{\mathbb{Z}} {\mathbb{Q}}$). A variety will mean a normal projective variety. If $X$ is a normal $\mathbb{Q}$-factorial variety, then we will use the following notations. \\ $K_X$ : the canonical divisor on $X.$ \\ $Cl(X)$ : the divisor class group of $X.$ \\ $Pic(X)$ : the Picard group of $X.$ \\ $\rho(X)$ : the Picard number of $X.$ \\ $Eff(X)$ : the effective cone of $X$. \\ $Nef(X)$ : the nef cone of $X$. \\ $Mov(X)$ : the movable cone of $X$. \\ $SAmp(X)$ : the semiample cone of $X$. \\ Let $D_1, D_2$ be two divisors on $X.$ We write $D_1 \sim D_2$ (resp. $D_1 \sim_{num} D_2$) to denote that they are linearly (resp. numerically) equivalent.
\\ {\bf Acknowledgements.} The second named author thanks Ingrid Bauer, Fabrizio Catanese, Sung Rak Choi, June Huh, DongSeon Hwang, Jinhyung Park, Yongjoo Shin and Joonyeong Won for helpful conversations and discussions. Part of this work was done when he was a research fellow of KIAS. \section{Preliminaries} In this section we recall basic definitions and results about effective, nef and semiample cones of algebraic surfaces and about Mori dream spaces, especially Mori dream surfaces. We also prove a useful criterion which provides many new examples of Mori dream surfaces. \subsection{Effective, nef and semiample cones of surfaces} Effective, nef, movable and semiample cones of algebraic varieties are key tools of birational geometry (cf. \cite{KMM,KM}). \begin{definition} Let $X$ be a normal projective variety and $D$ be a Weil divisor on $X.$ We will use $Bs|D|$ to denote the base locus of $|D|.$ \\ (1) The stable base locus is the intersection of the base loci of all multiples of $D,$ i.e. $$ \mathbb{B} |D|:= \bigcap_{k \in \mathbb{Z}_{\geq 1}} Bs|kD|. $$ (2) The effective cone $Eff(X)$ is the convex cone generated by effective divisors. We will use $\overline{Eff(X)}$ to denote the closure of $Eff(X)$ in $Cl(X)_{\mathbb{R}}.$ \\ (3) The nef cone $Nef(X)$ is the convex cone generated by nef divisors. \\ (4) A Weil divisor $D$ is movable if $\mathbb{B} |D|$ has codimension at least 2. The movable cone $Mov(X)$ is the convex cone generated by movable divisors. \\ (5) A Weil divisor $D$ is semiample if $\mathbb{B} |D|$ is empty. The semiample cone $SAmp(X)$ is the convex cone generated by semiample divisors. \end{definition} Let $X$ be a smooth projective surface with $q=0.$ Then the Picard group $Pic(X)$ is a finitely generated abelian group and $Pic(X)_{\mathbb{R}}$ is a finite dimensional vector space. \begin{proposition}\cite{AL} Let $X$ be a smooth projective surface with $q=0.$ Then we have the following inclusions. $$ SAmp(X) \subset Mov(X) \subset Nef(X) \subset \overline{Eff(X)} $$ \end{proposition} In order to check whether a given surface is Mori dream or not, the first thing to do is to determine whether the effective cone of the surface is a rational polyhedral cone or not. Sometimes we can compute the effective cone of a surface explicitly. Let us recall a helpful proposition of Artebani and Laface. \begin{proposition}\cite[Proposition 1.1]{AL} Let $X$ be a smooth projective surface with $\rho(X) \geq 3$ whose effective cone is a rational polyhedral cone. Then $$ Eff(X) = \sum_{[C] \in Exc(X)} \mathbb{R}_{\geq 0}[C] $$ where $Exc(X)$ is the set of classes of integral curves $C$ of $X$ with $C^2 < 0.$ \end{proposition} Let us define a negative curve on a smooth projective surface as follows. \begin{definition} Let $X$ be a smooth projective surface. A negative curve $C$ is an irreducible reduced curve on $X$ such that $C^2 < 0.$ \end{definition} It is well known that the nef cone is dual to the closure of the effective cone. Therefore if $Eff(X)$ is a rational polyhedral cone then $Nef(X)$ is also a rational polyhedral cone. In this case, it is sufficient to prove that the extremal rays of $Nef(X)$ are semiample in order to prove that $X$ is a Mori dream space. \subsection{Mori dream space} Hu and Keel studied the relation between the minimal model program and variation of GIT and defined the notion of Mori dream space in \cite{HK}. Let us recall the definition of Mori dream space.
\begin{definition}\cite{HK} A variety $X$ is a Mori dream space if \\ (1) $X$ is a $\mathbb{Q}$-factorial variety and $h^1(X,\mathcal{O}_X)=0,$ \\ (2) the nef cone of $X$ is the convex cone generated by finitely many semiample classes, \\ (3) there are finitely many birational maps $\phi_i : X \dashrightarrow X_i, 1 \leq i \leq m,$ which are isomorphisms in codimension 1, such that each $X_i$ is a variety satisfying (1) and (2), and such that if $D$ is a movable divisor then there are an index $1 \leq i \leq m$ and a semiample divisor $D_i$ on $X_i$ with $D=\phi^*_iD_i.$ \end{definition} Let us recall the definition of the Cox ring. \begin{definition} Let $X$ be a normal projective $\mathbb{Q}$-factorial variety with finitely generated $Cl(X)$. Let $\Gamma \subset Cl(X)$ be a free abelian group such that the inclusion map induces an isomorphism $\Gamma \otimes \mathbb{Q} \cong Cl(X) \otimes \mathbb{Q}.$ Then a Cox ring of $X$ (associated to $\Gamma$) is the multi-graded ring defined as follows. $$ Cox(X) = \bigoplus_{D \in \Gamma}H^0(X,\mathcal{O}_X(D)). $$ \end{definition} \begin{remark}\cite{HK, Okawa} Note that the definition of a Cox ring depends on the choice of $\Gamma \subset Cl(X).$ However, it is well-known that the finite generation of a Cox ring of $X$ does not depend on the choice of $\Gamma \subset Cl(X).$ \end{remark} It is well-known that a variety $X$ is a Mori dream space if and only if the Cox ring of $X$ is finitely generated (cf. \cite{HK}). \begin{theorem}\cite{HK} Let $X$ be a $\mathbb{Q}$-factorial variety such that $Pic(X)$ is a finitely generated abelian group. Then the following are equivalent. \\ (1) $X$ is a Mori dream space. \\ (2) $Cox(X)$ is a finitely generated ring. \end{theorem} Let us recall Okawa's theorem, which we will use frequently in this paper. \begin{theorem}\cite{Okawa} Let $f : X \to Y$ be a surjective morphism between normal $\mathbb{Q}$-factorial projective varieties and let $X$ be a Mori dream space. Then $Y$ is also a Mori dream space. \end{theorem} \subsection{Mori dream surfaces} There are simple criteria for a surface to be a Mori dream space. Artebani, Hausen and Laface proved the following theorem in \cite{AHL}. \begin{theorem}\cite[Theorem 2.5]{AHL} Let $X$ be a normal complete surface with finitely generated $Cl(X).$ Then the following are equivalent. \\ (1) $Cox(X)$ is finitely generated. \\ (2) The effective cone $Eff(X) \subset Cl(X)_{\mathbb{R}}$ and the movable cone $Mov(X) \subset Cl(X)_{\mathbb{R}}$ are rational polyhedral cones and $Mov(X)=SAmp(X).$ \end{theorem} As a corollary we have the following helpful criterion for finite generation of Cox rings of $\mathbb{Q}$-factorial surfaces. \begin{corollary}\cite[Corollary 2.6]{AHL} Let $X$ be a $\mathbb{Q}$-factorial projective surface with finitely generated $Cl(X).$ Then the following are equivalent. \\ (1) $Cox(X)$ is finitely generated. \\ (2) The effective cone $Eff(X) \subset Cl(X)_{\mathbb{R}}$ is a rational polyhedral cone and $Nef(X)=SAmp(X).$ \end{corollary} There are many examples of surfaces with $\kappa \leq 0$ which are Mori dream spaces. For $\kappa = -\infty,$ it is well-known that log del Pezzo surfaces are Mori dream surfaces. \begin{theorem}\cite[Corollary 1.3.2]{BCHM} Let $X$ be a log Fano variety. Then $X$ is a Mori dream space. In particular, log del Pezzo surfaces are Mori dream surfaces. \end{theorem} For $\kappa = 0,$ the following criterion is well-known. \begin{theorem}\cite{AHL} Let $X$ be a K3 surface or an Enriques surface.
Then $X$ is a Mori dream surface if and only if $Aut(X)$ is a finite group. \end{theorem} Let us recall the following remarks (cf. \cite{Fulton1}). \begin{remark} (1) Let $f : X \to Y$ be a finite morphism between two smooth surfaces; then $K_X=f^*(K_Y)+R$ where $R$ is the ramification divisor. \\ (2) Let $f : X \to Y$ be a finite flat morphism of degree $d.$ Then the composition $$ A_*Y \to A_*X \to A_*Y $$ is multiplication by $d.$ \\ (3) Suppose that a finite group $G$ acts on $X$ and the quotient variety is $Y.$ Then there is a canonical isomorphism $$ (A_*Y)_{\mathbb{R}} \cong (A_*X)^G_{\mathbb{R}} $$ and the natural map $$ (A_*Y)_{\mathbb{R}} \to (A_*X)^G_{\mathbb{R}} \to (A_*X) _{\mathbb{R}} \to (A_*Y) _{\mathbb{R}} $$ is multiplication by $|G|.$ \end{remark} From the above results we get the following proposition. \begin{proposition}\label{criterion} Suppose that $\pi : X \to Y$ is a finite flat morphism of degree $d$ between normal $\mathbb{Q}$-factorial projective surfaces with $h^1(X,\mathcal{O}_X)=0$ such that $\pi^* : Pic(Y) _{\mathbb{R}} \cong Pic(X) _{\mathbb{R}}$ is an isomorphism whose inverse is $\frac{1}{d} \pi_* : Pic(X) _{\mathbb{R}} \cong Pic(Y) _{\mathbb{R}}.$ Then we have the following. \\ (1) $X$ is a Mori dream surface if and only if $Y$ is a Mori dream surface. \\ (2) The effective, nef and semiample cones of $X$ are the pull-backs of those of $Y.$ \\ (3) When $Eff(X)$ (or $Eff(Y)$) is a rational polyhedral cone, every negative curve on $X$ is a pull-back of a negative curve on $Y.$ Moreover, the pull-back of a negative curve on $Y$ does not split. \end{proposition} \begin{proof} Note that the isomorphism $\pi^* : Pic(Y) _{\mathbb{R}} \cong Pic(X) _{\mathbb{R}}$ sends $Eff(Y)$ (resp. $Nef(Y)$) into $Eff(X)$ (resp. $Nef(X)$). Conversely, the isomorphism $\pi_* : Pic(X) _{\mathbb{R}} \cong Pic(Y) _{\mathbb{R}}$ sends $Eff(X)$ into $Eff(Y).$ Therefore we can identify $Eff(X)$ and $Eff(Y)$ via the isomorphism $\pi^* : Pic(Y) _{\mathbb{R}} \cong Pic(X) _{\mathbb{R}}.$ \\ When $X$ is a Mori dream surface, we see that $Y$ is also a Mori dream surface from Okawa's theorem (cf. \cite{Okawa}). Now suppose that $Y$ is a Mori dream surface. Recall that we have an isomorphism $\pi^* : Pic(Y) _{\mathbb{R}} \cong Pic(X) _{\mathbb{R}}.$ Via this isomorphism we can identify the effective cones and nef cones of $X$ and $Y.$ Because $Y$ is a Mori dream surface, we see that the effective cone of $X$ is also a rational polyhedral cone. Let $D$ be a nef divisor in $Pic(X) _{\mathbb{R}}.$ Because every divisor in $Pic(X) _{\mathbb{R}}$ is a pull-back of a divisor in $Pic(Y) _{\mathbb{R}}$ and $\pi$ is surjective, we see that $D$ is the pull-back of a nef divisor $C$ in $Pic(Y) _{\mathbb{R}}$. Because $Y$ is a Mori dream space, we see that $C$ is semiample. Suppose that $\mathbb{B}|D|$ is nonempty. Then $\pi_*\mathbb{B}|D|$ is contained in $\mathbb{B}|C|,$ which gives a contradiction. Therefore we see that $X$ is a Mori dream surface. \\ Recall that a negative curve on $X$ lies on an extremal ray of $Eff(X).$ From the above identification of $Eff(X)$ and $Eff(Y)$ via the isomorphism $\pi^* : Pic(Y) _{\mathbb{R}} \cong Pic(X) _{\mathbb{R}},$ we see that there is a negative $\mathbb{Q}$-divisor on $Y$ whose pull-back is the given negative curve on $X.$ Let $C'$ be a negative curve on $Y.$ Suppose that $\pi^*(C')$ is not irreducible.
Then there is an irreducible component $D_1$ of $\pi^*(C')$ such that $D_1^2<0.$ Let $D_2$ be another irreducible component of $\pi^*(C').$ Because $D_1^2<0$ and $D_1 \cdot D_2 \geq 0,$ we see that $D_1, D_2$ are linearly independent vectors in $Pic(X)_{\mathbb{R}}$ whose push-forwards are proportional to the same element $C'$ of $Pic(Y)_{\mathbb{R}}.$ This gives a contradiction to the fact that $\pi^* : Pic(Y) _{\mathbb{R}} \cong Pic(X) _{\mathbb{R}}$ is an isomorphism. Therefore we see that the pull-back of a negative curve on $Y$ does not split. \end{proof} \begin{remark} We found that Okawa obtained a more general result than the first part of the above proposition via a different method in \cite{Okawa}. \end{remark} \section{Minimal surfaces of general type with $p_g=0, 7 \leq K^2 \leq 9.$} In this section, we discuss Mori dream surfaces of general type with $p_g=0, 7 \leq K^2 \leq 9.$ Let $X$ be a surface of general type with $p_g=0, 7 \leq K^2 \leq 9.$ Because the Picard rank of $X$ is small, it is relatively easier to check whether $X$ is a Mori dream space or not. \subsection{Fake projective planes} Minimal surfaces of general type with $p_g=0, K^2=9$ are called fake projective planes. Fake projective planes were classified by the works of Prasad and Yeung and of Cartwright and Steger. \begin{theorem}\cite{CS,PY, PY_addendum} There are exactly 100 fake projective planes. \end{theorem} From Noether's formula we see that the Picard rank of any fake projective plane is 1. \begin{proposition} Let $X$ be a normal $\mathbb{Q}$-factorial projective variety with $h^1(X,\mathcal{O}_X)=0$ and Picard number 1. Then $X$ is a Mori dream space. \end{proposition} \begin{proof} Because the Picard number of $X$ is 1, we can choose $\Gamma = \langle D \rangle \subset Cl(X)$ where $D$ is an ample divisor. Therefore the Cox ring of $X$ with respect to $\Gamma$ is isomorphic to the section ring $R(X,D)$ and hence finitely generated. \end{proof} Therefore we have the following conclusion. \begin{corollary} Every fake projective plane is a Mori dream space. \end{corollary} \subsection{Fake quadrics} Minimal surfaces of general type with $p_g=0, K^2=8$ are called fake quadrics. Unlike the case of fake projective planes, we do not know how to classify fake quadrics. We also do not know whether all fake quadrics are Mori dream spaces or not. \begin{question} Let $X$ be a fake quadric. When is $X$ a Mori dream space? \end{question} Typical examples of fake quadrics are surfaces isogenous to a higher product. Let us recall their definition. \begin{definition}\cite{Catanese}\label{surface isogenous to a higher product} A surface $X$ is isogenous to a higher product if $X$ admits a finite unramified covering which is isomorphic to a product of two curves whose genera are greater than or equal to 2. \end{definition} Catanese proved in \cite{Catanese} that when $X$ is a surface isogenous to a higher product then it belongs to one of two possible types (unmixed type and mixed type). When $X$ is a surface isogenous to a higher product, there are two curves $C, D$ and a finite group $G$ acting on them. The diagonal action of $G$ on $C \times D$ is free and $X$ is isomorphic to $(C \times D)/G.$ These surfaces were classified by Bauer, Catanese and Grunewald in \cite{BCG}. They form an important class of surfaces of general type with $p_g=0, K^2=8.$ \begin{lemma} Let $X$ be a surface isogenous to a higher product of unmixed type with $p_g=0.$ Then $X$ is a Mori dream space.
The effective cone and the nef cone are generated by the fibers of $X \to C/G$ and the fibers of $X \to D/G.$ \end{lemma} \begin{proof} Let $X$ be a surface isogenous to a higher product with $p_g=q=0$ of unmixed type. Then we have the following diagram. \begin{displaymath} \xymatrix{ & \ar[ld] C \times D \ar[d] \ar[rd] & \\ C \ar[d] & \ar[ld] X \cong (C \times D)/G \ar[rd] & \ar[d] D \\ C/G \cong \mathbb{P}^1 & & D/G \cong \mathbb{P}^1 } \end{displaymath} Then it is easy to see that $Nef(X)$ is the convex cone generated by the two line bundles which are pull-backs of ample line bundles on the two copies of $\mathbb{P}^1.$ Because these bundles give fibrations, we see that every nef divisor is semiample. Therefore $X$ is a Mori dream space. \end{proof} \begin{question} (1) Let $S$ be a surface isogenous to a higher product of mixed type. Is $S$ a Mori dream surface? \\ (2) Let $S$ be an irreducible fake quadric. Is $S$ a Mori dream surface? \end{question} \subsection{$K^2=7$ cases} There are few explicitly constructed examples of minimal surfaces of general type with $p_g=0$ and $K^2=7.$ A famous family of such surfaces is the family of Inoue surfaces. Recently, Chen constructed a new family of such surfaces in \cite{Chen1}. We will prove that they are Mori dream surfaces. \\ Let us recall the construction of Inoue surfaces, the first examples of minimal surfaces of general type with $p_g=0, K^2=7.$ Inoue considered the product of four elliptic curves with a natural $(\mathbb{Z}/2\mathbb{Z})^5$-action and smooth invariant complete intersections of divisors of degree $(2,2,2,0),(0,0,2,2).$ Then he constructed Inoue surfaces with $p_g=0, K^2=7$ as free quotients of these complete intersections. Mendes Lopes and Pardini proved that Inoue surfaces can be realized as bidouble coverings over 4-nodal cubic surfaces. Let us recall their construction. We will follow the exposition of \cite{MLP}; see \cite{BC3, Inoue, MLP} for more details. \\ Consider the quadrilateral $p_1p_2p_3p_4$ in $\mathbb{P}^2.$ Let $p_5$ be the intersection of the two lines $\overline{p_1p_2}$ and $\overline{p_3p_4}$ and let $p_6$ be the intersection of the two lines $\overline{p_1p_4}$ and $\overline{p_2p_3}.$ Let $W \to \mathbb{P}^2$ be the blowup of these six points. Let $\overline{\Delta_1}$ be the strict transform of the line $\overline{p_1p_3},$ $\overline{\Delta_2}$ be the strict transform of the line $\overline{p_2p_4},$ and $\overline{\Delta_3}$ be the strict transform of the line $\overline{p_5p_6}.$ They are $(-1)$-curves on $W.$ Let $\overline{c_1}$ be the strict transform of a general conic through $p_2,p_4,p_5,p_6,$ $\overline{c_2}$ be the strict transform of a general conic through $p_1,p_3,p_5,p_6$ and $\overline{c_3}$ be the strict transform of a general conic through $p_1,p_2,p_3,p_4.$ Let $n_i$ be the strict transforms of the lines $\overline{p_ip_{i+1}}.$ They are the only nodal curves on $W$ and we let $W \to Y$ be the contraction of these four nodal curves. Let $D_1=\overline{\Delta}_1+\overline{c_2}+n_1+n_2,$ $D_2=\overline{\Delta}_2+\overline{c_3},$ $D_3=\overline{\Delta}_3+\overline{c_1}+\overline{c'_1}+n_3+n_4$ where $\overline{c_1},\overline{c'_1} \in |\overline{c_1}|,\overline{c_2} \in |\overline{c_2}|,\overline{c_3} \in |\overline{c_3}|$ are general elements in the corresponding linear systems.
Then Mendes Lopes and Pardini showed that $D_1,D_2,D_3$ define a smooth bidouble covering $V$ of $W$ whose branch locus is $D_1+D_2+D_3.$ Then $V$ has eight exceptional curves and contracting these exceptional curves gives the Inoue surface $X.$ Moreover, $X$ is a bidouble cover over $Y.$ Therefore we have the following commutative diagram. \begin{displaymath} \xymatrix{ V \ar[r] \ar[d] & X \ar[d] \\ W \ar[r] & Y } \end{displaymath} \begin{proposition} Let $X$ be an Inoue surface. Then $X$ is a Mori dream surface. \end{proposition} \begin{proof} Let $X$ be such an Inoue surface and $\pi : X \to Y$ be the bidouble covering. From the construction of \cite{MLP}, $W$ is a weak del Pezzo surface and we see that $Y$ is a surjective image of the Mori dream surface $W.$ Therefore $Y$ is a Mori dream space with $\rho(Y)=3.$ Then by the construction we see that $\pi^* : Pic(Y) _{\mathbb{R}} \to Pic(X) _{\mathbb{R}}$ is an isomorphism. Therefore we have the desired results. \end{proof} Moreover, we can compare the canonical bundles of $X$ and $Y$ as follows. \begin{lemma}\cite{MLP} We have the following equivalence $$ 2K_X \sim \pi^*(-K_Y+c_1) $$ where $c_1$ is the image of $\overline{c_1}$ in $Y.$ \end{lemma} Let us compute the effective and nef cones of Inoue surfaces. Because $\rho \geq 3,$ we need some computation in order to determine the shapes of the effective and nef cones. We have a simple strategy to compute effective cones and nef cones. Suppose that we know several effective divisors $E_1, \cdots, E_k$ on $X.$ Let $V=Cl(X)_{\mathbb{R}}$ and let $A \subset V$ be the rational polyhedral cone generated by these effective divisors. Suppose that we can check that the extremal rays of $A^\vee \subset V^\vee$ are nef divisors. Because $A \subset Eff(X)$ we have $Nef(X) \subset A^\vee \subset Nef(X),$ and hence we see that $A=Eff(X)$ and $A^\vee=Nef(X).$
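Since this strategy is a purely finite-dimensional convex-geometry check, it can be verified mechanically. The following Python sketch (an illustration assuming NumPy, not part of the original argument) carries it out for the Inoue case treated in the next proposition, using the intersection numbers $\Delta_i^2=-1$ and $\Delta_i\cdot\Delta_j=1$ $(i\neq j)$ established in its proof: \begin{verbatim}
import numpy as np

# Intersection form on Pic(Y)_R in the basis Delta_1, Delta_2, Delta_3.
Q = np.array([[-1,  1,  1],
              [ 1, -1,  1],
              [ 1,  1, -1]])

gens = np.eye(3, dtype=int)        # effective generators Delta_1..Delta_3
nefs = np.array([[1, 1, 0],        # Delta_1 + Delta_2
                 [0, 1, 1],        # Delta_2 + Delta_3
                 [1, 0, 1]])       # Delta_3 + Delta_1

pairings = nefs @ Q @ gens.T       # (candidate, generator) pairing table
print(pairings)
# Every entry is >= 0, so the candidates are nef; each candidate vanishes
# against exactly two generators, so they span the extremal rays of A^vee.
assert (pairings >= 0).all()
\end{verbatim}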
\begin{proposition} Let $X$ be an Inoue surface. Then the effective cone of $X$ has three generators which are pull-backs of the three $(-1)$-curves on $Y.$ The pull-backs of the three $(-1)$-curves are two elliptic curves and one genus 2 curve whose self-intersection numbers are all $-1.$ The nef cone of $X$ has three generators which are pull-backs of three nef divisors of $Y.$ \end{proposition} \begin{proof} Describing the nef cone and effective cone of an Inoue surface $X$ is equivalent to describing the same cones for the 4-nodal cubic $Y.$ Because $Y$ is obtained by contracting 4 nodal curves from $Bl_{6 pts} \mathbb{P}^2$ and we know the configuration of curves on this weak del Pezzo surface, we can compute the nef cone and effective cone of $Y.$ From \cite{MLP} we see that $Y$ is the contraction of the four $(-2)$-curves of the weak del Pezzo surface $W$ and that there are three $(-1)$-curves on $Y.$ Let $\Delta_1$ (resp. $\Delta_2,$ $\Delta_3$) be the image of $\overline{\Delta_1}$ (resp. $\overline{\Delta_2},$ $\overline{\Delta_3}$). Because these curves are disjoint from the nodal curves in $W,$ they are $(-1)$-curves on $Y.$ For any triple $\{ i,j,k \} = \{ 1,2,3 \}$ we have $$ (\Delta_i + \Delta_j) \cdot \Delta_i = 0, \qquad (\Delta_i + \Delta_j) \cdot \Delta_j = 0, \qquad (\Delta_i + \Delta_j) \cdot \Delta_k > 0, $$ so $\Delta_1+\Delta_2, \Delta_2+\Delta_3, \Delta_3+\Delta_1$ are nef divisors on $Y.$ The rational polyhedral cone generated by $\Delta_1,\Delta_2,\Delta_3$ is a subcone of $Eff(Y).$ Then the above computation shows that $\Delta_1, \Delta_2, \Delta_3$ generate the effective cone of $Y$ and $\Delta_1+\Delta_2, \Delta_2+\Delta_3, \Delta_3+\Delta_1$ generate the nef cone of $Y.$ Note that $\Delta_1, \Delta_2, \Delta_3$ lie in the branch locus of the map $\pi : X \to Y.$ Therefore $\pi^*\Delta_i = 2\widetilde{\Delta_i}$ for an irreducible divisor $\widetilde{\Delta_i}$ for every $i=1,2,3.$ Because the self-intersection of $\pi^*(\Delta_i)$ is $-4,$ we have $\widetilde{\Delta_i}^2=-1$ for $i=1,2,3.$ Moreover we have the following identity $$ K_X \cdot \widetilde{\Delta_i} = \frac{1}{4} \pi^*(-K_Y+\Delta_2+\Delta_3) \cdot \pi^*{\Delta_i} = (-K_Y+\Delta_2+\Delta_3) \cdot {\Delta_i} $$ for all $i=1,2,3.$ Because $\widetilde{\Delta_i}$ lies on the 1-dimensional fixed locus of an involution acting on $X,$ we see that it is a smooth curve. Therefore the $\widetilde{\Delta_i}$ are two elliptic curves and a genus $2$ curve whose self-intersections are all $-1.$ They generate $Eff(X).$ Similarly, the pull-backs of $\Delta_1+\Delta_2, \Delta_2+\Delta_3, \Delta_3+\Delta_1$ generate $Nef(X).$ \end{proof} Via a similar method we can prove that Chen's surfaces are Mori dream surfaces. Let us briefly recall the construction of Chen in \cite{Chen1}. Let $p_0,p_1,p_2,p_3$ be points in $\mathbb{P}^2$ in general position and let $p'_i$ be the infinitely near point over $p_i$ which corresponds to the line $\overline{p_0p_i}$ for $i=1,2,3.$ Let $p$ be a point located outside the lines $\overline{p_0p_i},$ $\overline{p_ip_{i+1}}$ for $i=1,2,3$ and the conics $c_1, c_2, c_3,$ where $c_i$ is the unique conic passing through $p_i,p_{i+1},p'_{i+1},p_{i+2},p'_{i+2}$ in $\mathbb{P}^2.$ Let $W \to \mathbb{P}^2$ be the blowup of these eight points. Let $E_i$ (resp. $E_i'$) be the total transform of $p_i$ (resp. $p_i'$) and $H$ be the pull-back of a line in $\mathbb{P}^2.$ Let $\overline{\Gamma}$ be the strict transform of the line $\overline{p_0p}$ and $\overline{E}$ (resp. $E_0$) be the total transform of $p$ (resp. $p_0$). The linear system $|-2K_W-\overline{\Gamma}|$ (resp. $|-2K_W-\overline{E}|$) consists of a single $(-1)$-curve, $\overline{B_2}$ (resp. $\overline{B_3}$). \\ Consider the curves $C_i \sim H-E_0-E_i-E_i'$ and $C_i' \sim E_i-E_i'$ for $i=1,2,3.$ Chen showed that $W$ is a weak del Pezzo surface of degree 1 and that the above six curves are the only nodal curves on $W.$ Let $Y$ be the surface obtained from $W$ by contracting these six nodal curves. Note that $\overline{E}, \overline{\Gamma}, \overline{B_2}, \overline{B_3}$ are disjoint from the six nodal curves. \\ Then Chen showed that the three divisors $F_b+\overline{\Gamma}+C_1+C_1'+C_2+C_2', \overline{B_2}+C_3+C_3', \overline{B_3}$ define a bidouble covering $\pi : V \to W$ branched over them, where $F_b$ is a smooth fiber of the pencil of lines passing through $p_0.$ There are $(-1)$-curves on $V$ and contracting these $(-1)$-curves we obtain $X,$ which is a smooth minimal surface of general type with $p_g=0, K^2=7.$ Then $X$ is a bidouble cover over $Y.$ Let us call such an $X$ a Chen surface. Indeed, it is easy to prove that Chen surfaces are Mori dream surfaces. \begin{displaymath} \xymatrix{ V \ar[r] \ar[d] & X \ar[d] \\ W \ar[r] & Y } \end{displaymath} \begin{proposition} Let $X$ be a Chen surface. Then $X$ is a Mori dream surface.
\end{proposition} \begin{proof} Because $Y$ can be obtained by contracting six nodal curves from a weak del Pezzo surface of degree 1, we see that $Y$ is a Mori dream surface with $\rho(Y)=3.$ From the construction $X$ admits a finite map $X \to Y$ and $\rho(X)=3.$ Therefore $X$ is a Mori dream surface. \end{proof} Now let us compute the effective cones and nef cones of Chen surfaces. Chen computed the intersection numbers between $\overline{E}, \overline{\Gamma}, \overline{B_2}, \overline{B_3}$ as follows. \begin{center} \begin{tabular}{|c|c|c|c|c|} \hline $\cdot$ & $\overline{E}$ & $\overline{\Gamma}$ & $\overline{B_2}$ & $\overline{B_3}$ \\ \hline $\overline{E}$ & -1 & 1 & 1 & 3 \\ \hline $\overline{\Gamma}$ & 1 & -1 & 3 & 1 \\ \hline $\overline{B_2}$ & 1 & 3 & -1 & 1 \\ \hline $\overline{B_3}$ & 3 & 1 & 1 & -1 \\ \hline \end{tabular} \end{center} \bigskip Because they are disjoint from the six nodal curves, these curves descend to curves in $Y$ with the same intersection numbers. Let $E$, $\Gamma$, $B_2$, $B_3$ be their images in $Y.$ Of course, the intersection numbers of $E,\Gamma,B_2,B_3$ are the same as above. Now we can describe the nef cones and effective cones of Chen surfaces explicitly. \begin{proposition} Let $Y$ be the degree 1 surface described as above. Then $Eff(Y)$ is the rational polyhedral cone generated by $E,\Gamma,B_2,B_3$ and $Nef(Y)$ is the rational polyhedral cone generated by $\Gamma+B_3,B_2+B_3,E+\Gamma,E+B_2.$ \end{proposition} \begin{proof} From \cite{Chen1} we see that $Y$ is the contraction of the six $(-2)$-curves of the weak del Pezzo surface $W$ and that there are four $(-1)$-curves $E, \Gamma, B_2, B_3$ on $Y.$ From the above intersection numbers we see that $\Gamma+B_3,B_2+B_3,E+\Gamma,E+B_2$ are nef divisors. We can directly check that the two cones are dual to each other. Therefore we obtain the desired result from our strategy. \end{proof} Now we know the extremal rays of $Eff(Y).$ Let us discuss their pull-backs. First we can compare the canonical divisors. \begin{lemma}\cite{Chen1} We have the following equivalence. $$ 2K_X \sim \pi^*(-2K_Y + \Gamma) $$ \end{lemma} Now we have the following conclusion. \begin{proposition} Let $X$ be a Chen surface with $K^2=7.$ Then the effective cone of $X$ has four generators which are pull-backs of the four $(-1)$-curves on $Y.$ The pull-backs of the four $(-1)$-curves are three curves with self-intersection $-1$ whose arithmetic genera are $1,2,3$ and one curve of arithmetic genus $2$ whose self-intersection number is $-4.$ The nef cone of $X$ has four generators which are pull-backs of four nef divisors on $Y.$ \end{proposition} \begin{proof} Note that $\Gamma, B_2, B_3$ lie in the branch locus of the map $\pi : X \to Y.$ Therefore $\pi^*\Gamma = 2\widetilde{\Gamma}$ for an irreducible divisor $\widetilde{\Gamma}$ and $\pi^*B_i = 2\widetilde{B_i}$ for an irreducible divisor $\widetilde{B_i}$ for $i=2,3.$ Because the self-intersections of these curves are all $-4,$ we have $\widetilde{\Gamma}^2=\widetilde{B_2}^2=\widetilde{B_3}^2=-1.$ Moreover we have the following identities.
$$ K_X \cdot \widetilde{\Gamma}= \frac{1}{4} \pi^*(-2K_Y + \Gamma) \cdot \pi^*(\Gamma) = (-2K_Y + \Gamma) \cdot \Gamma = 1 $$ $$ K_X \cdot \widetilde{B_2}= \frac{1}{4} \pi^*(-2K_Y + \Gamma) \cdot \pi^*(B_2) = (-2K_Y + \Gamma) \cdot B_2 = 5 $$ $$ K_X \cdot \widetilde{B_3}= \frac{1}{4} \pi^*(-2K_Y + \Gamma) \cdot \pi^*(B_3) = (-2K_Y + \Gamma) \cdot B_3 = 3 $$ Note that $E$ does not lie in the branch locus of the map $\pi : X \to Y.$ Therefore $\pi^*E = \widetilde{E}$ for an irreducible divisor $\widetilde{E}.$ Because $E^2=-1,$ we have $\widetilde{E}^2=-4.$ Moreover we have the following identity. $$ K_X \cdot \widetilde{E}= \frac{1}{2} \pi^*(-2K_Y + \Gamma) \cdot \pi^*(E) = 2(-2K_Y + \Gamma) \cdot E = 6 $$ By adjunction, $2p_a-2=C^2+K_X \cdot C,$ so the curves $\widetilde{\Gamma}, \widetilde{B_2}, \widetilde{B_3}$ have arithmetic genera $1, 3, 2$ respectively, and $\widetilde{E}$ has arithmetic genus $2.$ From Proposition \ref{criterion}, we see that $\widetilde{E}, \widetilde{\Gamma}, \widetilde{B_2}, \widetilde{B_3}$ are negative curves and that they generate $Eff(X).$ Similarly, the pull-backs of $\Gamma+B_3,B_2+B_3,E+\Gamma,E+B_2$ generate $Nef(X).$ \end{proof} \section{Minimal surfaces of general type with $p_g=0, 2 \leq K^2 \leq 6.$} When $K_X^2$ becomes smaller, it becomes harder to check whether a given surface $X$ is a Mori dream space or not. However, for the $2 \leq K^2 \leq 6$ cases there are several classical surfaces of general type with $p_g=0$ for which we can show that they are Mori dream surfaces. Our results in this section were motivated by \cite{Alexeev, AO, Coughlan} and some of the results easily follow from them. In particular, the semigroups of effective divisors of some surfaces discussed in this section were computed in \cite{Alexeev, Coughlan}. \subsection{Abelian coverings of weak del Pezzo surfaces} Let us recall the definition of an abelian covering. \begin{definition} An abelian covering of $Y$ with an abelian group $G$ is a variety $X$ with a faithful action of $G$ on $X$ such that there is a finite morphism $\pi : X \to Y$ which is the quotient map of $X$ by the group action of $G.$ \end{definition} Let $X$ be a surface of general type with $p_g=q=0$ and let $\pi : X \to Y$ be an abelian covering where $Y$ is a del Pezzo surface. There are lots of surfaces of general type with $p_g=q=0$ constructed in this way, e.g. Burniat surfaces, Kulikov surfaces, some numerical Campedelli surfaces, etc. The key property in some of these examples is that there is a natural isomorphism $Pic(X) _{\mathbb{R}} \cong Pic(Y) _{\mathbb{R}}$ which preserves the intersection pairing (up to scale), so that we can identify the effective cones and nef cones of $X$ and $Y.$ This phenomenon was observed in \cite{Alexeev, AO, Coughlan} for some of these surfaces and plays a key role in their works. In these cases, because $Y$ is a Mori dream space we see that the effective cone of $X$ is a rational polyhedral cone and every nef divisor of $X$ is semiample. Therefore we see that $X$ is a Mori dream space. \subsection{Weak del Pezzo surfaces} Let $Y$ be a smooth rational surface. When $-K_Y$ is ample (resp. nef and big) we call $Y$ a (resp. weak) del Pezzo surface. It is well-known that (weak) del Pezzo surfaces are Mori dream surfaces. Let us recall several basic definitions and facts about (weak) del Pezzo surfaces. \begin{definition}\cite{ADHL} We say that $1 \leq r \leq 8$ distinct points $p_1,\cdots,p_r$ in $\mathbb{P}^2$ are in general position if they satisfy the following conditions. \\ (1) No three of them lie on a line. \\ (2) No six of them lie on a conic.
\\ (3) No eight of them lie on a cubic with a singularity at some of the $p_i.$ \end{definition} It is well-known that a del Pezzo surface $Y$ is either $\mathbb{P}^1 \times \mathbb{P}^1$ or a blow-up of $\mathbb{P}^2$ at $0 \leq r \leq 8$ points in general position. Let $\phi : Y=Y_r \to Y_{r-1} \to \cdots \to Y_0 = \mathbb{P}^2$ be the blowup at $p_i \in Y_{i-1}.$ Let $E_i$ denote the total transform of the exceptional divisor over $p_i \in Y_{i-1}$ and let $H$ be the pull-back of $\mathcal{O}_{\mathbb{P}^2}(1).$ We say that a point $p_i$ lies on a line (resp. conic) if its image in $\mathbb{P}^2$ lies on a line (resp. conic). \begin{definition}\cite{ADHL} We say that $1 \leq r \leq 8$ points $p_1,\cdots,p_r$ are in almost general position if they satisfy the following conditions. \\ (1) No four of them lie on a line. \\ (2) No seven of them lie on a conic. \\ (3) $E_i$ is a $(-1)$-curve or a chain of rational curves whose last component is a $(-1)$-curve and all the other components are $(-2)$-curves. \end{definition} The above condition is equivalent to saying that no $p_i$ lies on a $(-2)$-curve on $Y_{i-1}.$ \\ It is well-known that a weak del Pezzo surface $Y$ is either $\mathbb{P}^1 \times \mathbb{P}^1$ or $\mathbb{F}_2$ or a blow-up of $\mathbb{P}^2$ at $0 \leq r \leq 8$ points in almost general position. Of course, a del Pezzo surface is a weak del Pezzo surface. Because a weak del Pezzo surface is a Mori dream surface, it is easy to see the following description of its effective cone. \begin{lemma} (1) Let $Y$ be a smooth del Pezzo surface with $\rho \geq 3.$ Then $Eff(Y)$ is the rational polyhedral cone generated by the classes of $(-1)$-curves. \\ (2) Let $Y$ be a smooth weak del Pezzo surface with $\rho \geq 3.$ Then $Eff(Y)$ is the rational polyhedral cone generated by the classes of $(-1)$-curves and $(-2)$-curves. \end{lemma} Therefore, in order to describe $Eff(Y)$ explicitly, one needs to find all $(-1)$-curves and $(-2)$-curves on $Y$ explicitly. Indeed, these curves have been intensively studied by many authors. See \cite{Dolgachev} for more details. \begin{theorem}\cite[Theorem 8.3.2]{Dolgachev} \label{wdP} Let $Y$ be a weak del Pezzo surface of degree $d.$ Then we have the following. \\ (1) If $d \geq 2,$ then $|-K_Y|$ has no base points. \\ (2) Let $\phi$ be the morphism defined by $|-K_Y|.$ If $d \geq 3,$ then the image $\phi(Y)$ is a del Pezzo surface of degree $d$ in $\mathbb{P}^d$ with rational double points. The morphism $\phi : Y \to \mathbb{P}^d$ contracts the $(-2)$-curves on $Y.$ \\ (3) If $d=2,$ then $\phi$ factors as $Y \to \overline{Y} \to \mathbb{P}^2$ where $\overline{Y}$ is a normal surface and $\overline{Y} \to \mathbb{P}^2$ is a finite map of degree 2 branched along a curve $B.$ The image of a component of the chains of $(-2)$-curves on $Y$ is a rational double point on $\overline{Y}.$ The curve $B$ is either smooth or has only simple singularities. \end{theorem} We have the following characterization of $(-2)$-curves on weak del Pezzo surfaces. \begin{lemma}\cite{ADHL} Let $Y$ be a weak del Pezzo surface which is the blow-up of $\mathbb{P}^2$ at $2 \leq r \leq 8$ points in almost general position. Then any $(-2)$-curve on $Y$ is either of the form $E_i-E_{i+1}$ (if $E_i$ is reducible), or is linearly equivalent to one of $H-E_1-E_2-E_3,$ $2H-E_1-E_2-E_3-E_4-E_5-E_6,$ $3H-2E_1-E_2-E_3-E_4-E_5-E_6-E_7-E_8$ (up to permutation of the indices).
\end{lemma} From the above characterization of $(-2)$-curves, we can explicitly describe the negative curves on weak del Pezzo surfaces. \subsection{Burniat surfaces} Burniat surfaces can be constructed as bidouble coverings of (weak) del Pezzo surfaces with the same Picard rank. Let $p_1,p_2,p_3$ be points in general position in $\mathbb{P}^2.$ For each $p_i$ there are the two lines $\overline{p_{i-1}p_i}, \overline{p_ip_{i+1}}$ (indices modulo 3), and we consider two further distinct lines through $p_i$ different from $\overline{p_{i-1}p_i}, \overline{p_ip_{i+1}}.$ Then we have a configuration of nine lines on $\mathbb{P}^2.$ By blowing up the points through which more than two lines pass, we obtain a weak del Pezzo surface $Y.$ From the configuration of lines, one can see that there is a smooth bidouble covering $X$ of $Y.$ These surfaces are called Burniat surfaces; see \cite{Alexeev, BC1, Coughlan} for more details about them. Let $X$ be a Burniat surface and $\pi : X \to Y$ be the quotient map of the bidouble covering. In this case, the pull-back $\pi^* : Pic(Y) _{\mathbb{R}} \to Pic(X) _{\mathbb{R}}$ is an isomorphism and we see that $X$ is a Mori dream space from Proposition \ref{criterion}. We can compute the effective cone of $X$ via that of $Y.$ See \cite{Alexeev, AO, Coughlan} for more details; we will follow the notations in \cite{BC1}. \begin{lemma}\cite{Alexeev} We have the following isomorphism. $$ 2K_X \simeq \pi^*(-K_Y)$$ \end{lemma} Let $D$ be an irreducible reduced curve on $Y$ such that $\pi^*D$ does not split. Let $\widetilde{D}$ be the reduced component of $\pi^*D.$ When $D$ is a component of the branch locus of $\pi,$ we have $\pi^*D=2\widetilde{D}$ and we can compute the intersection number of $K_X$ and $\widetilde{D}$ as follows. $$ K_X \cdot \widetilde{D} = \frac{1}{4} \cdot \pi^*(-K_Y) \cdot \pi^*D = (-K_Y) \cdot D$$ $$ \widetilde{D}^2 = \frac{1}{4} \cdot \pi^*D \cdot \pi^*D = D^2 $$ When $D$ is not a component of the branch locus of $\pi,$ we have $\pi^*D=\widetilde{D}$ and we can compute the intersection number of $K_X$ and $\widetilde{D}$ as follows. $$ K_X \cdot \widetilde{D} = \frac{1}{2} \cdot \pi^*(-K_Y) \cdot \pi^*D = 2 \cdot (-K_Y) \cdot D$$ $$ \widetilde{D}^2 = (\pi^*D)^2 = 4D^2 $$
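These two rules, together with the adjunction formula $2p_a(C)-2=C^2+K_X \cdot C,$ determine the pair $(C^2, p_a(C))$ for every reduced pull-back. The following Python sketch (an illustration, not taken from the cited references) encodes them and reproduces the entries appearing in Theorem \ref{main}: \begin{verbatim}
# Reduced pull-back of a negative curve D on Y under the bidouble cover
# pi: X -> Y of degree 4, with 2K_X = pi^*(-K_Y).  Inputs: D^2, -K_Y.D,
# and whether D is a component of the branch locus.
def reduced_pullback(D2, mKY_D, in_branch):
    if in_branch:                 # pi^*D = 2*Dtilde
        C2, KX_C = D2, mKY_D
    else:                         # pi^*D = Dtilde
        C2, KX_C = 4 * D2, 2 * mKY_D
    pa = (C2 + KX_C + 2) // 2     # adjunction: 2p_a - 2 = C^2 + K_X.C
    return C2, pa

# A (-1)-curve on Y has D^2 = -1 and -K_Y.D = 1; a (-2)-curve has
# D^2 = -2 and -K_Y.D = 0.
print(reduced_pullback(-1, 1, True))    # -> (-1, 1): elliptic curve
print(reduced_pullback(-1, 1, False))   # -> (-4, 0)
print(reduced_pullback(-2, 0, True))    # -> (-2, 0)
\end{verbatim}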
\subsubsection{Burniat surfaces with $K^2=6$} Burniat surfaces with $K^2=6$ are called primary Burniat surfaces. Let $p_1,p_2,p_3 \in \mathbb{P}^2$ be three points in general position. Then $Y$ is the blowup of $\mathbb{P}^2$ at these three points. Then $Y$ has three exceptional curves and three strict transforms of the lines $\overline{p_ip_j}$ where $i, j \in \{ 1,2,3 \}$ and $i \neq j.$ These six curves are the only $(-1)$-curves on $Y.$ \begin{proposition} The effective cone of $Y$ is the rational polyhedral cone generated by these six $(-1)$-curves. \end{proposition} We have the following conclusion. \begin{proposition} Let $X$ be a Burniat surface with $K^2=6$ constructed as a bidouble covering over $Y.$ Then $X$ is a Mori dream surface and the effective cone is the rational polyhedral cone which is the pull-back of the effective cone of $Y.$ The six negative curves are elliptic curves whose self-intersection numbers are all $-1.$ \end{proposition} \subsubsection{Burniat surfaces with $K^2=5$} Burniat surfaces with $K^2=5$ are called secondary Burniat surfaces. Let $p_1,p_2,p_3,p_4 \in \mathbb{P}^2$ be four points in general position. Then $Y$ is the blow-up of $\mathbb{P}^2$ at these four points. Then $Y$ has four exceptional curves and six strict transforms of the lines $\overline{p_ip_j}$ where $i, j \in \{ 1,2,3,4 \}$ and $i \neq j.$ These ten curves are the only $(-1)$-curves on $Y.$ \begin{proposition} The effective cone of $Y$ is the rational polyhedral cone generated by the above ten $(-1)$-curves. \end{proposition} Then we have the following conclusion. \begin{proposition} Let $X$ be a Burniat surface with $K^2=5$ constructed as a bidouble covering over $Y.$ Then $X$ is a Mori dream surface and the effective cone is the rational polyhedral cone which is the pull-back of the effective cone of $Y.$ The ten negative curves are nine elliptic curves whose self-intersection numbers are all $-1$ and one curve with self-intersection $-4$ and arithmetic genus $0.$ \end{proposition} \begin{proof} All ten $(-1)$-curves on $Y$ except the exceptional divisor over $p_4$ lie in the branch locus of $\pi : X \to Y.$ The reduced pull-back of the exceptional divisor over $p_4$ is a negative curve with self-intersection $-4$ and arithmetic genus $0.$ The reduced pull-backs of the other nine $(-1)$-curves are smooth elliptic curves with self-intersection number $-1$ since they lie in the branch locus of $\pi.$ \end{proof} \subsubsection{Burniat surfaces with $K^2=4$} Bauer and Catanese proved that there are two types of families of Burniat surfaces with $K^2=4$ (nodal and non-nodal types). See \cite{BC1} for more details. \\ Let us first consider the non-nodal case. From \cite{BC1} we see that $Y$ is a del Pezzo surface. Let $p_1,p_2,p_3,p_4,p_5 \in \mathbb{P}^2$ be five points in general position. Then $Y$ is the blowup of $\mathbb{P}^2$ at these five points. Then $Y$ has five exceptional divisors and ten strict transforms of the lines $\overline{p_ip_j}$ where $i, j \in \{ 1,2,3,4,5 \}$ and $i \neq j.$ There is a unique conic passing through all of $p_1,p_2,p_3,p_4,p_5$ and its strict transform gives one more $(-1)$-curve on $Y.$ These sixteen curves are the only $(-1)$-curves on $Y.$ Therefore we have the following. \begin{proposition} The effective cone of $Y$ is the rational polyhedral cone generated by the sixteen $(-1)$-curves. \end{proposition} Then we have the following conclusion. \begin{proposition} Let $X$ be a Burniat surface with $K^2=4$ of non-nodal type constructed as above. Then $X$ is a Mori dream surface and the effective cone is the rational polyhedral cone which is the pull-back of the effective cone of $Y.$ There are twelve elliptic curves whose self-intersections are $-1$ and four curves with self-intersection $-4$ and arithmetic genus $0.$ \end{proposition} \begin{proof} The strict transforms of nine of the lines and the three exceptional divisors over $p_1, p_2, p_3$ are components of the branch locus of $\pi.$ Therefore their reduced pull-backs are elliptic curves with self-intersection $-1.$ The strict transform of the line $\overline{p_4p_5}$ and the exceptional divisors over $p_4, p_5$ are not components of the branch locus of $\pi.$ Therefore their reduced pull-backs are curves with self-intersection $-4$ and arithmetic genus $0.$ Similarly, the strict transform of the unique conic passing through $\{ p_1,p_2,p_3,p_4,p_5 \}$ is not a component of the branch locus of $\pi.$ Therefore its reduced pull-back is a curve with self-intersection $-4$ and arithmetic genus $0.$ \end{proof} Now let us consider the nodal case. From \cite{BC1, Dolgachev} we see that $Y$ is a weak del Pezzo surface whose anticanonical model has a unique $A_1$ singularity.
Therefore there is a unique $(-2)$-curve on $Y,$ which is the strict transform of the line passing through $p_1,p_4,p_5.$ Moreover, we can see that there is no conic passing through all of $p_1,p_2,p_3,p_4,p_5$ as follows. \begin{lemma} There is no curve in $|2H-E_1-E_2-E_3-E_4-E_5|.$ \end{lemma} \begin{proof} Suppose that there is such a curve and let $C$ be its image in $\mathbb{P}^2.$ From the configuration we see that there is a line passing through three points among these five points. Then $C$ and this line would meet at three points, which contradicts Bezout's theorem. \end{proof} There are seven $(-1)$-curves which are strict transforms of lines passing through only two points of $\{ p_1,p_2,p_3,p_4,p_5 \}.$ There are five exceptional divisors on $Y.$ Therefore we can describe the effective cone of $Y$ as follows. \begin{proposition} The effective cone of $Y$ is the rational polyhedral cone generated by the above twelve $(-1)$-curves and the unique $(-2)$-curve. \end{proposition} Then we have the following conclusion. \begin{proposition} Let $X$ be a Burniat surface with $K^2=4$ of nodal type constructed as above. Then $X$ is a Mori dream surface and the effective cone is the pull-back of the effective cone of $Y.$ There are ten elliptic curves whose self-intersections are $-1,$ one curve with self-intersection $-2$ and arithmetic genus $0,$ and two curves with self-intersection $-4$ and arithmetic genus $0.$ \end{proposition} \begin{proof} The exceptional divisors over $p_1, p_2, p_3$ and the strict transforms of the seven lines passing through two points among $\{p_1,\cdots,p_5\}$ are components of the branch locus of $\pi.$ Therefore their reduced pull-backs are elliptic curves with self-intersection $-1.$ The strict transform of the line $\overline{p_1p_4p_5}$ is also a component of the branch locus of $\pi.$ Therefore its reduced pull-back is a curve with self-intersection $-2$ and arithmetic genus $0.$ The exceptional divisors over $p_4, p_5$ are not contained in the branch locus of $\pi$ and their reduced pull-backs are curves with self-intersection $-4$ and arithmetic genus $0.$ Therefore we have the desired result. \end{proof} \subsubsection{Burniat surfaces with $K^2=3$} From Theorem \ref{wdP}, we see that $Y$ admits a morphism $|-K_Y| : Y \to \mathbb{P}^3$ whose image $\overline{Y}$ is a cubic surface in $\mathbb{P}^3.$ It is known that $\overline{Y}$ has $3A_1$ singularities. Therefore $Y$ has three $(-2)$-curves, which are the strict transforms of the lines passing through three points among $p_1,p_2,p_3,p_4,p_5,p_6.$ Again we can see that there is no conic passing through five points among $p_1,p_2,p_3,p_4,p_5,p_6$ as follows. \begin{lemma} There is no curve in $|2H-E_1-E_2-E_3-E_4-E_5|$ (up to permutation of the indices). \end{lemma} \begin{proof} Suppose that there is such a curve and let $C$ be its image in $\mathbb{P}^2.$ For any set of five points in $\{p_1,\cdots,p_6\},$ there is a line passing through three points among these five points. Then $C$ and this line would meet at three points, which contradicts Bezout's theorem. \end{proof} There are six exceptional divisors and six $(-1)$-curves which are strict transforms of lines passing through only two points among $\{ p_1,p_2,p_3,p_4,p_5,p_6 \}.$ Therefore we can describe the effective cone of $Y$ as follows. \begin{proposition} The effective cone of $Y$ is the rational polyhedral cone generated by the above twelve $(-1)$-curves and three $(-2)$-curves. \end{proposition} Then we have the following conclusion.
\begin{proposition} Let $X$ be a Burniat surface with $K^2=3$ constructed as a bidouble covering over $Y.$ Then $X$ is a Mori dream surface and the effective cone is the rational polyhedral cone which is the pull-back of the effective cone of $Y.$ There are nine elliptic curves whose self-intersections are $-1,$ three curves with self-intersection $-2$ and arithmetic genus $0,$ and three curves with self-intersection $-4$ and arithmetic genus $0.$ \end{proposition} \begin{proof} The exceptional divisors over $p_1, p_2, p_3$ and the strict transforms of the six lines passing through two points among $\{p_1,\cdots,p_6\}$ are components of the branch locus of $\pi.$ Therefore their reduced pull-backs are elliptic curves with self-intersection $-1.$ The strict transforms of the lines $\overline{p_1p_4p_5}, \overline{p_2p_4p_6}, \overline{p_3p_5p_6}$ are also components of the branch locus of $\pi.$ Therefore their reduced pull-backs are curves with self-intersection $-2$ and arithmetic genus $0.$ The exceptional divisors over $p_4, p_5, p_6$ are not contained in the branch locus of $\pi$ and their reduced pull-backs are curves with self-intersection $-4$ and arithmetic genus $0.$ Therefore we have the desired result. \end{proof} \subsubsection{Burniat surfaces with $K^2=2$} In this case $Y$ is a weak del Pezzo surface of degree 2. From Theorem \ref{wdP}, we see that $Y$ has a two-to-one map $Y \to \mathbb{P}^2$ which factors as $Y \to \overline{Y} \to \mathbb{P}^2.$ From \cite{BC1} we see that the branch locus is a union of four lines in general position. It is known that $\overline{Y}$ has only nodal singularities. From \cite{BC1, Dolgachev} we see that $\overline{Y}$ has $6A_1$ singularities and $Y$ has exactly six $(-2)$-curves. From the configuration, $Y$ has seven exceptional curves and the strict transforms of $\overline{p_1p_2}, \overline{p_1p_3}, \overline{p_2p_3}.$ Together with the six $(-2)$-curves these are sixteen curves, and we can check that they are the only negative curves on $Y$ from the following lemmas. \begin{lemma} There is no curve in $|2H-E_1-E_2-E_3-E_4-E_5|.$ \end{lemma} \begin{proof} Suppose that there is such a curve and let $C$ be its image in $\mathbb{P}^2.$ For any five points in $\{p_1,\cdots,p_7\},$ there is a line passing through three points among these five points. Then $C$ and this line would meet at three points, which contradicts Bezout's theorem. \end{proof} \begin{lemma} There is no curve in $|3H-2E_1-E_2-E_3-E_4-E_5-E_6-E_7|.$ \end{lemma} \begin{proof} Suppose that there is such a curve. Because there is a line passing through all of $p_1,p_6,p_7,$ there is a curve in $|H-E_1-E_6-E_7|.$ Because $(3H-2E_1-E_2-E_3-E_4-E_5-E_6-E_7)\cdot(H-E_1-E_6-E_7)=-1$ and the element in $|H-E_1-E_6-E_7|$ is irreducible, we see that there is an element in $|2H-E_1-E_2-E_3-E_4-E_5|.$ However, this contradicts the previous lemma. \end{proof} Using a similar argument, we see that the only $(-1)$-curves on $Y$ other than the exceptional curves are the strict transforms of $\overline{p_1p_2}, \overline{p_1p_3}, \overline{p_2p_3}.$ \begin{proposition} The effective cone of $Y$ is the rational polyhedral cone generated by the above sixteen negative curves. \end{proposition} Therefore we have the following conclusion.
\begin{proposition} Let $X$ be a Burniat surface with $K^2=2$ constructed as a bidouble covering of $Y.$ Then $X$ is a Mori dream surface and its effective cone is the rational polyhedral cone which is the pull-back of the effective cone of $Y.$ There are six elliptic curves with self-intersection $-1,$ four curves with self-intersection $-4$ and arithmetic genus $0,$ and six curves with self-intersection $-2$ and arithmetic genus $0.$ \end{proposition} \begin{proof} The strict transforms of the three $(-1)$-curves $\overline{p_1p_2}, \overline{p_1p_3}, \overline{p_2p_3}$ are components of the branch locus of $\pi.$ Therefore their reduced pull-backs are elliptic curves with self-intersection $-1.$ Among the seven exceptional divisors on $Y,$ three of them lie on the branch locus of $\pi,$ and hence their reduced pull-backs are elliptic curves whose self-intersection numbers are all $-1.$ Four of the exceptional divisors do not lie on the branch locus of $\pi,$ and hence their reduced pull-backs are curves with self-intersection $-4$ and arithmetic genus $0.$ There are six $(-2)$-curves on $Y$ which lie on the branch locus, and hence their reduced pull-backs are curves with self-intersection $-2$ and arithmetic genus $0.$ Therefore we obtain the conclusion. \end{proof} \subsection{Kulikov surfaces} Kulikov surfaces are $(\mathbb{Z}/3\mathbb{Z})^2$-coverings of del Pezzo surfaces of degree 6. Because both surfaces have Picard rank 4, our criterion works for these surfaces. Let $X$ be a Kulikov surface and $\pi : X \to Y$ be the quotient map of the $(\mathbb{Z}/3\mathbb{Z})^2$-covering. In this case, the pull-back $\pi^* : Pic(Y) _{\mathbb{R}} \to Pic(X) _{\mathbb{R}}$ is an isomorphism and we see that $X$ is a Mori dream space. See \cite{Coughlan} for more details. \begin{lemma}\cite{Coughlan} We have the following numerical equivalence. $$ 3K_X \sim_{num} \pi^*(-K_Y) $$ \end{lemma} Therefore we obtain the following conclusion. \begin{proposition} Let $X$ be a Kulikov surface constructed as above. Then $X$ is a Mori dream surface, and the effective cone and nef cone of $X$ are the pull-backs of those of $Y.$ The six negative curves on $X$ are elliptic curves with self-intersection $-1.$ \end{proposition} \begin{proof} From Proposition \ref{criterion} and the construction, it is straightforward that $X$ is a Mori dream surface since $Y$ is a del Pezzo surface of degree 6. Moreover $Eff(Y)$ is generated by six $(-1)$-curves, where three of them are the exceptional divisors $E_1,E_2,E_3$ and three of them, $L_{1},L_{2},L_{3},$ are strict transforms of lines passing through two of the three blow-up centers. Therefore $Eff(X)$ is a rational polyhedral cone generated by the pullbacks of the six $(-1)$-curves on $Y.$ Now let us compute intersection numbers. Let $\widetilde{E_i}$ be the reduced pull-back of $E_i$ and $\widetilde{L_{i}}$ be the reduced pull-back of $L_{i}$ for $i=1,2,3.$ We have $K_Y \cdot E_i=-1$ and $K_Y \cdot L_i=-1$ since they are $(-1)$-curves.
They belong to the branch locus of $\pi.$ Therefore we have the following identities $$ K_X \cdot \widetilde{E_i} = \frac{1}{9} \pi^*(-K_Y) \cdot \pi^*{E_i} = (-K_Y) \cdot {E_i} = 1 $$ $$ K_X \cdot \widetilde{L_i} = \frac{1}{9} \pi^*(-K_Y) \cdot \pi^*{L_i} = (-K_Y) \cdot {L_i} = 1 $$ $$ \widetilde{E_i} \cdot \widetilde{E_i} = \frac{1}{9} \pi^*{E_i} \cdot \pi^*{E_i} = {E_i}^2 = -1 $$ $$ \widetilde{L_i} \cdot \widetilde{L_i} = \frac{1}{9} \pi^*{L_i} \cdot \pi^*{L_i} = {L_i}^2 = -1 $$ for all $i \in \{1,2,3 \}.$ Therefore we obtain the desired result. \end{proof} \section{Product-quotient surfaces} Product-quotient surfaces form an important class of algebraic surfaces and provide many examples of surfaces of general type with $p_g=0.$ Bauer, Catanese, Grunewald and Pignatelli classified minimal product-quotient surfaces with $p_g=0$ in \cite{BCGP, BP}. In this section, we study effective, nef and semiample cones of some product-quotient surfaces with $p_g=0.$ From this we prove that several product-quotient surfaces with $p_g=0$ are Mori dream surfaces. \subsection{General properties} Let us recall basic definitions and results about product-quotient surfaces. \begin{definition}\cite{BCGP}\label{product-quotient surface} Let $G$ be a finite group and $C$, $D$ be algebraic curves with a faithful $G$-action. Consider the diagonal action of $G$ on $C \times D.$ An algebraic surface $X$ which is isomorphic to the minimal resolution of $(C \times D)/G$ is called a product-quotient surface, and $(C \times D)/G$ is called the quotient model of $X.$ \end{definition} \begin{displaymath} \xymatrix{ X \ar[rd] & & \ar[ld] C \times D \\ & (C \times D)/G & } \end{displaymath} As we proved, product-quotient surfaces with $p_g=q=0, K^2=8$ are Mori dream spaces, and we are going to find more product-quotient surfaces which are Mori dream spaces. Product-quotient surfaces with $p_g=q=0$ can be studied in many ways. Let $X$ be a product-quotient surface with $p_g=q=0,$ i.e. the minimal resolution of $(C \times D)/G.$ Then we have the following diagram. \begin{displaymath} \xymatrix{ X \ar[rd] & \ar[ld] C \times D \ar[d] \ar[rd] & \\ C \ar[d] & \ar[ld] (C \times D)/G \ar[rd] & \ar[d] D \\ C/G \cong \mathbb{P}^1 & & D/G \cong \mathbb{P}^1 } \end{displaymath} Therefore there are two natural fibration maps to projective lines, and much geometric information can be extracted from the group action on the product of curves. Sometimes, we can compute effective, nef and semiample cones of product-quotient surfaces via these fibration structures. In particular, we can compute a ($\mathbb{Q}$-)basis of the Picard group of a product-quotient surface from the two fibration structures. When the $G$-action on $C \times D$ is free (or, equivalently, $K^2=8$), it is easy to see that the Picard lattice is the hyperbolic plane $H.$ Therefore from now on we study the cases $K^2 \leq 6.$ The fibers of the two fibrations to projective lines were studied by Serrano in \cite{Serrano}. Let us recall results from \cite{Serrano}. \begin{theorem}\cite[Theorem 2.1]{Serrano} Let $c$ be a point of $C$ and $\bar{c}$ be its image under $C \to C/G.$ \\ (1) The reduced fiber of $X \to C/G$ over $\bar{c}$ is the union of an irreducible smooth curve $F_1$, called the central component, and either none or at least two mutually disjoint Hirzebruch-Jung strings, each one meeting the central component at one point.
These strings are in one-to-one correspondence with the branch points of $D \to D/G_c,$ where $G_c$ is the stabilizer group of $c \in C.$ \\ (2) The intersection of a Hirzebruch-Jung string with the central component $F_1$ is transversal and takes place at only one of the end components of the string. \\ (3) $F_1$ is isomorphic to $D/G_c$ and has multiplicity equal to $|G_c|$ in the fiber. \\ (4) Let $E=E_1+\cdots+E_k$ be a Hirzebruch-Jung string ordered linearly in the fiber over $\bar{c}$ and consider its image $\bar{d}$ under the other fibration $X \to D/G.$ Let $G_1$ be the central component of the fiber of $X \to D/G$ over $\bar{d}.$ Then $E$ meets $F_1$ and $G_1$ at opposite ends. \end{theorem} The self-intersection of the strict transform of the reduced fiber was computed by Polizzi in \cite{Polizzi2}. Let us recall a lemma which is very useful for computing the Picard lattice. \begin{proposition}\cite[Proposition 2.8]{Polizzi2} \cite[Lemma 5.3]{BCGP} Let $F$ be a reduced fiber of $(C \times D)/G \to C/G$ as a Weil divisor and let $\widetilde{F}$ be its strict transform in $X.$ Suppose that each singular point $x_i \in (C \times D)/G$ on $F$ is of type $\frac{1}{n_i}(1,k_i).$ Then we have the following identity. $$ -\widetilde{F}^2=\sum_{x_i \in F} \frac{k_i}{n_i}. $$ \end{proposition} Once we have computed the effective and nef cones of a product-quotient surface, our next task is to compare the nef cone with the semiample cone. Usually this is a very hard task. To prove that some divisors are semiample, we construct explicit automorphisms of some product-quotient surfaces. It seems that these automorphisms will have further applications. Let us consider the following situation. Let $C, D, G$ be as above and suppose that the center of $G$ contains a nontrivial subgroup $Z.$ From the isomorphism $$ G \times Z \cong \{ (g,gz) \in G \times G ~ | ~ g \in G, z \in Z \} \leq G \times G $$ we can view $G \times Z$ as a subgroup of $G \times G$ containing $\Delta G.$ Then there is a natural action of $G \times Z$ on $C \times D$ as follows. $$ (G \times Z) \times (C \times D) \to (C \times D) $$ $$ (g,gz) \cdot (c,d)=(gc,gzd) $$ where $g \in G, z \in Z, c \in C, d \in D.$ We can also consider a natural action of $G \times Z$ on $C$ as follows $$ (G \times Z) \times C \to C $$ $$ (g,gz) \cdot c = gc $$ and a natural action of $G \times Z$ on $D$ as follows. $$ (G \times Z) \times D \to D $$ $$ (g,gz) \cdot d = gzd. $$ It is easy to check that the projection maps $(C \times D) \to C$ and $(C \times D) \to D$ are $G \times Z$-equivariant, and we have the following commutative diagram. \begin{displaymath} \xymatrix{ X \ar[rd] & \ar[ld] C \times D \ar[d] \ar[rd] & \\ C \ar[d] & \ar[ld] (C \times D)/G \ar[rd] \ar[d] & \ar[d] D \\ C/G \cong \mathbb{P}^1 \ar[d] & (C \times D)/(G \times Z) \ar[ld] \ar[rd] & D/G \cong \mathbb{P}^1 \ar[d] \\ C/(G \times Z) \cong \mathbb{P}^1 & & D/(G \times Z) \cong \mathbb{P}^1} \end{displaymath} Note that the $Z$-actions on $C/G$ and $D/G$ are trivial, and hence the $Z$-action on $(C \times D)/G$ preserves the fibers of $(C \times D)/G \to C/G$ and $(C \times D)/G \to D/G.$ Suppose that the $G \times Z$-action on $C \times D$ induces a $Z$-action on $X.$ Let $L$ be a $Z$-invariant divisor on $X.$ Then some multiple of $L$ is the pullback of a divisor on $X/Z$; suppose that such a divisor is semiample on $X/Z.$ Then we see that $L$ is also semiample. Using this method, we can check that some nef divisors are semiample.
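As a quick illustration of this group-theoretic setup, the following minimal Python sketch (assuming SymPy's \texttt{combinatorics} module and $0$-indexed points) verifies that $Z=\langle (13)(24), (56) \rangle$ centralizes $G=\langle (1234), (12)(34), (56) \rangle \leq S_6,$ the group used in the next subsection, and that $|G|=16=|D_4 \times \mathbb{Z}/2\mathbb{Z}|.$
\begin{verbatim}
# Sketch (assuming SymPy): check that Z = <(13)(24),(56)> is central in
# G = <(1234),(12)(34),(56)> <= S_6; points are 0-indexed, so (1234)
# becomes the cycle (0 1 2 3) and (56) becomes (4 5).
from sympy.combinatorics import Permutation, PermutationGroup

a = Permutation([[0, 1, 2, 3]], size=6)    # (1234)
b = Permutation([[0, 1], [2, 3]], size=6)  # (12)(34)
c = Permutation([[4, 5]], size=6)          # (56)
z = Permutation([[0, 2], [1, 3]], size=6)  # (13)(24)

G = PermutationGroup([a, b, c])
Z = PermutationGroup([z, c])
print(G.order())  # 16 = |D_4 x Z/2Z|
print(all(g * h == h * g
          for g in G.generators for h in Z.generators))  # True
\end{verbatim}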
\subsection{Product-quotient surfaces : $K^2=6,$ $G=D_4 \times \mathbb{Z}/2\mathbb{Z}$ case} Let $X$ be a product-quotient surface with $p_g=q=0$ and $K^2=6.$ An explicit description of such surfaces was provided in \cite{BCGP}. Let us recall the following diagram. \begin{displaymath} \xymatrix{ X \ar[rd] & & \ar[ld] C \times D \\ & (C \times D)/G & } \end{displaymath} In this case, $C$ is a curve of genus 3 and $D$ is a curve of genus 7, both with $G$-action. From \cite{BCGP} we see that there are singular points of type $2 \times \frac{1}{2}(1,1)$ on $(C \times D)/G,$ and the $G$-action is encoded in the following data: $t_1 : (2,2,2,2,4),$ $S_1 : (56), (56), (12)(34)(56),$ $(13)(56), (1432)$ and $t_2 : (2,2,2,4),$ $S_2 : (24), (14)(23),$ $(13)(24)(56), (1432)(56),$ where $G=\langle (1234), (12)(34), (56) \rangle \leq S_6.$ \\ We can describe the effective cone and nef cone of $X$ explicitly. Let $E_1,E_2$ be the two exceptional divisors in $X,$ let $F_1$ be the reduced fiber of one fibration to $\mathbb{P}^1$ meeting $E_1,E_2,$ and let $G_1$ be the reduced fiber of the other fibration to $\mathbb{P}^1$ meeting $E_1,E_2.$ Then one can check that $F_1$ is the reduced fiber corresponding to the element $(1432).$ Let $F_2$ (resp. $F_3,F_4,F_5$) be the reduced fiber corresponding to the element $(56)$ (resp. $(56),(12)(34)(56),(13)(56)$). Similarly one can check that $G_1$ is the reduced fiber corresponding to the element $(1432)(56).$ Let $G_2$ (resp. $G_3,G_4$) be the reduced fiber corresponding to the element $(24)$ (resp. $(14)(23), (13)(24)(56)$). It is easy to see that $E_1,E_2,F_1,G_1$ form a basis of $Pic(X) _{\mathbb{R}}.$ We can compute the intersections between these curves from the results of Serrano and Polizzi as follows. \\ \begin{center} \begin{tabular}{|c|c|c|c|c|} \hline $\cdot$ & $E_1$ & $E_2$ & $F_1$ & $G_1$ \\ \hline $E_1$ & -2 & 0 & 1 & 1 \\ \hline $E_2$ & 0 & -2 & 1 & 1\\ \hline $F_1$ & 1 & 1 & -1 & 0 \\ \hline $G_1$ & 1 & 1 & 0 & -1 \\ \hline \end{tabular} \end{center} From the above intersection matrix we have the following numerical equivalence. \begin{lemma} We have the following numerical equivalence. $$ K_X \sim_{num} 2E_1+2E_2+3F_1+G_1 $$ \end{lemma} \begin{proof} It follows from the adjunction formula. \end{proof} Moreover we can compute the effective cone of $X.$ \begin{lemma} The effective cone of $X$ is a rational polyhedral cone generated by $E_1,E_2,F_1,G_1.$ \end{lemma} \begin{proof} One can check that $F_1+E_1+G_1,$ $F_1+E_2+G_1,$ $E_1+E_2+2F_1,$ $E_1+E_2+2G_1$ are nef divisors because they are effective divisors whose intersection with any of their components is nonnegative. Let $e_1E_1+e_2E_2+f_1F_1+g_1G_1$ be an element in $Eff(X).$ Intersecting this divisor with the above nef divisors, one can check that $e_1,e_2,f_1,g_1 \geq 0.$ Therefore we see that $Eff(X)$ is a rational polyhedral cone generated by $E_1,E_2,F_1,G_1.$ \end{proof} From the previous lemma, we can also compute $Nef(X).$ \begin{lemma} The nef cone of $X$ is a rational polyhedral cone generated by $F_1+E_1+G_1,$ $F_1+E_2+G_1,$ $E_1+E_2+2F_1,$ $E_1+E_2+2G_1.$ \end{lemma} \begin{proof} We know that the effective cone of $X$ is a rational polyhedral cone generated by $E_1,E_2,F_1,G_1.$ Because the nef cone is the dual polyhedral cone, we get the desired result by direct computation.
\end{proof} To prove that these nef divisors are semiample, let us consider involutions on $X.$ It is easy to see that $Z=\langle (13)(24), (56) \rangle \leq G=\langle (1234), (12)(34), (56) \rangle \leq S_6$ is the center of $G.$ Therefore we see that there are commuting involutions on $X.$ From these involutions we have the following. \begin{lemma} The 1-dimensional ramification locus of the $Z$-action is $F_1+F_2+F_3+G_1+G_4+E_1+E_2.$ \end{lemma} \begin{proof} We can directly compute the fixed locus of each action from the group-theoretic data. \end{proof} \begin{lemma} The Picard number of $X/Z$ is 4. \end{lemma} \begin{proof} Note that $F_1$ is isomorphic to $C/\langle (1432) \rangle$ and $G_1$ is isomorphic to $D/\langle (1432)(56) \rangle.$ Then from the group-theoretic data and the construction of the $\langle (56) \rangle$-action we see that this involution does not change the exceptional locus. Similarly, the $\langle (13)(24) \rangle$-action does not change the exceptional locus. Because both actions preserve the fibration structures, we see that the Picard number of $X/Z$ is 4. \end{proof} Therefore we obtain the following result. \begin{theorem} Let $X$ be a product-quotient surface with $p_g=q=0$ and $K^2=6,$ $G=D_4 \times \mathbb{Z}/2\mathbb{Z}.$ Then $X$ is a Mori dream space. \end{theorem} \begin{proof} From the above discussion we see that $K_X$ is numerically equivalent to $2E_1+2E_2+3F_1+G_1.$ Then using the ramification formula we see that the pull-back of the anticanonical divisor of $X/Z$ is numerically equivalent to $2E_1+2E_2+2F_1+2G_1.$ Because we know $Eff(X),$ we can check that this is a nef and big divisor. Therefore $X/Z$ is a Mori dream surface with Picard number 4, and we see that $X$ is a Mori dream surface from Proposition \ref{criterion}. \end{proof} \subsection{Keum-Naie surfaces : product-quotient surfaces with $K^2=4,$ $G=\mathbb{Z}/4\mathbb{Z} \times \mathbb{Z}/2\mathbb{Z}$ case} Let $X$ be a product-quotient surface with $p_g=q=0, K^2=4$ and $G=\mathbb{Z}/4\mathbb{Z} \times \mathbb{Z}/2\mathbb{Z}.$ Note that these surfaces form a 2-dimensional subfamily of the 6-dimensional family of Keum-Naie surfaces with $K^2=4.$ See \cite{BC2} for more details. An explicit description of such surfaces was provided in \cite{BCGP}. In this case, $C$ and $D$ are curves of genus 3 with $G$-action, and the $G$-action is encoded in the following data: $t_1 : (2,2,4,4),$ $S_1 : (2,1),(2,1),(3,1),(1,1)$ and $t_2 : (2,2,4,4),$ $S_2 : (0,1),(0,1),(3,0),(1,0).$ There are singular points of type $4 \times \frac{1}{2}(1,1)$ on $(C \times D)/G$; see \cite{BCGP} for more details. \\ From the results of Serrano and Polizzi and the above data we can compute the fibration structures of $X.$ Let $E_1,E_2,E_3,E_4$ be the four exceptional divisors in $X$ ordered counterclockwise, let $F_1$ (resp. $F_2$) be the reduced fiber of one fibration to $\mathbb{P}^1$ meeting $E_1,E_2$ (resp. $E_3,E_4$), and let $G_1$ (resp. $G_2$) be the reduced fiber of the other fibration to $\mathbb{P}^1$ meeting $E_2,E_3$ (resp. $E_4,E_1$). We can compute the intersections between these curves as follows.
\\ \begin{center} \begin{tabular}{|c|c|c|c|c|c|c|c|c|} \hline $\cdot$ & $E_1$ & $E_2$ & $E_3$ & $E_4$ & $F_1$ & $F_2$ & $G_1$ & $G_2$ \\ \hline $E_1$ & -2 & 0 & 0 & 0 & 1 & 0 & 0 & 1 \\ \hline $E_2$ & 0 & -2 & 0 & 0 & 1 & 0 & 1 & 0 \\ \hline $E_3$ & 0 & 0 & -2 & 0 & 0 & 1 & 1 & 0 \\ \hline $E_4$ & 0 & 0 & 0 & -2 & 0 & 1 & 0 & 1 \\ \hline $F_1$ & 1 & 1 & 0 & 0 & -1 & 0 & 0 & 0 \\ \hline $F_2$ & 0 & 0 & 1 & 1 & 0 & -1 & 0 & 0 \\ \hline $G_1$ & 0 & 1 & 1 & 0 & 0 & 0 & -1 & 0 \\ \hline $G_2$ & 1 & 0 & 0 & 1 & 0 & 0 & 0 & -1 \\ \hline \end{tabular} \end{center} From the above intersection matrix we can find a basis of $Pic(X).$ \begin{lemma} $F_1,E_1,G_2,E_4,F_2,E_3$ form a $\mathbb{Z}$-basis of $Pic(X)/tors.$ \end{lemma} \begin{proof} One can compute the intersection matrix of $F_1,E_1,G_2,E_4,F_2,E_3$; its determinant is $-1.$ Therefore we get the desired result. \end{proof} Now let us compute the canonical bundle. \begin{lemma} We have the following numerical equivalence. $$ K_X \sim_{num} E_1+2G_2+2E_4+2F_2+E_3 $$ \end{lemma} \begin{proof} From the above lemma we have the following numerical equivalence relation. $$ K_X \sim_{num} f_1F_1+e_1E_1+g_2G_2+e_4E_4+f_2F_2+e_3E_3 $$ Then we have $f_1=f_1+g_2-2e_1=g_2+f_2-2e_4=f_2-2e_3=0$ and $-f_1+e_1=e_1+e_4-g_2=e_3+e_4-f_2=e_3=1.$ Therefore we have $f_1=0, e_1=1, g_2=2, e_4=2, f_2=2, e_3=1.$ \end{proof} The following numerical equivalences will play an important role. \begin{lemma} We have the following numerical equivalences. $$ 2F_1+E_1+E_2 \sim_{num} 2F_2+E_3+E_4 $$ $$ 2G_1+E_2+E_3 \sim_{num} 2G_2+E_1+E_4 $$ $$ F_1+E_1+G_2 \sim_{num} F_2+E_3+G_1 $$ $$ F_1+E_2+G_1 \sim_{num} F_2+E_4+G_2 $$ \end{lemma} \begin{proof} One can directly check the above numerical equivalences by intersecting with $F_1,E_1,G_2,E_4,F_2,E_3,$ which form a $\mathbb{Z}$-basis of $Pic(X)/tors.$ \end{proof} We can compute the effective cone of $X$ as follows. \begin{proposition} The effective cone of $X$ is generated by $E_1,E_2,E_3,E_4,F_1,F_2,G_1,G_2.$ \end{proposition} \begin{proof} Let $D$ be an irreducible integral effective divisor on $X$ which lies on an extremal ray of $Eff(X).$ Suppose that $D$ does not lie in the convex cone generated by $E_1,E_2,E_3,E_4,F_1,F_2,G_1,G_2.$ Then the intersection of $D$ with any divisor among $E_1,E_2,E_3,E_4,F_1,F_2,G_1,G_2$ is nonnegative. Let us write $D \sim_{num} f_1F_1+e_1E_1+g_2G_2+e_4E_4+f_2F_2+e_3E_3.$ Intersecting $D$ with $E_2$ and $G_1$ gives us $f_1 \geq 0$ and $e_3 \geq 0.$ Intersecting $D$ with $F_1$ gives us $-f_1+e_1 \geq 0.$ Therefore we see that $e_1 \geq 0.$ Intersecting $D$ with $E_3$ gives us $-2e_3+f_2 \geq 0.$ Therefore we see that $f_2 \geq 0.$ Because $F_1+E_1+G_2$ is nef, we see that $e_4 \geq 0.$ Because $E_4+2F_2+E_3$ is nef, we see that $g_2 \geq 0.$ Then $D$ is a linear combination of $F_1,E_1,G_2,E_4,F_2,E_3$ with nonnegative coefficients, which gives a contradiction. Therefore we get the desired result. \end{proof} Therefore we see that $Eff(X)$ is a rational polyhedral cone generated by $E_1,E_2,E_3,E_4,$ $F_1,F_2,G_1,G_2.$ Then from general facts of convex geometry we can find the generators of $Nef(X)$ explicitly. Let us recall the process to find the generators of $Nef(X)$ described in \cite{Fulton2}. Let $A$ be a rational polyhedral cone in a real vector space $V$ of dimension $\rho.$ For every set of $\rho-1$ linearly independent vectors among the generators of $A,$ consider a nonzero vector $w$ annihilating the set. If either $w$ or $-w$ pairs nonnegatively with all generators of $A,$ then take it as one of the generators of $A^\vee.$ From this process we can compute the generators of $Nef(X).$
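The following minimal Python sketch (assuming SymPy) implements this procedure; for simplicity it pairs vectors via the standard inner product on $\mathbb{R}^\rho,$ whereas for $Nef(X)$ one would pair divisor classes via the intersection matrix instead, and the toy example at the end is only meant to illustrate the computation.
\begin{verbatim}
# Sketch of the dual-cone procedure recalled from Fulton: for each
# (rho-1)-subset of linearly independent generators, take a kernel
# vector w and keep w (or -w) when it pairs nonnegatively with every
# generator of the cone.
from itertools import combinations
from sympy import Matrix

def dual_cone_generators(gens):
    rho = len(gens[0])
    rays = []
    for subset in combinations(gens, rho - 1):
        A = Matrix(list(subset))
        if A.rank() < rho - 1:
            continue                      # vectors not independent
        for w in A.nullspace():           # w annihilates the subset
            vals = [w.dot(Matrix(g)) for g in gens]
            for s in (1, -1):
                if all(s * v >= 0 for v in vals):
                    rays.append(s * w.T)
    return rays

# Toy check: the first quadrant in R^2 is self-dual.
print(dual_cone_generators([(1, 0), (0, 1)]))
\end{verbatim}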
\begin{proposition} The generators of $Nef(X)$ are semiample and therefore $Nef(X)=SAmp(X).$ \end{proposition} \begin{proof} From the above process, we can describe all generators of $Nef(X).$ The above proposition tells us that $Eff(X)$ has eight extremal rays, and hence we have $\binom{8}{5}=56$ sets of five elements among the generators. Because the configuration of the curves $E_1,E_2,E_3,E_4,F_1,F_2,G_1,G_2$ has a (numerical) symmetry, it is enough to check 10 configurations among the 56 sets (this is easy to see by considering the complement of the five elements in $\{ E_1,E_2,E_3,E_4,F_1,F_2,G_1,G_2 \}$). Let us check that $Nef(X)=SAmp(X)$ as follows. \\ (1) Consider the set $\{ F_1, E_1, G_2, E_4, F_2 \}.$ Then $F_1+E_1+G_2-F_2-E_3$ is a nonzero element which is orthogonal to all of them. However it is not a multiple of a nef divisor since $(F_1+E_1+G_2-F_2-E_3) \cdot G_1 < 0$ and $(F_1+E_1+G_2-F_2-E_3) \cdot E_2 > 0.$ \\ (2) Consider the set $\{ E_1, G_2, E_4, F_2, E_3 \}.$ Then $-2F_1-E_1+E_4+2F_2+E_3$ is a nonzero element which is orthogonal to all of them. However it is not a multiple of a nef divisor since $(-2F_1-E_1+E_4+2F_2+E_3) \cdot E_2 < 0$ and $(-2F_1-E_1+E_4+2F_2+E_3) \cdot G_1 > 0.$ \\ (3) Consider the set $\{ E_1, G_2, E_4, F_2, G_1 \}.$ Then $-F_1+G_2+E_4+F_2$ is a nonzero element which is orthogonal to all of them. However it is not a multiple of a nef divisor since $(-F_1+G_2+E_4+F_2) \cdot E_2 < 0$ and $(-F_1+G_2+E_4+F_2) \cdot E_3 > 0.$ \\ (4) Consider the set $\{ E_1, G_2, E_4, E_3, G_1 \}.$ Then $E_1+2G_2+E_4$ is a nonzero element which is orthogonal to all of them. It is a nef divisor which is semiample since $E_1+2G_2+E_4 \sim E_2+2G_1+E_3.$ \\ (5) Consider the set $\{ G_2, E_4, F_2, G_1, E_2 \}.$ Then $G_2+E_4+F_2$ is a nonzero element which is orthogonal to all of them. It is a nef divisor which is semiample since $G_2+E_4+F_2 \sim G_1+E_2+F_1.$ \\ (6) Consider the set $\{ G_2, E_4, F_2, E_3, E_2 \}.$ Then $-E_1+E_4+2F_2+E_3$ is a nonzero element which is orthogonal to all of them. However it is not a multiple of a nef divisor since $(-E_1+E_4+2F_2+E_3) \cdot F_1 < 0$ and $(-E_1+E_4+2F_2+E_3) \cdot G_1 > 0.$ \\ (7) Consider the set $\{ E_1, G_2, E_4, E_3, E_2 \}.$ Then $E_1+2G_2+E_4$ is a nonzero element which is orthogonal to all of them. It is a nef divisor which is semiample since $E_1+2G_2+E_4 \sim E_3+2G_1+E_2.$ \\ (8) Consider the set $\{ E_1, E_4, F_2, G_1, E_2 \}.$ Then $E_1+2G_2+2E_4+2F_2$ is a nonzero element which is orthogonal to all of them. It is a nef divisor which is semiample since $E_1+2G_2+2E_4+2F_2 \sim E_1+2F_1+2E_2+2G_1 \sim E_4+2F_2+E_3+2G_1+E_2.$ \\ (9) Consider the set $\{ E_1, G_2, F_2, G_1, E_2 \}.$ Then $E_1+2G_2+E_4+F_2$ is a nonzero element which is orthogonal to all of them. It is a nef divisor which is semiample since $E_1+2G_2+E_4+F_2 \sim F_2+E_3+2G_1+E_2 \sim E_1+G_2+G_1+E_2+F_1.$ \\ (10) Consider the set $\{ G_2, F_2, G_1, E_2, F_1 \}.$ Then $G_2+E_4+F_2$ is a nonzero element which is orthogonal to all of them. It is a nef divisor which is semiample since $G_2+E_4+F_2 \sim G_1+E_2+F_1.$ \\ Therefore we see that $Nef(X)=SAmp(X).$ \end{proof} Therefore we have the following conclusion. \begin{theorem} Let $X$ be a product-quotient surface with $p_g=q=0,$ $K^2=4$ and $G=\mathbb{Z}/4\mathbb{Z} \times \mathbb{Z}/2\mathbb{Z}$ as above. Then $X$ is a Mori dream space.
\end{theorem} \section{Negative curves and the bounded negativity conjecture} The bounded negativity conjecture is one of the oldest problems in the theory of algebraic surfaces. It is still a widely open problem. See \cite{BHKKMSRS} for more details about the conjecture. \begin{conjecture}[Bounded negativity conjecture] Let $X$ be a smooth projective surface. Then there is a nonnegative integer $b_X$ such that for any negative curve $C$ the following inequality holds. $$ C^2 \geq -b_X $$ \end{conjecture} It seems to be well known among experts that the bounded negativity conjecture is true for Mori dream surfaces. Indeed, the proof is simple. \begin{proposition}[Bounded negativity conjecture] Let $X$ be a smooth projective surface whose $Eff(X)$ is a rational polyhedral cone. Then there are only finitely many negative curves on $X.$ In particular, the bounded negativity conjecture holds for Mori dream surfaces. \end{proposition} \begin{proof} Let $C$ be a negative curve on $X.$ Then $C$ lies on one of the extremal rays of $Eff(X)$ and it is the only irreducible reduced curve on this extremal ray. From our assumption, $Eff(X)$ is a rational polyhedral cone and thus has only finitely many extremal rays. Therefore there are only finitely many negative curves on $X.$ \end{proof} Therefore the surfaces of general type with $p_g=0$ from Theorem \ref{Main} satisfy the bounded negativity conjecture. Moreover, from our explicit computation of $Eff(X)$, we can describe all negative curves as in Theorem \ref{main}. Indeed, when $\rho(X)=1,$ we see that the self-intersection of a curve is always positive. When $X$ is a surface isogenous to a higher product of unmixed type, we see that the intersection matrix is hyperbolic. When $\rho(X) \geq 3,$ $Eff(X)$ is a convex polyhedral cone generated by negative curves since $Eff(X)$ is a rational polyhedral cone. Conversely, every negative curve lies on an extremal ray of $Eff(X).$ Therefore we get the desired results from the previous discussions. \section{Discussions} \subsection{Minimal surfaces of general type with $p_g \neq 0$} In this paper we have discussed Mori dream surfaces of general type with $p_g = 0.$ However, there is no reason to restrict one's attention to only those surfaces. Indeed, there are lots of Mori dream spaces with $p_g \neq 0.$ A simple example is as follows. \begin{lemma} A hypersurface $X$ in $\mathbb{P}^3$ is a Mori dream space if $\rho(X)=1.$ \end{lemma} \begin{proof} Let $X$ be a degree $d$ hypersurface in $\mathbb{P}^3.$ Then it is enough to prove that $q(X)=0.$ Consider the following short exact sequence. $$ 0 \to \mathcal{O}_{\mathbb{P}^3}(-d) \to \mathcal{O}_{\mathbb{P}^3} \to \mathcal{O}_X \to 0 $$ We know that $H^1(\mathbb{P}^3,\mathcal{O}_{\mathbb{P}^3})=H^2(\mathbb{P}^3,\mathcal{O}_{\mathbb{P}^3}(-d))=0$ and hence $q=0.$ Because $\rho(X)=1$ and $q=0,$ we see that $X$ is a Mori dream space. \end{proof} From the Noether-Lefschetz theorem, we have the following corollary. \begin{corollary} A very general hypersurface of degree $d \geq 4$ in $\mathbb{P}^3$ is a Mori dream space. \end{corollary} Indeed, we can find more examples as follows. \begin{lemma} A complete intersection variety of dimension greater than or equal to two with $\rho=1$ is a Mori dream space. \end{lemma} \begin{proof} Let $X$ be a complete intersection variety, i.e. the zero locus of a regular section $s$ of $\mathcal{E}$ on $\mathbb{P}^N,$ where $\mathcal{E}$ is a direct sum of ample line bundles.
Then it is enough to prove that $q(X)=0.$ The Koszul resolution $$ 0 \to \bigwedge^r\mathcal{E}^\vee \to \cdots \to \bigwedge^2\mathcal{E}^\vee \to \mathcal{E}^\vee \to \mathcal{O}_{\mathbb{P}^N} \to \mathcal{O}_X \to 0 $$ splits into the short exact sequences $$ 0 \to \mathcal{F}_0=I_X \to \mathcal{O}_{\mathbb{P}^N} \to \mathcal{O}_X \to 0 $$ $$ 0 \to \mathcal{F}_1 \to \mathcal{E}^\vee \to I_X \to 0 $$ $$ 0 \to \mathcal{F}_2 \to \bigwedge^2\mathcal{E}^\vee \to \mathcal{F}_1 \to 0 $$ $$ \cdots $$ $$ 0 \to 0=\mathcal{F}_r \to \bigwedge^r\mathcal{E}^\vee \to \mathcal{F}_{r-1} \to 0 $$ Then we have $H^1(X,\mathcal{O}_{X})=H^2(\mathbb{P}^N,I_X)=H^3(\mathbb{P}^N,\mathcal{F}_1)=\cdots=H^{r+1}(\mathbb{P}^N,\mathcal{F}_{r-1})=0,$ since $\mathcal{O}_{\mathbb{P}^N}, \mathcal{E}^\vee, \bigwedge^2\mathcal{E}^\vee, \cdots, \bigwedge^r\mathcal{E}^\vee$ are ACM bundles. Therefore we have $q=0$ and $\rho(X)=1,$ and we see that $X$ is a Mori dream space. \end{proof} \begin{corollary} A general complete intersection variety of dimension greater than or equal to two is a Mori dream space. \end{corollary} It will be an interesting task to develop a systematic approach to the study of Mori dream spaces with $p_g \neq 0.$ \subsection{Numerical Godeaux surfaces and surfaces with $\kappa=1$} When a minimal surface of general type has $p_g=q=0$ and $K^2=1$, we call this surface a numerical Godeaux surface. It is an interesting task to find examples of numerical Godeaux surfaces which are Mori dream surfaces. For example, it would be interesting to know whether the classical Godeaux surface is a Mori dream surface or not. It is also an interesting task to find an example of a surface with $\kappa=1$ which is a Mori dream surface.
\section{Introduction} \begin{figure}[t] \centering \includegraphics[width=1.0\columnwidth]{figure1.pdf} \caption{Illustration of four feature pyramid designs. (a) illustrates the anchor-based feature pyramid method \cite{Liu2016SSD} for multi-scale object detection, (b) fuses features from different levels top-to-bottom and bottom-to-top to detect multi-scale objects, and (c) shows that M2Det \cite{Zhao2018M2Det} extracts features through many U-shape modules and then combines attention mechanisms to improve detection performance. However, these methods require more time and space. (d) illustrates our multi-scale object detection with a shared encoder-decoder module that learns shared features on multi-scale objects.} \label{figure1} \end{figure} \begin{figure*}[t] \centering \includegraphics[width=.90\textwidth]{figure4} \caption{ResNet \cite{Fei2017Residual} equipped with CBAM. The spatial attention mechanism exploits average-pooling and max-pooling followed by a sigmoid layer to normalize features; the channel attention exploits only average-pooling and a sigmoid layer.} \label{figure4} \end{figure*} In recent years, owing to their rich representations, CNNs have significantly improved the performance of many computer vision tasks (classification, detection and segmentation). Top-5 accuracy exceeds 90\% on ImageNet, and \cite{xie2019self} proposes a simple self-training method achieving a top-1 accuracy of 87.4\%. However, detection performance still lags behind, and there are different methods to improve it. According to the use of region proposals, object detection methods are divided into two-stage methods \cite{girshick14CVPR} \cite{Dai2016R} \cite{Girshick2015Fast} \cite{Ren2017Faster}, which mainly focus on region proposals, and one-stage methods \cite{Redmon2017YOLO9000} \cite{Liu2016SSD} \cite{fu2017dssd} \cite{kong2017ron}. The two-stage methods perform better than the one-stage methods because of multi-scale proposals, but they are much slower. According to the use of anchors, detectors are divided into anchor-based detectors \cite{Ren2017Faster} \cite{Lin2016Feature} \cite{Zhao2018M2Det}, which require prior information about the objects, such as their density, size or shape, and anchor-free detectors \cite{Yu2016UnitBox} \cite{Huang2015DenseBox} \cite{Redmon2015You} \cite{Redmon2018YOLOv3} \cite{Law2018CornerNet} \cite{Lin2017Focal} \cite{Zhu2019Feature}, which use fully convolutional prediction, corner points of the object or adaptive selection of features, reducing the inference time. Some anchor-free detection methods use feature pyramids to improve multi-scale object detection. Based on feature pyramids and anchor-free methods, we construct a shared encoder-decoder to improve detection performance. However, object detection faces many problems, such as lighting, size and overlapping, resulting in poor performance. Especially for multi-scale objects, \cite{Lin2016Feature} \cite{SinghAn} \cite{Liu2016SSD} \cite{Zhao2018M2Det} exploit feature pyramids with attention mechanisms to improve the performance, but at a higher cost. FPN \cite{Lin2016Feature} adapts anchors to different feature levels for different backbones, obtaining a detection AP of 33.9\%, and its stronger variant obtains a detection AP of 35.8\%, about 3\% higher than \cite{Ren2017Faster}. \cite{Xiaowei2018SINet} proposes a new context-aware ROI pooling method, achieving an AP of 89.6\% on LSVH. \cite{LiPerceptual} applies a GAN to small object detection.
As shown in Figure \ref{figure1}, feature pyramids \cite{Lin2016Feature} \cite{Liu2016SSD} \cite{Zhao2018M2Det} are built top-to-bottom, bottom-to-top, or both, and the parameters of the different levels are independent. Inspired by them, we assume that a shared module can be implemented for multi-level features, extracting shared features. The attention mechanism enhances the key information and suppresses the useless information. Attention mechanisms mainly operate on the spatial domain, the channel domain, or both. Through an attention mechanism, \cite{jaderberg2015spatial} transforms the spatial information into another distribution, retaining the key information. \cite{hu2018squeeze} divides the attention into three parts across the channel domain: squeeze, excitation and scale. Compared with the standard residual module, \cite{Fei2017Residual} uses soft attention and a mask mechanism, and combines the current-level information with the previous one. As shown in Figure \ref{figure4}, based on \cite{hu2018squeeze}, \cite{Woo2018CBAM} infers attention maps along two independent dimensions (channel and spatial) and multiplies the attention maps with the input features to improve performance. On the ImageNet dataset, CBAM \cite{Woo2018CBAM} is 1.76\% higher than the baseline ResNet-50. Therefore, for objects of different sizes, do different pooling operations lead to different detection performance? Can minimum pooling improve the detection performance on small objects? Our contributions are as follows: \begin{quote} \begin{itemize} \item We propose a shared module that learns a feature pyramid by an encoder-decoder with attention mechanism for object detection, extracting multi-level features with shared parameters to improve the performance on multi-scale objects. \item We propose a semantic-revised method for geometric locations. Based on the semantic features, our detector can detect objects adaptively, which is more flexible than state-of-the-art methods that use geometric prediction alone, and our method is more suitable for actual scenes. \item This work studies the impact of the maximum, average, and minimum pooling operations for small and large objects. The method combining a minimum pooling with \cite{Woo2018CBAM} improves the detection performance on small objects. \item Based on ResNet-50, our experiment achieves a detection AP@0.5 of 49.8\% on the standard MSCOCO 2014 benchmark. \end{itemize} \end{quote} \section{Related Works} \textbf{Feature Pyramid.} For multi-scale object detection, traditional methods obtain feature pyramids by different algorithms. SSD \cite{Liu2016SSD} directly predicts from features at different levels and solves the multi-scale problem to a certain extent. \cite{Zhao2018M2Det} uses U-shape modules to extract high-level features, and then combines the extracted features with the next base features as the input of the next U-shape module. The multi-level weights are independent, resulting in more cost and less correlation. \cite{BaeObject} addresses the problem by splitting features into different modules and learning the relationship between the original features and each sub-module. Because the relationships between the sub-modules are complex, learning them becomes a challenge. Therefore, we use a shared module to obtain the multi-level shared features.
\textbf{Encoder-Decoder.} Traditional algorithms \cite{Simonyan2014Very} \cite{Fei2017Residual} learn more discriminative features with deeper networks; \cite{Fei2017Residual} introduces the residual module. Each residual module contains two paths, a direct path for the input features and a path with two to three convolution operations; the features obtained from the two paths are fused to get enhanced features. \cite{sutskever2014sequence} proposes an encoder-decoder to learn text sequences. \cite{badrinarayanan2017segnet} uses an encoder-decoder for the segmentation task. Corresponding to the encoder, the decoder restores the same spatial size and the same number of channels. Therefore, we exploit a shared encoder-decoder to improve detection performance. \textbf{Attention Mechanism.} Generally, deeper networks or attention mechanisms can improve performance. \cite{Fei2017Residual} fuses multi-level features to improve classification performance. \cite{hu2018squeeze} improves performance by exploiting the correlation between channels. The light-weight module of \cite{Woo2018CBAM} adopts channel and spatial attention mechanisms, further improving performance. We analyze the impact of different attention mechanisms (CBAM with a minimum pooling operation, and our attention mechanism) on the detection performance for multi-scale objects. We find that combining CBAM with minimum pooling can improve the detection performance on small objects on the standard MSCOCO 2014 dataset. \section{Our Approach} In this section, as shown in Figure \ref{figure3}, based on ResNet-50, our detection architecture includes a shared encoder-decoder module with attention mechanism for the feature pyramid, and a shared detector header with a classification prediction branch, a detection branch, a center-ness branch and a semantic-related center branch which revises the regression prediction branch. We introduce the semantic-revised branch to make the detector more suitable for actual applications. For the feature pyramid, the shared encoder down-samples features by a stride-2 convolution followed by a group normalization and a non-linear activation function. Details follow. \begin{figure*}[t] \centering \includegraphics[width=1.0\textwidth]{figure3} \caption{An overview of the proposed anchor-free pixel-semantic revising of position. The architecture exploits the backbone and the shared encoder-decoder module with attention mechanism, obtaining more details for location. Then, the regression prediction produces the four distances (from the top boundary to the center, the center to the bottom, the right to the center, and the center to the left). The semantic-related center prediction branch (center regression + sigmoid in the figure) obtains the semantic-related center position for revising the pixel-level position prediction (regression in the figure).} \label{figure3} \end{figure*} \subsection{Shared Encoder-Decoder Module with Attention Mechanism (SEDAM)} As shown in Figure \ref{figure1}, we propose a shared module for multi-level feature pyramids. Since the semantic features within a category are similar, we propose that the shared module learns the common features of multi-scale objects within a class, improving the generalization performance. It is a symmetrical structure. In the encoder, features are down-sampled by a convolution with stride 2 and padding 1, followed by a group normalization with 32 groups and a non-linear activation function. The more layers there are, the more discriminative the extracted features become but, on the contrary, the more location details are lost. In the decoder, the features are up-sampled by a bilinear interpolation, followed by a convolution with $1\times1$ kernel size and a nonlinear function. Additionally, to retain more useful information, we analyze different attention mechanisms in the shared encoder-decoder, including a spatial attention with different pooling operations, a channel attention, or both.
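As a concrete illustration, the following minimal PyTorch sketch shows the shared encoder-decoder described above; the kernel sizes, the form of the smooth layer and the placement of the attention module are our assumptions where the text is silent, and the same module, with shared weights, is applied to every pyramid level.
\begin{verbatim}
# Sketch (assuming PyTorch) of the shared encoder-decoder: three
# stride-2 conv/GroupNorm/ReLU stages, a smooth layer, and three
# bilinear-upsampling stages with 1x1 convolutions; the attention
# module described below would follow each up-sampling stage.
import torch.nn as nn

def down(c):
    return nn.Sequential(
        nn.Conv2d(c, c, kernel_size=3, stride=2, padding=1),
        nn.GroupNorm(32, c), nn.ReLU(inplace=True))

def up(c):
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode="bilinear",
                    align_corners=False),
        nn.Conv2d(c, c, kernel_size=1),
        nn.GroupNorm(32, c), nn.ReLU(inplace=True))

class SharedEncoderDecoder(nn.Module):
    def __init__(self, c=640):  # 640 channels, as in the configuration
        super().__init__()
        self.encoder = nn.Sequential(down(c), down(c), down(c))
        self.smooth = nn.Conv2d(c, c, kernel_size=3, padding=1)
        self.decoder = nn.Sequential(up(c), up(c), up(c))

    def forward(self, x):  # the same weights are reused at every level
        return self.decoder(self.smooth(self.encoder(x)))
\end{verbatim}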
\begin{figure}[t] \centering \includegraphics[width=0.9\columnwidth]{figure5.pdf} \caption{Illustration of our attention mechanism module: we use average pooling and maximum pooling, each followed by a fully connected layer, and the fused output is multiplied with the original features.} \label{figure5} \end{figure} \textbf{Our Attention Mechanism.} As shown in Figure \ref{figure4}, the state-of-the-art CBAM \cite{Woo2018CBAM} mainly contains channel and spatial attention mechanisms for the classification task. We use channel attention to improve the detection AP. As shown in Figure \ref{figure5}, in our attention mechanism the features pass through two paths (an average pooling followed by a fully connected layer, and a maximum pooling followed by a fully connected layer). We fuse the outputs of the two paths and then multiply the result with the input features to enhance the key information. \subsection{Shared Detector Header} We apply a shared detector header, and we regard the fusion of the output of the shared encoder-decoder with the original features as its input, in order to maintain more knowledge about location and preserve the key information for localization. When the features of different levels use the same detector header, the detection AP on small objects is better. As shown in Figure \ref{figure3}, we use the semantic-related location prediction to obtain outputs which revise the results of the regression branch. We regard the geometric position of each bounding box as a $4$-D vector. The semantic features yield the semantic-related center location, so that the location prediction is related to the semantic information. \subsection{Margin Regression} In the feature pyramid, we use four levels of features to detect objects. We elaborate the prediction process at level $i$ in detail; the other levels are similar. Many candidate bounding boxes are obtained at level $i$. We define the candidates at level $i$ as $D_i=({x^k}_s, {y^k}_s, {x^k}_m, {y^k}_m, c_k) \in R^4\times\{1,2,3,\ldots,C\},$ where $C$ is the number of categories (we set it to 80 on MSCOCO) and $c_k$ represents the class label of the $k$-th bounding box. We propose a semantic-related location, as shown in Equation \ref{eq2}. For the semantic center, $B_i=({x^j}_i, {y^j}_i)$ represents the $j$-th semantic-related center position prediction at level $i$; the number of semantic centers $B_i$ equals the number of candidates $D_i$. In the classification module at level $i$, if the center proposal position $(x^k_i+x^j_i, y^k_i+y^j_i)$ falls into the ground-truth box at level $i$, the bounding box is a positive example and the class label is $c_k$. Otherwise, the bounding box is a negative example and the label is 0 (background class). \begin{equation} \small x_1= (x^k_i+x^j_i)-x^k_s,\; y_1=(y^k_i+y^j_i)-y^k_s,\; x_2= x^k_m+(x^k_i+x^j_i),\; y_2=y^k_m+(y^k_i+y^j_i) \label{eq2} \end{equation} where $(x^k_i, y^k_i)$ denotes the $k$-th center proposal position at level $i$.
$(x^j_i, y^j_i)$ represents the $j$-th semantic-related center prediction for revising the $k$-th center proposal position, $(x^k_s, y^k_s)$ is the left-top margin of the $k$-th prediction, and $(x^k_m, y^k_m)$ is the right-bottom margin of the $k$-th prediction. $(x_1, y_1)$ and $(x_2, y_2)$ represent the left-top position and the right-bottom position of the $k$-th prediction at level $i$, respectively. \subsection{Network Configuration} Based on the ResNet-50 backbone, as shown in Figure \ref{figure3}, the encoder uses three down-sampling modules (a convolution, a group normalization and a ReLU), followed by a smooth layer, and the decoder uses three up-sampling modules (a bilinear interpolation, a convolution, a group normalization, a ReLU and the attention mechanism). The base features have 256 channels. If we set many more channels in SEDAM, such as 1024 or 4096, the number of channels far exceeds that of the base features, resulting in unnecessary computation. In this work, we set it to 640. As shown in Figure \ref{figure3}, we set the input size to 800$\times$800 for the backbone. \textbf{Loss Function.} The structure contains center prediction, regression and classification losses. If a location prediction is closer to the center of the target, the probability value is closer to 1.0. The classification loss is a focal loss with an alpha of 0.25 and a gamma of 2. Finally, we use a cross-correlation loss with a correlation coefficient to avoid non-overlapping parts. The loss function is detailed in Equation \ref{eq3}. In our experiments, we set all balance factors ($\gamma$ and $\beta$) to 1; details are explained in \cite{tian2019fcos}: \begin{equation} \begin{split} L(p_{x,y},d_{x,y})= & \frac{1}{N_{pos}}\sum\limits_{ x,y}L_{cls}(p_{ x,y},{c^\ast}_{ x,y})\\ &+\frac{\gamma}{N_{pos}}\sum\limits_{ x,y}L_{reg}( d_{x,y},{d^\ast}_{ x,y})\\ &+\frac{\beta}{N_{pos}}\sum\limits_{ x,y}L_{center}(p_{ x,y},{c^\ast}_{ x,y}) \end{split} \label{eq3} \end{equation} where $L_{cls}(p_{ x,y},{c^\ast}_{ x,y})$ is the classification loss between predicted labels and ground-truth labels, $L_{reg}( d_{x,y},{d^\ast}_{ x,y})$ denotes the regression loss with target center weights between the predicted locations and the target locations, where the weights are related to the target margins (the left, the right, the top and the bottom); we regard the IOU-loss as our regression loss. $L_{center}(p_{ x,y},{c^\ast}_{ x,y})$ represents the center loss, which is the cross-entropy loss between the center-ness predictions and the target center weights. $N_{pos}$ is the number of positive examples, i.e. examples which are not background. \section{Experiments and Results} In this section, we evaluate different detection methods on the large-scale standard MSCOCO 2014 benchmark. We use the 80 classes of the train/validation split; the training set includes 82783 images and the validation set includes 40504 images. To compare with the state-of-the-art, we compare with traditional methods based on FPN \cite{Lin2016Feature}. In our experiments, we evaluate four methods: A (without a shared encoder-decoder), B (a shared encoder-decoder with CBAM), C (a shared encoder-decoder combining CBAM with minimum pooling), and ours (a shared encoder-decoder with our attention mechanism). \textbf{Implementation Details.} Based on ResNet-50, our network is trained with stochastic gradient descent for $300k$ iterations, where the initial learning rate, the weight decay and the momentum are 0.01, 0.0005 and 0.9, respectively.
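For concreteness, a minimal sketch of this optimizer setup, assuming PyTorch; the placeholder \texttt{model} stands for the full detector (backbone, SEDAM and detector header).
\begin{verbatim}
# Sketch (assuming PyTorch) of the training setup described above:
# SGD with lr 0.01, momentum 0.9 and weight decay 0.0005, run for
# 300k iterations with a batch size of 8.
import torch
import torch.nn as nn

model = nn.Conv2d(3, 256, 3, padding=1)  # placeholder for the detector
optimizer = torch.optim.SGD(model.parameters(), lr=0.01,
                            momentum=0.9, weight_decay=0.0005)
\end{verbatim}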
We use ImageNet weights to initialize ResNet-50. For the shared encoder-decoder and the detector header, we initialize the weights with a Gaussian distribution. When a convolution in the shared encoder-decoder or the detector header has more than 32 channels, we apply group normalization to make training more stable. In our work, we use 2 TITAN Xp GPUs and a batch size of 8 for training. \begin{table*}[t] \caption{Comparison of different attention mechanisms. There are four methods: A (without a shared encoder-decoder), B (a shared encoder-decoder with CBAM), C (a shared encoder-decoder combining CBAM with minimum pooling), and ours (a shared encoder-decoder with our attention mechanism).} \label{table1} \smallskip \resizebox{0.98\textwidth}{!}{ \centering \begin{tabular}{|l|l|l|l|l|l|l|l|l|l|l|l|l|l|l|l|l|l|l|l|} \hline Method&SED&CBAM& IOU& Area& person&airplane&bus&train&fire hydrant&stop sign&cat&elephant&bear&zebra&giraffe&toilet&clock\\ \hline A&-&-& 0.5:0.95& S& 18.8&23.7&6.66&6.96&20.7&11.0&11.3&21.6&4.9&29.1&24.9&11.0&22.4\\ \hline B&$\checkmark$&$\checkmark$& 0.5:0.95&S& 19.4&23.2&9.02&7.07&22.5&12.0&10.1&24.0&8.11&28.6&26.2&12.0&22.9\\ \hline C&$\checkmark$&*& 0.5:0.95& S& 19.2&25.4&8.76&7.4&20.8&12.1&11.6&23.4&8.17&28.2&25.9&16.7&24.7\\ \hline \textbf{Ours}&$\checkmark$&-& 0.5:0.95& S& 19.6&23.8&8.26&7.23&21.9&12.3&9.68&23.4&9.41&29.8&26.6&13.6&24.0\\ \hline\hline A&-&-& 0.5:0.95&M& 44.3&40.4&31.9&25.3&52.4&55.6&43.4&44.5&58.9&50.8&54.9&41.7&48.6\\ \hline B&$\checkmark$&$\checkmark$& 0.5:0.95& M& 45.2&41.4&34.5&25.5&55.2&56.3&44.5&47.4&62.0&50.6&54.6&44.9&50.2\\ \hline C&$\checkmark$&*& 0.5:0.95& M& 45.5&43.7&34.1&28.0&57.4&57.6&43.7&46.4&58.4&51.8&56.0&43.7&49.9\\ \hline \textbf{Ours}&$\checkmark$&-& 0.5:0.95& M& 45.2&43.1&34.8&23.8&57.5&56.5&43.6&47.3&59.9&51.5&54.8&44.3&48.8\\ \hline\hline A&-&-& 0.5:0.95& L&52.5&51.1&63.1&54.3&62.0&77.7&49.4&57.0&59.4&56.4&54.0&49.0&50.5\\ \hline B&$\checkmark$&$\checkmark$& 0.5:0.95& L& 55.4&56.4&67.9&57.8&67.8&80.5&55.3&63.3&63.0&58.0&59.8&54.3&53.0\\ \hline C&$\checkmark$&*& 0.5:0.95& L& 56.3&58.6&68.2&59.9&69.1&81.4&57.3&63.6&64.6&60.8&60.2&56.7&52.3\\ \hline \textbf{Ours}&$\checkmark$&-& 0.5:0.95& L& \textbf{58.4}&\textbf{59.7}&\textbf{69.3}&59.5&\textbf{69.3}&80.6&57.1&\textbf{64.5}&\textbf{64.8}&\textbf{61.7}&\textbf{62.5}&\textbf{57.1}&\textbf{53.0}\\ \hline\hline A&-&-& 0.5& -&64.1&70.0&69.1&76.9&74.2&66.8&77.7&76.4&81.8&81.5&80.7&70.3&67.2\\ \hline B&$\checkmark$&$\checkmark$& 0.5& -& 68.9&74.0&73.2&80.0&77.5&69.4&81.7&81.2&84.9&83.9&84.8&74.1&69.0\\ \hline C&$\checkmark$&*& 0.5& -& 68.5&75.2&72.8&80.7&78.8&69.1&82.3&80.9&84.7&85.1&84.4&76.1&69.1\\ \hline \textbf{Ours}&$\checkmark$&-& 0.5& -& 69.3&73.5&72.9&79.1&79.0&69.9&81.9&81.0&84.5&83.9&85.0&75.1&68.1\\ \hline\hline A&-&-& 0.75&-& 33.7&42.6&55.1&54.0&56.2&57.7&52.6&51.9&65.9&52.4&53.6&49.0&36.8\\ \hline B&$\checkmark$&$\checkmark$& 0.75& -& 34.5&45.2&59.0&57.2&61.0&59.1&58.1&56.9&72.2&53.2&56.1&53.8&38.4\\ \hline C&$\checkmark$&*& 0.75& -& 35.1&47.7&58.9&59.4&61.2&59.4&59.8&56.3&70.2&53.7&57.8&56.0&39.6\\ \hline \textbf{Ours}&$\checkmark$&-& 0.75& -& 36.3&47.7&59.4&58.7&61.9&58.7&59.8&57.9&70.9&56.1&58.3&56.9&38.9\\ \hline\hline A&-&-& 0.5:0.95&-& 35.1&40.5&47.9&48.5&48.6&50.4&47.5&47.9&57.7&49.8&50.5&44.6&37.3\\ \hline B&$\checkmark$&$\checkmark$& 0.5:0.95&-& 36.9&43.4&52.1&51.6&53.0&52.2&52.4&52.8&61.2&50.7&53.8&49.2&38.7\\ \hline C&$\checkmark$&*& 0.5:0.95&-& 37.2&45.5&52.2&53.8&54.0&53.0&53.9&52.4&61.7&52.5&54.5&50.6&39.1\\ \hline \textbf{Ours}&$\checkmark$&-&
0.5:0.95&-& \textbf{38.0}&45.4&\textbf{52.7}&52.7&54.0&52.4&53.8&\textbf{53.6}&\textbf{62.2}&\textbf{53.0}&\textbf{55.8}&\textbf{51.0}&\textbf{38.7}\\ \hline \end{tabular} } \end{table*} \subsection{Ablation Studies} \textbf{The Importance of the Shared Encoder-Decoder.} As mentioned before, the feature pyramid can improve performance on multi-scale objects. As shown in Table \ref{table1}, method A is poor on small objects. For example, the clock, the stop sign and the bear achieve an AP of 22.4\%, 11.0\% and 4.9\%, respectively. We find that ours is better than A for large and medium objects. For large objects, the person, the airplane, the fire hydrant and the toilet achieve an AP 5.9\%, 8.6\%, 7.3\% and 8.1\% higher than A, respectively. As shown in Table \ref{table2}, ours with the semantic-revised module achieves a detection AP@0.5:0.95 1.0\% higher than B with the semantic-revised module. For small, medium and large objects, ours with the semantic-revised module achieves an AP 1.3\%, 1.8\% and 6.3\% higher than A without the semantic-revised module, respectively. Therefore, the shared encoder-decoder with our attention mechanism performs better on multi-scale objects. \textbf{Comparison of Different Attention Mechanisms.} We think that attention mechanisms can improve the performance for object detection. As shown in Table \ref{table2}, A, B, C and ours, each with the semantic-revised module, achieve a detection AP@0.5:0.95 of 25.3\%, 27.4\%, 27.8\%, and 28.4\%, respectively, so the gain from the minimum pooling operation alone is not obvious. For different IOU thresholds, A, B, C and ours with the semantic-revised module achieve a detection AP@0.75 of 24.9\%, 26.7\%, 27.3\%, and 28.1\%, respectively; at the same time, they achieve a detection AP@0.5 of 45.4\%, 49.2\%, 49.5\% and 49.8\%. Therefore, the shared encoder-decoder module with our attention mechanism can improve the detection performance. As shown in Table \ref{table1}, C is better than B on small objects. For the clock, the toilet and the airplane, C achieves a detection AP 1.8\%, 4.7\% and 2.2\% higher than B, respectively. According to Table \ref{table2}, we find that the minimum pooling performs better on small objects for the detection task, and the channel attention mechanism is more suitable for detecting multi-scale objects.
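To make the comparison concrete, the following minimal PyTorch sketch shows the channel attention of Figure \ref{figure5}; the shared bottleneck, the reduction ratio and the final sigmoid are our assumptions where the text is silent, and method C would additionally use a minimum-pooling path (e.g. \texttt{x.amin}).
\begin{verbatim}
# Sketch (assuming PyTorch) of the channel attention in Figure 5:
# average- and max-pooled channel descriptors pass through a fully
# connected bottleneck, are fused by addition, and rescale the input.
import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.fc = nn.Sequential(          # shared bottleneck (assumption)
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        avg = self.fc(x.mean(dim=(2, 3)))   # average-pooling path
        mx = self.fc(x.amax(dim=(2, 3)))    # maximum-pooling path
        w = torch.sigmoid(avg + mx).view(b, c, 1, 1)
        return x * w                        # rescale the input features
\end{verbatim}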
\begin{table} \small \caption{Comparisons of detection APs (\%) on the MSCOCO 2014 benchmark.} \label{table2} \resizebox{0.95\columnwidth}{!}{ \smallskip\begin{tabular}{|l|l|l|lll|lll|} \hline Method& Backbone& Revise& \multicolumn{3}{l|}{Avg.Precision, IOU:} &\multicolumn{3}{l|}{Avg.Precision, Area:} \\ &&& 0.5:0.95& 0.5& 0.75& S& M& L\\ \hline Faster R-CNN \cite{Ren2017Faster}& VGG-16& -& 21.9&42.7&-&-&-&-\\ \hline OHEM++ \cite{Shrivastava}& VGG-16& -& 25.5&45.9&26.1& 7.4&27.7&40.3\\ \hline SSD \cite{Liu2016SSD}& VGG-16& -& 25.1&43.1&25.8& 6.6&25.9&41.4\\ \hline SSD&MobileNet-v2&-& 22.1&-&-&-&-&-\\ \hline DSSD321 \cite{fu2017dssd}&ResNet-101&-& 28.0&46.1&29.2&7.4&28.1&47.6\\ \hline R-FCN \cite{Dai2016R} &ResNet-50&-& 27.0&48.7&26.9&9.8&30.9&40.3\\ \hline MNC \cite{DaiInstance}&ResNet-101&-& 24.6&44.3&24.8&4.7&25.9&43.6\\ \hline A&ResNet-50&-&25.1&45.4&24.6&10.5&29.3&32.6\\ \hline A&ResNet-50&$\checkmark$&25.3&45.4&24.9&10.8&29.2&33.0\\ \hline B&ResNet-50&-&27.3&49.4&26.5&11.1&30.7&36.8\\ \hline B&ResNet-50&$\checkmark$&27.4&49.2&26.7&11.5&30.6&36.6\\ \hline C&ResNet-50&-&27.5&49.5&26.9&11.3&30.9&37.4\\ \hline C&ResNet-50&$\checkmark$&27.8&49.5&27.3&11.9&31.1&37.3\\ \hline Ours&ResNet-50&-&28.4&49.9&28.1&11.5&31.2&39.0\\ \hline Ours& ResNet-50&$\checkmark$& \textbf{28.4}& \textbf{49.8}&28.1&\textbf{11.8}&31.1&38.9\\ \hline \end{tabular} } \end{table} \textbf{The Importance of the Semantic-Revised.} In this work, we propose a semantic-revised center location. When the network does not use the semantic-revised center at inference, it performs worse on small objects. Without the semantic-revised center, the methods A, B, C and ours achieve small-object detection APs 0.3\%, 0.4\%, 0.6\% and 0.3\% lower than their counterparts with the semantic-revised center module, respectively. Therefore, we think that the semantic-revised branch improves detection performance on multi-scale objects adaptively. \subsection{Comparison with State-of-the-art Detectors} We further illustrate that the method assembling the semantic-revised center branch and the shared encoder-decoder module with attention mechanism improves detection performance on multi-scale objects. As shown in Table \ref{table2}, our method is better than DSSD321 \cite{fu2017dssd} and MNC \cite{DaiInstance}, achieving a detection AP 0.4\% and 3.8\% higher, respectively. On the other hand, our method consumes less time and space. On the MSCOCO benchmark, the four methods (A, B, C and ours) are better than the traditional detectors \cite{Ren2017Faster} \cite{Shrivastava} \cite{Liu2016SSD} \cite{Dai2016R} \cite{DaiInstance}. \begin{figure*}[t] \centering \includegraphics[width=0.9\textwidth]{figure6.pdf} \caption{Examples from the different methods regarding the shared encoder-decoder and attention mechanisms. The first two columns show detection results using the semantic-revised center at inference, and the last two columns show results using only the geometric position. From the first row to the fourth row, objects are detected by methods A, B, C, and ours, respectively.} \label{figure6} \end{figure*} \section{Discussion} As shown in Table \ref{table1} and Table \ref{table2}, we believe that the attention mechanism plays an important role in object detection. We find that the minimum pooling performs better on small objects.
The minimum pooling extracts more discriminative features, steering the model toward features from small objects, so that method C performs better than the others on small objects. However, for multi-scale objects, the detection AP of the shared encoder-decoder with the channel attention mechanism is higher than the others. On the other hand, according to Table \ref{table1}, the shared encoder-decoder can learn similar semantic features across multi-scale objects. As shown in Figure \ref{figure6}, we use two inference modes, with or without the semantic-revised center, for all methods (A, B, C and ours). More importantly, our encoder-decoder module can extract the common semantic features of multi-scale objects. However, differences between the semantic distributions of different categories may hurt performance. According to these experiments, we find that our attention mechanism is more effective than the traditional attention mechanisms for multi-scale object detection. \section{Conclusions} We propose a one-stage anchor-free detector with a shared encoder-decoder with attention mechanism (SEDAM) to detect multi-scale objects adaptively. More importantly, our method uses the semantic-related center to revise the geometric position prediction adaptively, which improves detection performance for multi-scale objects on the MSCOCO 2014 benchmark, and the semantic-revised branch is more suitable for actual scenes. The attention mechanism with the minimum pooling improves performance on small object detection. Therefore, a shared encoder-decoder structure with the attention mechanism can improve detection AP. Our approach reduces cost and performs better than traditional methods for multi-scale object detection. We believe that our approach can be applied to other base architectures to detect multi-scale objects to a certain extent.
\section{Introduction} \label{sec:intro} It is a useful and well-known fact that plane partitions in an $a \times b \times c$ box, lozenge tilings of a hexagon with side lengths $(a,b,c)$, families of non-intersecting lattice paths in such a hexagon, and perfect matchings of a suitable honeycomb graph are all in bijection. In this work we refine the latter three bijections by establishing signed versions of them for regions that are more general than hexagons. More specifically, we consider certain subregions of a triangular region $\mathcal{T}_d$. The latter is an equilateral triangle of side length $d$ subdivided by equilateral triangles of side length one. We view a hexagon with side lengths $a, b, c$ as the region obtained by removing triangles of side lengths $a, b$, and $c$ at the vertices of $\mathcal{T}_d$, where $d = a + b + c$. More generally, we consider subregions $T \subset \mathcal{T} = \mathcal{T}_d$ (for some $d$) that arise from $\mathcal{T}$ by removing upward-pointing triangles, each of them being a union of unit triangles. We refer to the removed upward-pointing triangles as \emph{punctures}. The punctures may overlap (see Figure \ref{fig:triregion-intro}). We call the resulting subregions of $\mathcal{T}$ \emph{triangular subregions}. Such a region is said to be \emph{balanced} if it contains as many upward-pointing unit triangles as downward-pointing unit triangles. For example, hexagonal subregions are balanced. Lozenge tilings of triangular subregions have been studied in several areas. For example, they are used in statistical mechanics for modeling bonds in dimers (see, e.g., \cite{Ke}) or for studying phase transitions (see, e.g., \cite{Ci-2005}). \begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/triregion-gcd-1} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/triregion-tiling} \end{minipage} \caption{A triangular region together with one of its $13$ lozenge tilings.} \label{fig:triregion-intro} \end{figure} For an arbitrary triangular region, the bijection between lozenge tilings and plane partitions breaks down. However, there are still bijections between lozenge tilings, perfect matchings, and families of lattice paths. Here we establish a signed version of these bijections. In particular, we show that, for each balanced triangular region $T$, there is a bijection between the signed perfect matchings and the signed families of non-intersecting lattice paths. This is achieved via the links to lozenge tilings. \begin{figure}[!ht] \begin{minipage}[b]{0.42\linewidth} \centering \includegraphics[scale=1]{figs/build-pm-2}\\ \emph{A perfect matching.} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/build-nilp-2}\\ \emph{A family of non-intersecting lattice paths.} \end{minipage} \caption{Bijections to lozenge tilings.} \label{fig:bijections} \end{figure} Indeed, the perfect matchings determined by any triangular region $T$ can be enumerated by the permanent of a zero-one matrix $Z(T)$ that is the bi-adjacency matrix of a bipartite graph. This suggests introducing a sign for each perfect matching such that the signed perfect matchings are enumerated by the determinant of $Z(T)$. We call this sign the \emph{perfect matching sign} of the lozenge tiling that corresponds to the perfect matching (see Definition \ref{def:pm-sign}).
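To make this concrete, the following minimal Python sketch (our illustration, not part of the combinatorial development) computes the permanent and the determinant of a zero-one matrix by brute force. The $3 \times 3$ matrix below is the bi-adjacency matrix we obtain for the small region $T_3(x^2, y^2, z^2)$ of Figure~\ref{fig:three-cycle}, assuming the reverse-lexicographic vertex ordering fixed in Section~\ref{sec:signed}: the permanent $2$ counts its two lozenge tilings, and the determinant $-2$ shows that both tilings carry the same sign.

\begin{verbatim}
from itertools import permutations
from math import prod


def per_and_det(M):
    """Brute-force permanent and determinant (fine for small matrices)."""
    n = len(M)
    p = d = 0
    for s in permutations(range(n)):
        term = prod(M[i][s[i]] for i in range(n))
        # Parity of the permutation s via its inversion count.
        inv = sum(s[i] > s[j] for i in range(n) for j in range(i + 1, n))
        p += term
        d += (-1) ** inv * term
    return p, d


# Bi-adjacency matrix of T_3(x^2, y^2, z^2): rows are the
# downward-pointing triangles x, y, z; columns are the
# upward-pointing triangles xy, xz, yz.
Z = [[1, 1, 0],
     [1, 0, 1],
     [0, 1, 1]]
print(per_and_det(Z))  # (2, -2)
\end{verbatim}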
Using the theory pioneered by Gessel and Viennot \cite{GV-85}, Lindstr\"om \cite{Li}, Stembridge \cite{Stembridge}, and Krattenthaler \cite{Kr-95}, the sets of signed families of non-intersecting lattice paths in $T$ can be enumerated by the determinant of a matrix $N(T)$ whose entries are binomial coefficients. We define the sign used in this enumeration as the \emph{lattice path sign} of the corresponding lozenge tiling of the region $T$ (see Definition \ref{def:nilp-sign}). Typically, the matrix $N(T)$ is much smaller than the matrix $Z(T)$. However, the entries of $N(T)$ can be much bigger than one. In order to compare enumerations of signed perfect matchings and signed lattice paths, we introduce a new combinatorial construction that we call \emph{resolution of a puncture}. Roughly speaking, it replaces a triangular subregion with a fixed lozenge tiling by a larger triangular subregion with a compatible lozenge tiling and one puncture fewer. Carefully analyzing the change of sign under resolutions of punctures and using induction on the number of punctures of a given region, we establish that, for each balanced triangular subregion, the perfect matching sign and the lattice path sign are in fact equivalent, and thus (see Theorem \ref{thm:detZN}) \[ |\det Z(T)| = |\det N(T)|. \] The proof also reveals instances where the absolute value of $\det Z(T)$ is equal to the permanent of $Z(T)$. This includes hexagonal regions, for which the result is well-known. The results of this paper will be used in forthcoming work \cite{CN-small-type} in order to study the so-called Weak Lefschetz Property \cite{HMNW} of monomial ideals. The latter is an algebraic property that has important connections to combinatorics. For example, it has been used for establishing unimodality results and the g-Theorem on the face vectors of simplicial polytopes (see, e.g., \cite{Stanley-1980, St-faces}). The paper is organized as follows. In Section~\ref{sec:trireg}, we introduce triangular regions and establish a criterion for the tileability of such a region. In Section~\ref{sec:signed}, we introduce the perfect matching and lattice path signs for a lozenge tiling. Section~\ref{sec:resolution} contains our main results. There we introduce the method of resolving a puncture and use it to prove the equivalence of the two signs.
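As a small computational companion (our illustration, using the bookkeeping set up in Section~\ref{sec:trireg} below), one can test balancedness of a region $T_d(I)$ directly: its upward-pointing triangles correspond to the degree $d-1$ monomials outside $I$, and its downward-pointing triangles to the degree $d-2$ monomials outside $I$. The Python sketch below checks the region $T_8(x^7, y^7, z^6, xy^4z^2, x^3yz^2, x^4yz)$ of Figure~\ref{fig:triregion-intro}; the function names are ours.

\begin{verbatim}
def monomials_outside(gens, degree):
    """Exponent triples (a, b, c) of the given degree that are not
    divisible by any generator of the monomial ideal I."""
    out = []
    for a in range(degree + 1):
        for b in range(degree + 1 - a):
            c = degree - a - b
            if not any(a >= p and b >= q and c >= r for (p, q, r) in gens):
                out.append((a, b, c))
    return out


def balance(gens, d):
    # Upward triangles <-> degree d-1 monomials outside I;
    # downward triangles <-> degree d-2 monomials outside I.
    up = len(monomials_outside(gens, d - 1))
    down = len(monomials_outside(gens, d - 2))
    return up, down, up == down


# T_8(x^7, y^7, z^6, x y^4 z^2, x^3 y z^2, x^4 y z):
gens = [(7, 0, 0), (0, 7, 0), (0, 0, 6), (1, 4, 2), (3, 1, 2), (4, 1, 1)]
print(balance(gens, 8))  # (25, 25, True): the region is balanced
\end{verbatim}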
We label the downward- and upward-pointing unit triangles by the monomials in $[R]_{d-2}$ and $[R]_{d-1}$, respectively, as follows: place $x^{d-1}$ at the top, $y^{d-1}$ at the bottom-left, and $z^{d-1}$ at the bottom-right, and continue labeling such that, for each pair of an upward- and a downward-pointing triangle that share an edge, the label of the upward-pointing triangle is obtained from the label of the downward-pointing triangle by multiplying by a variable. The resulting labeled triangular region is the \emph{triangular region (of $R$) in degree $d$} and is denoted $\mathcal{T}_d$. See Figure~\ref{fig:triregion-R}(i) for an illustration. \begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/triregion-R4}\\ \emph{(i) $\mathcal{T}_4$} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/triregion-RmodI}\\ \emph{(ii) $T_4(xy, y^2, z^3)$} \end{minipage} \caption{A triangular region with respect to $R$ and with respect to $R/I$.} \label{fig:triregion-R} \end{figure} Throughout this manuscript we order the monomials of $R$ by using the \emph{graded reverse-lexico\-graphic order}, that is, $x^a y^b z^c > x^p y^q z^r$ if either $a+b+c > p+q+r$ or $a+b+c = p+q+r$ and the \emph{last} non-zero entry in $(a-p, b-q, c-r)$ is \emph{negative}. For example, in degree $3$, \[ x^3 > x^2y > xy^2 > y^3 > x^2z > xyz > y^2z > xz^2 > yz^2 > z^3. \] Thus in $\mathcal{T}_4$, see Figure~\ref{fig:triregion-R}(i), the upward-pointing triangles are ordered starting at the top and moving down-left in lines parallel to the upper-left edge. We generalise this construction to quotients by monomial ideals. Let $I$ be a monomial ideal of $R$. The \emph{triangular region (of $R/I$) in degree $d$}, denoted by $T_d(I)$, is the part of $\mathcal{T}_d$ that is obtained after removing the triangles labeled by monomials in $I$. Note that the labels of the downward- and upward-pointing triangles in $T_d(I)$ form $K$-bases of $[R/I]_{d-2}$ and $[R/I]_{d-1}$, respectively. It is sometimes more convenient to illustrate such regions with the removed triangles darkly shaded instead of being removed; both illustration methods will be used throughout this manuscript. See Figure~\ref{fig:triregion-R}(ii) for an example. Notice that the regions missing from $\mathcal{T}_d$ in $T_d(I)$ can be viewed as a union of (possibly overlapping) upward-pointing triangles of various side lengths that include the upward- and downward-pointing triangles inside them. Each of these upward-pointing triangles corresponds to a minimal generator of $I$ that has, necessarily, degree at most $d-1$. We can alternatively construct $T_d(I)$ from $\mathcal{T}_d$ by removing, for each minimal generator $x^a y^b z^c$ of $I$ of degree at most $d-1$, the \emph{puncture associated to $x^a y^b z^c$} which is an upward-pointing equilateral triangle of side length $d-(a+b+c)$ located $a$ triangles from the bottom, $b$ triangles from the upper-right edge, and $c$ triangles from the upper-left edge. See Figure~\ref{fig:triregion-punctures} for an example. We call $d-(a+b+c)$ the \emph{side length of the puncture associated to $x^a y^b z^c$}, regardless of possible overlaps with other punctures in $T_d (I)$.
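The following short Python sketch (our illustration; exponent triples $(a, b, c)$ stand for $x^a y^b z^c$) implements this comparison and reproduces the degree-$3$ list above.

\begin{verbatim}
def revlex_key(exp):
    # Graded reverse-lexicographic order with x > y > z: compare the
    # total degree first; for equal degrees, the monomial whose last
    # nonzero exponent difference is negative is the larger one, which
    # amounts to comparing (-c, -b) lexicographically.
    a, b, c = exp
    return (a + b + c, -c, -b)


def fmt(exp):
    # Pretty-print an exponent triple (a, b, c) as x^a y^b z^c.
    return "".join(v + ("^%d" % e if e > 1 else "")
                   for v, e in zip("xyz", exp) if e > 0) or "1"


deg3 = [(a, b, 3 - a - b) for a in range(4) for b in range(4 - a)]
print(" > ".join(fmt(m) for m in sorted(deg3, key=revlex_key,
                                        reverse=True)))
# x^3 > x^2y > xy^2 > y^3 > x^2z > xyz > y^2z > xz^2 > yz^2 > z^3
\end{verbatim}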
\begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/triregion-punctures}\\ \emph{(i) $T_{d}(x^a y^b z^c)$} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/triregion-punctures-ex}\\ \emph{(ii) $T_{10}(xy^3z^2)$} \end{minipage} \caption{$T_d(I)$ as constructed by removing punctures.} \label{fig:triregion-punctures} \end{figure} We say that two punctures \emph{overlap} if they share at least one edge. Two punctures are said to be \emph{touching} if they share precisely a vertex. \subsection{Tilings with lozenges} \label{sub:tiling} A \emph{lozenge} is a union of two unit equilateral triangles glued together along a shared edge, i.e., a rhombus with unit side lengths and angles of $60^{\circ}$ and $120^{\circ}$. Lozenges are also called calissons and diamonds in the literature. Fix a positive integer $d$ and consider the triangular region $\mathcal{T}_d$ as a union of unit triangles. Thus a \emph{subregion} $T \subset \mathcal{T}_d$ is a subset of such triangles. We retain their labels. We say that a subregion $T$ is \emph{$\bigtriangledown$-heavy}, \emph{$\triangle$-heavy}, or \emph{balanced} if it contains more downward-pointing than upward-pointing triangles, fewer, or equally many, respectively. A subregion is \emph{tileable} if either it is empty or there exists a tiling of the region by lozenges such that every triangle is part of exactly one lozenge. A tileable subregion is necessarily balanced as every unit triangle is part of exactly one lozenge. Let $T \subset \mathcal{T}_d$ be any subregion. Given a monomial $x^a y^b z^c$ with degree less than $d$, the \emph{monomial subregion} of $T$ associated to $x^a y^b z^c$ is the part of $T$ contained in the triangle $a$ units from the bottom edge, $b$ units from the upper-right edge, and $c$ units from the upper-left edge. In other words, this monomial subregion consists of the triangles that lie in both $T$ and the puncture associated to the monomial $x^a y^b z^c$. See Figure~\ref{fig:triregion-subregion} for an example. \begin{figure}[!ht] \includegraphics[scale=1]{figs/triregion-subregion} \caption{The monomial subregion of $T_{8}(x^7, y^7, z^6, x y^4 z^2, x^3 y z^2, x^4 y z)$ (see Figure~\ref{fig:triregion-intro}) associated to $x y^2 z$.} \label{fig:triregion-subregion} \end{figure} Replacing a tileable monomial subregion by a puncture of the same size does not alter tileability. \begin{lemma} \label{lem:replace-tileable} Let $T \subset \mathcal{T}_d$ be any subregion. If the monomial subregion $U$ of $T$ associated to $x^a y^b z^c$ is tileable, then $T$ is tileable if and only if $T \setminus U$ is tileable. Moreover, each tiling of $T$ is obtained by combining a tiling of $T \setminus U$ and a tiling of $U$. \end{lemma} \begin{proof} Suppose $T$ is tileable, and let $\tau$ be a tiling of $T$. If a tile in $\tau$ contains a downward-pointing triangle of $U$, then the upward-pointing triangle of this tile also is in $U$. Hence, if any lozenge in $\tau$ contains exactly one triangle of $U$, then it must be an upward-pointing triangle. Since $U$ is balanced, this would leave $U$ with a downward-pointing triangle that is not part of any tile, a contradiction. It follows that $\tau$ induces a tiling of $U$, and thus $T \setminus U$ is tileable. Conversely, if $T \setminus U$ is tileable, then a tiling of $T \setminus U$ and a tiling of $U$ combine to a tiling of $T$.
\end{proof} Let $U \subset \mathcal{T}_d$ be a monomial subregion, and let $T, T' \subset \mathcal{T}_d$ be any subregions such that $T \setminus U = T' \setminus U$. If $T \cap U$ and $T' \cap U$ are both tileable, then $T$ is tileable if and only if $T'$ is, by Lemma \ref{lem:replace-tileable}. In other words, replacing a tileable monomial subregion of a triangular region by a tileable monomial subregion of the same size does not affect tileability. Using this observation, we find a tileability criterion for triangular regions associated to monomial ideals. If it is satisfied, the argument below constructs a tiling. \begin{theorem} \label{thm:tileable} Let $T = T_d(I)$ be a balanced triangular region, where $I \subset R$ is any monomial ideal. Then $T$ is tileable if and only if $T$ has no $\bigtriangledown$-heavy monomial subregions. \end{theorem} \begin{proof} Suppose $T$ contains a $\bigtriangledown$-heavy monomial subregion $U$. That is, $U$ has more downward-pointing triangles than upward-pointing triangles. Since the only triangles of $T \setminus U$ that share an edge with $U$ are downward-pointing triangles, it is impossible to cover every downward-pointing triangle of $U$ with a lozenge. Thus, $T$ is non-tileable. Conversely, suppose $T$ has no $\bigtriangledown$-heavy monomial subregions. In order to show that $T$ is tileable, we may also assume that $T$ has no non-trivial tileable monomial subregions by Lemma~\ref{lem:replace-tileable}. Consider any pair of touching or overlapping punctures in $\mathcal{T}_d$. The smallest monomial subregion $U$ containing both punctures is tileable. (In fact, such a monomial region is uniquely tileable by lozenges.) If further triangles stemming from other punctures of $T$ have been removed from $U$, then the resulting region $T \cap U$ becomes $\bigtriangledown$-heavy or empty. Thus, our assumptions imply that $T$ has no overlapping and no touching punctures. Now we proceed by induction on $d$. If $d \leq 2$, then $T$ is empty or consists of one lozenge. Thus, it is tileable. Let $d \geq 3$, and let $U$ be the monomial subregion of $T$ associated to $x$, i.e., $U$ consists of the upper $d-1$ rows of $T$. Let $L$ be the bottom row of $T$. If $L$ does not contain part of a puncture of $T$, then $L$ is $\triangle$-heavy, forcing $U$ to be a $\bigtriangledown$-heavy monomial subregion, contradicting an assumption on $T$. Hence, $L$ must contain part of at least one puncture of $T$. See Figure~\ref{fig:thm-tileable}(i). \begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/thm-tileable-1}\\ \emph{(i) The region $T$ split into $U$ and $L$.} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/thm-tileable-2}\\ \emph{(ii) Creating $U'$ and $L'$.} \end{minipage} \caption{Illustrations for the proof of Theorem~\ref{thm:tileable}.} \label{fig:thm-tileable} \end{figure} Place an up-down lozenge in $T$ just to the right of each puncture along the bottom row \emph{except} the farthest right puncture. Notice that putting in all these tiles is possible since punctures are non-overlapping and non-touching. Let $U' \subset U$ and $L' \subset L$ be the subregions that are obtained by removing the relevant upward-pointing and downward-pointing triangles of the added lozenges from $U$ and $L$, respectively. See Figure~\ref{fig:thm-tileable}(ii). Notice that $L'$ is uniquely tileable. As $T$ and $L'$ are balanced, so is $U'$.
Assume $U'$ contains a monomial subregion $V'$ that is $\bigtriangledown$-heavy. Then $V' \neq U'$, and hence $V'$ fits into a triangle of side length $d-2$. Furthermore, the assumption on $T$ implies that $V'$ is not a monomial subregion of $U$. In particular, $V'$ must be located at the bottom of $U'$. Let $\tilde{V}$ be the smallest monomial subregion of $U$ that contains $V'$. It is obtained from $V'$ by adding suitable upward-pointing triangles that are parts of the added lozenges. Expand $\tilde{V}$ down one row to a monomial subregion $V$ of $T$. Thus, $V$ fits into a triangle of side length $d-1$ and is not $\bigtriangledown$-heavy. If $V$ is balanced, then, by induction, $V$ is tileable. However, we assumed $T$ contains no such non-trivial regions. Hence, $V$ is $\triangle$-heavy. Observe now that the region $V \cap L'$ is either balanced or has exactly one more upward-pointing triangle than downward-pointing triangles. Since $V'$ is obtained from $V$ by removing $V \cap L$ and some of the added lozenges, it follows that $V'$ cannot be $\bigtriangledown$-heavy, a contradiction. Therefore, we have shown that each monomial subregion of $U'$ is not $\bigtriangledown$-heavy. By induction on $d$, we conclude that $U'$ is tileable. Using the lozenges already placed, along with the tiling of $L'$, we obtain a tiling of $T$. \end{proof} \begin{remark} \label{rem:complexity} The preceding proof yields a recursive construction of a canonical tiling of the triangular region. In fact, the tiling can be seen as minimal, in the sense of Subsection~\ref{sub:nilp}. Moreover, the theorem yields an exponential (in the number of punctures) algorithm to determine the tileability of a region. Thurston~\cite{Th} gave a linear (in the number of triangles) algorithm to determine the tileability of a \emph{simply-connected region}, i.e., a region with a polygonal boundary. Thurston's algorithm also yields a minimal canonical tiling. \end{remark} \section{Signed lozenge tilings} \label{sec:signed} In Theorem~\ref{thm:tileable}, we established a tileability criterion for a triangular region. Now we want to \emph{enumerate} the lozenge tilings of a tileable triangular region $T_d(I)$. In fact, we introduce two ways for assigning a sign to a lozenge tiling here and then compare the resulting enumerations in the next section. In order to derive the (unsigned) enumeration, we consider the enumeration of perfect matchings of an associated bipartite graph. The permanent of its bi-adjacency matrix, a zero-one matrix, yields the desired enumeration. We define a first sign of a lozenge tiling in such a way that the determinant of the bi-adjacency matrix gives a \emph{signed} enumeration of the perfect matchings of the graph and hence of lozenge tilings of $T_d(I)$. We also introduce a second sign of a lozenge tiling by considering an enumeration of families of non-intersecting lattice paths on an associated finite sub-lattice inside $T_d(I)$. This is motivated by the Lindstr\"om-Gessel-Viennot theory \cite{Li}, \cite{GV}. Using the sub-lattice, we generate a matrix whose entries are binomial coefficients and whose determinant gives a signed enumeration of families of non-intersecting lattice paths inside $T_d(I)$, hence of lozenge tilings. The two signed enumerations appear to be different, but we show that they are indeed the same, up to sign, in the following section. 
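Before defining the two signs, we illustrate the second enumeration in the classical hexagonal case with a short Python sketch (our illustration). For a hexagon with side lengths $(a, b, c)$, the lattice path matrix has the well-known binomial entries $\binom{b+c}{c-i+j}$, and its determinant recovers MacMahon's box formula for plane partitions; this specific matrix is standard in the literature and is not the matrix $N(T)$ constructed below for general regions.

\begin{verbatim}
from fractions import Fraction
from math import comb


def det(M):
    """Exact determinant by fraction-based Gaussian elimination."""
    M = [[Fraction(x) for x in row] for row in M]
    n, d = len(M), Fraction(1)
    for i in range(n):
        pivot = next((r for r in range(i, n) if M[r][i]), None)
        if pivot is None:
            return 0
        if pivot != i:
            M[i], M[pivot] = M[pivot], M[i]
            d = -d
        d *= M[i][i]
        for r in range(i + 1, n):
            f = M[r][i] / M[i][i]
            for c in range(i, n):
                M[r][c] -= f * M[i][c]
    return int(d)


def hexagon_tilings(a, b, c):
    # Lattice path matrix for the hexagon with side lengths (a, b, c).
    N = [[comb(b + c, c - i + j) if c - i + j >= 0 else 0
          for j in range(a)] for i in range(a)]
    return det(N)


def macmahon(a, b, c):
    # MacMahon's product formula for plane partitions in an a x b x c box.
    p = Fraction(1)
    for i in range(1, a + 1):
        for j in range(1, b + 1):
            for k in range(1, c + 1):
                p *= Fraction(i + j + k - 1, i + j + k - 2)
    return int(p)


print(hexagon_tilings(2, 2, 2), macmahon(2, 2, 2))  # 20 20
print(hexagon_tilings(3, 2, 1), macmahon(3, 2, 1))  # 10 10
\end{verbatim}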
~\subsection{Perfect matchings}\label{sub:pm}~\par A subregion $T (G) \subset \mathcal{T}_d$ can be associated to a bipartite planar graph $G$ that is an induced subgraph of the honeycomb graph. Lozenge tilings of $T(G)$ can then be associated to perfect matchings on $G$. The connection was used by Kuperberg in~\cite{Kup}, the earliest citation known to the authors, to study symmetries on plane partitions. Note that $T(G)$ is often called the \emph{dual graph} of $G$ in the literature (e.g., \cite{Ci-1997}, \cite{Ci-2005}, and \cite{Ei}). Here we begin with a subregion $T$ and then construct a suitable graph $G$. Let $T \subset \mathcal{T}_d$ be any subregion. As above, we consider $T$ as a union of unit triangles. We associate to $T$ a bipartite graph. First, place a vertex at the center of each triangle. Let $B$ be the set of centers of the downward-pointing triangles, and let $W$ be the set of centers of the upward-pointing triangles. Consider both sets ordered by the reverse-lexicographic ordering applied to the monomial labels of the corresponding triangles (see Section~\ref{sub:ideal}). The \emph{bipartite graph associated to $T$} is the bipartite graph $G(T)$ on the vertex set $B \cup W$ that has an edge between vertices $B_i \in B$ and $W_j \in W$ if the corresponding upward- and downward-pointing triangles share an edge. In other words, edges of $G(T)$ connect vertices of adjacent triangles. See Figure~\ref{fig:build-pm}(i). \begin{figure}[!ht] \begin{minipage}[b]{0.32\linewidth} \centering \includegraphics[scale=1]{figs/build-pm-1}\\ \emph{(i) The graph $G(T)$.} \end{minipage} \begin{minipage}[b]{0.32\linewidth} \centering \includegraphics[scale=1]{figs/build-pm-2}\\ \emph{(ii) Selected covered edges.} \end{minipage} \begin{minipage}[b]{0.32\linewidth} \centering \includegraphics[scale=1]{figs/build-pm-3}\\ \emph{(iii) The perfect matching.} \end{minipage} \caption{The perfect matching of the bipartite graph $G(T)$ associated to the tiling of $T$ in Figure~\ref{fig:triregion-intro}.} \label{fig:build-pm} \end{figure} Using the above ordering of the vertices, we define the \emph{bi-adjacency matrix} of $T$ as the bi-adjacency matrix $Z(T) := Z(G(T))$ of the graph $G(T)$. It is the zero-one matrix $Z(T)$ of size $\# B \times \# W$ with entries $Z(T)_{(i,j)}$ defined by \begin{equation*} Z(T)_{(i,j)} = \begin{cases} 1 & \text{if $(B_i, W_j)$ is an edge of $G(T)$ } \\ 0 & \text{otherwise.} \end{cases} \end{equation*} \begin{remark} \label{rem:Z-non-square} Note that $Z(T)$ is a square matrix if and only if the region $T$ is balanced. Observe also that the constructions of $G(T)$ and $Z(T)$ do not require any restrictions on $T$. In particular, $T$ need not be balanced, and so $Z(T)$ need not be square. \end{remark} A \emph{perfect matching of a graph $G$} is a set of pairwise non-adjacent edges of $G$ such that each vertex is matched. There is a well-known bijection between lozenge tilings of a balanced subregion $T$ and perfect matchings of $G(T)$. A lozenge tiling $\tau$ is transformed into a perfect matching $\pi$ by overlaying the triangular region $T$ on the bipartite graph $G(T)$ and selecting the edges of the graph that the lozenges of $\tau$ cover. See Figures~\ref{fig:build-pm}(ii) and~(iii) for the overlayed image and the perfect matching by itself, respectively. \begin{remark} The graph $G(T)$ is a ``honeycomb graph,'' a type of graph that has been studied, especially for its perfect matchings.
\begin{enumerate} \item In particular, honeycomb graphs are investigated for their connections to physics. Honeycomb graphs model the bonds in dimers (polymers with only two structural units), and perfect matchings correspond to so-called \emph{dimer coverings}. Kenyon~\cite{Ke} gave a modern account of explorations of dimer models, including random dimer coverings and their limiting shapes. See the recent memoir~\cite{Ci-2005} of Ciucu for further results in this direction. \item Kasteleyn~\cite{Ka} provided, in 1967, a general method for computing the number of perfect matchings of a planar graph by means of a determinant. In the following observation, we compute the number of perfect matchings on $G(T)$ by means of a permanent. \end{enumerate} \end{remark} Recall that the \emph{permanent} of an $n \times n$ matrix $M = (M_{(i, j)})$ is given by \[ \per{M} := \sum_{\sigma \in \mathfrak{S}_n} \prod_{i=1}^{n} M_{(i, \sigma(i))}. \] \begin{proposition} \label{pro:per-enum} Let $T \subset \mathcal{T}_d$ be a non-empty balanced subregion. Then the lozenge tilings of $T$ and the perfect matchings of $G(T)$ are both enumerated by $\per{Z(T)}$. \end{proposition} \begin{proof} As $T$ is balanced, $Z(T)$ is a square zero-one matrix. Each non-zero summand of $\per{Z(T)}$ corresponds to a perfect matching, as it corresponds to a bijection between the two colour classes $B$ and $W$ of $G(T)$ (determined by the downward- and upward-pointing triangles of $T$). Hence, $\per{Z(T)}$ enumerates the perfect matchings of $G(T)$, and thus the tilings of $T$. \end{proof} Recall that the \emph{determinant} of an $n \times n$ matrix $M$ is given by \[ \det{M} := \sum_{\sigma \in \mathfrak{S}_n} \sgn{\sigma} \prod_{i=1}^{n} M_{(i, \sigma(i))}, \] where $\sgn{\sigma}$ is the signature (or sign) of the permutation $\sigma$. We take the convention that the permanent and determinant of a $0 \times 0$ matrix is one. By the proof of Proposition~\ref{pro:per-enum}, each lozenge tiling $\tau$ corresponds to a perfect matching $\pi$ of $G(T)$, that is, a bijection $\pi: B \to W$. Considering $\pi$ as a permutation on $\#\triangle(T) = \#\bigtriangledown (T)$ letters, it is natural to assign a sign to each lozenge tiling using the signature of the permutation $\pi$. \begin{definition} \label{def:pm-sign} Let $T \subset \mathcal{T}_d$ be a non-empty balanced subregion. Then we define the \emph{perfect matching sign} of a lozenge tiling $\tau$ of $T$ as $\msgn{\tau} := \sgn{\pi}$, where $\pi \in \mathfrak{S}_{\#\triangle(T)}$ is the perfect matching determined by $\tau$. \end{definition} It follows that the determinant of $Z(T)$ gives an enumeration of the \emph{perfect matching signed lozenge tilings} of $T$. \begin{theorem} \label{thm:pm-matrix} Let $T \subset \mathcal{T}_d$ be a non-empty balanced subregion. Then the perfect matching signed lozenge tilings of $T$ are enumerated by $\det{Z(T)}$, that is, \[ \sum_{\tau \text{ tiling of } T} \msgn{\tau} = \det Z(T). \] \end{theorem}~ \begin{example} \label{exa:Z-matrix} Consider the triangular region $T = T_6(x^3, y^4, z^5)$, as seen in the first picture of Figure~\ref{fig:three-rotations} below. Then $Z(T)$ is the $11 \times 11$ matrix \[ Z(T) = \left[ \begin{array}{ccccccccccc} 1&1&0&0&0&0&0&0&0&0&0\\ 0&1&1&0&0&0&0&0&0&0&0\\ 0&0&1&1&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0\\ 0&1&0&0&1&1&0&0&0&0&0\\ 0&0&1&0&0&1&1&0&0&0&0\\ 0&0&0&1&0&0&1&1&0&0&0\\ 0&0&0&0&1&0&0&0&1&0&0\\ 0&0&0&0&0&1&0&0&1&1&0\\ 0&0&0&0&0&0&1&0&0&1&1\\ 0&0&0&0&0&0&0&1&0&0&1 \end{array} \right].
\] We note that $\per Z(T) = \det{Z(T)} = 10$. Thus, $T$ has exactly $10$ lozenge tilings, all of which have the same sign. We derive a theoretical explanation for this fact in the following section. \end{example}~ \subsection{Families of non-intersecting lattice paths}\label{sub:nilp}~\par We follow~\cite[Section~5]{CEKZ} (similarly,~\cite[Section~2]{Fi}) in order to associate to a subregion $T \subset \mathcal{T}_d$ a finite set $L(T)$ that can be identified with a subset of the lattice $\mathbb{Z}^2$. Abusing notation, we refer to $L(T)$ as a sub-lattice of $\mathbb{Z}^2$. We then translate lozenge tilings of $T$ into families of non-intersecting lattice paths on $L(T)$. We first construct $L(T)$ from $T$. Place a vertex at the midpoint of the edge of each triangle of $T$ that is parallel to the upper-left boundary of the triangle $\mathcal{T}_d$. These vertices form $L(T)$. We will consider paths in $L(T)$. There we think of rightward motion parallel to the bottom edge of $\mathcal{T}_d$ as ``horizontal'' and downward motion parallel to the upper-right edge of $\mathcal{T}_d$ as ``vertical'' motion. If one simply orthogonalises $L(T)$ with respect to the described ``horizontal'' and ``vertical'' motions, then one can consider $L(T)$ as a finite sub-lattice of $\mathbb{Z}^2$. As we can translate $L(T)$ in $\mathbb{Z}^2$ and not change its properties, we may assume that the vertex associated to the lower-left triangle of $\mathcal{T}_d$ is the origin. Notice that each vertex of $L(T)$ is on the upper-left edge of an upward-pointing triangle of $\mathcal{T}_d$ (even if this triangle is not present in $T$). We use the monomial label of this upward-pointing triangle to specify a vertex of $L(T)$. Under this identification the mentioned orthogonalization of $L(T)$ moves the vertex associated to the monomial $x^a y^b z^{d-1-(a+b)}$ in $L(T)$ to the point $(d-1-b, a)$ in $\mathbb{Z}^2$. We next single out special vertices of $L(T)$. We label the vertices of $L(T)$ that are only on upward-pointing triangles in $T$, from smallest to largest in the reverse-lexicographic order, as $A_1, \ldots, A_m$. Similarly, we label the vertices of $L(T)$ that are only on downward-pointing triangles in $T$, again from smallest to largest in the reverse-lexicographic order, as $E_1, \ldots, E_n$. See Figure~\ref{fig:build-nilp}(i). We note that there are equally many vertices $A_1, \ldots, A_m$ and $E_1, \ldots, E_n$ if and only if the region $T$ is balanced. This follows from the fact that these vertices are precisely the vertices of $L(T)$ that are in exactly one unit triangle of $T$. A \emph{lattice path} in a lattice $L \subset \mathbb{Z}^2$ is a finite sequence of vertices of $L$ so that all single steps move either to the right or down. Given any vertices $A, E \in \mathbb{Z}^2$, the number of lattice paths in $\mathbb{Z}^2$ from $A$ to $E$ is a binomial coefficient. In fact, if $A$ and $E$ have coordinates $(u,v), (x,y) \in \mathbb{Z}^2$ as above, there are $\binom{x-u+v-y}{x-u}$ lattice paths from $A$ to $E$ as each path has $x-u + v-y$ steps and $x-u \geq 0$ of these must be horizontal steps. Using the above identification of $L(T)$ as a sub-lattice of $\mathbb{Z}^2$, a \emph{lattice path} in $L(T)$ is a finite sequence of vertices of $L(T)$ so that all single steps move either to the East or to the Southeast. The \emph{lattice path matrix} of $T$ is the $m \times n$ matrix $N(T)$ with entries $N(T)_{(i,j)}$ defined by \[ N(T)_{(i,j)} = \# \text{lattice paths in $\mathbb{Z}^2$ from $A_i$ to $E_j$}.
\] Thus, the entries of $N(T)$ are binomial coefficients. Next we consider several lattice paths simultaneously. A \emph{family of non-intersecting lattice paths} is a finite collection of lattice paths such that no two lattice paths have any points in common. We call a family of non-intersecting lattice paths \emph{minimal} if every path takes vertical steps before it takes horizontal steps, whenever possible. That is, whenever a horizontal step is followed by a vertical step, replacing them with a vertical step followed by a horizontal step would cause paths in the family to intersect. Assume now that the subregion $T$ is balanced, so $m = n$. Let $\Lambda$ be a family of $m$ non-intersecting lattice paths in $L(T)$ from $A_1, \ldots, A_m$ to $E_1, \ldots, E_m$. Then $\Lambda$ determines a permutation $\lambda \in \mathfrak{S}_m$ such that the path in $\Lambda$ that begins at $A_i$ ends at $E_{\lambda(i)}$. Now we are ready to apply a beautiful theorem relating enumerations of signed families of non-intersecting lattice paths and determinants. In particular, we use a theorem first given by Lindstr\"om in~\cite[Lemma~1]{Li} and stated independently in~\cite[Theorem~1]{GV} by Gessel and Viennot. Stanley gives a very nice exposition of the topic in~\cite[Section~2.7]{Stanley-2011}. \begin{theorem}{\cite[Lemma~1]{Li} \& \cite[Theorem~1]{GV}} \label{thm:lgv} Assume $T \subset \mathcal{T}_d$ is a non-empty balanced subregion with identified lattice points $A_1, \ldots, A_m, E_1, \ldots, E_m \in L(T)$ as above. Then \[ \det{N(T)} = \sum_{\lambda \in \mathfrak{S}_m} \sgn(\lambda) \cdot P^+_\lambda(A\rightarrow E), \] where, for each permutation $\lambda \in \mathfrak{S}_m$, $P^+_\lambda(A \rightarrow E)$ is the number of families of non-intersecting lattice paths with paths in $L(T)$ going from $A_i$ to $E_{\lambda(i)}$. \end{theorem} We now use a well-known bijection between lozenge tilings of $T$ and families of non-intersecting lattice paths from $A_1, \ldots, A_m$ to $E_1, \ldots, E_m$; see, e.g., the survey~\cite{Pr}. Let $\tau$ be a lozenge tiling of $T$. Using the lozenges of $\tau$ as a guide, we connect each pair of vertices of $L(T)$ that lie on a single lozenge. This generates a family of non-intersecting lattice paths $\Lambda$ of $L(T)$ corresponding to $\tau$. See Figures~\ref{fig:build-nilp}(ii) and~(iii) for the overlayed image and the family of non-intersecting lattice paths by itself, respectively. \begin{figure}[!ht] \begin{minipage}[b]{0.32\linewidth} \centering \includegraphics[scale=1]{figs/build-nilp-1}\\ \emph{(i) The sub-lattice $L(T)$.} \end{minipage} \begin{minipage}[b]{0.32\linewidth} \centering \includegraphics[scale=1]{figs/build-nilp-2}\\ \emph{(ii) The overlayed image.} \end{minipage} \begin{minipage}[b]{0.32\linewidth} \centering \includegraphics[scale=1]{figs/build-nilp-3}\\ \emph{(iii) The family $\Lambda$.} \end{minipage} \caption{The family of non-intersecting lattice paths $\Lambda$ associated to the tiling $\tau$ in Figure~\ref{fig:triregion-intro}.} \label{fig:build-nilp} \end{figure} This bijection provides another way for assigning a sign to a lozenge tiling, this time using the signature of the permutation $\lambda$. \begin{definition} \label{def:nilp-sign} Let $T \subset \mathcal{T}_d$ be a non-empty balanced subregion as above, and let $\tau$ be a lozenge tiling of $T$.
Then we define the \emph{lattice path sign} of $\tau$ as $\lpsgn{\tau} := \sgn{\lambda}$, where $\lambda \in \mathfrak{S}_m$ is the permutation such that, for each $i$, the lattice path determined by $\tau$ that starts at $A_i$ ends at $E_{\lambda (i)}$. \end{definition} It follows that the determinant of $N(T)$ gives an enumeration of the \emph{lattice path signed lozenge tilings of $T$}. \begin{theorem} \label{thm:nilp-matrix} Let $T \subset \mathcal{T}_d$ be a non-empty balanced subregion. Then the lattice path signed lozenge tilings of $T$ are enumerated by $\det{N(T)}$, that is, \[ \sum_{\tau \text{ tiling of } T} \lpsgn{\tau} = \det{N(T)}. \] \end{theorem} \begin{remark} \label{rem:rotations} Notice that we can use the above construction to assign, for each subregion $T$, three (non-trivially) different lattice path matrices. The matrix $N(T)$ from Theorem~\ref{thm:nilp-matrix} is one of these matrices, and the other two are the $N(\cdot)$ matrices of the $120^{\circ}$ and $240^{\circ}$ rotations of $T$. See Figure~\ref{fig:three-rotations} for an example. \begin{figure}[!ht] \includegraphics[scale=1]{figs/three-rotations} \caption{The triangular region $T_6(x^3, y^4, z^5)$ and its rotations, along with their lattice path matrices. } \label{fig:three-rotations} \end{figure} \end{remark} \section{Resolution of punctures} \label{sec:resolution} In the previous section we associated two different signs, the perfect matching sign and the lattice path sign, to each lozenge tiling of a balanced region $T$. In the case where $T$ is a triangular region, we demonstrate in this section that the signs are equivalent, up to a scaling factor dependent only on $T$. In particular, Theorem~\ref{thm:detZN} states that $|\det{Z(T)}| = |\det{N(T)}|$. In order to prove this result, we introduce a new method that we call resolution of a puncture. Throughout this section $T$ is a tileable triangular region. In particular, $T$ is balanced. \subsection{The construction}\label{sub:rez}~\par Our first objective is to describe a construction that removes a puncture from a triangular region, relative to some tiling, in a controlled fashion. More precisely, starting from a given region with a puncture, we produce a larger triangular region without this puncture. We begin by considering the special case in which we assume that $T \subset \mathcal{T}_d$ has at least one puncture, call it $\mathcal{P}$, that is not overlapped by any other puncture of $T$. Let $\tau$ be some lozenge tiling of $T$, and denote by $k$ the side length of $\mathcal{P}$. Informally, we will replace $T$ by a triangular region in $\mathcal{T}_{d + 2k}$, where the place of the puncture $\mathcal{P}$ of $T$ is taken by a tiled regular hexagon of side length $k$ and three corridors to the outer vertices of $\mathcal{T}_{d + 2k}$ that are all part of the new region. \begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/resolve-abstract-1}\\ \emph{(i) The splitting chains.} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/resolve-abstract-2}\\ \emph{(ii) The resolution $T'$.} \end{minipage} \caption{The abstract resolution of a puncture.} \label{fig:resolve-abstract} \end{figure} As above, we label the vertices of $\mathcal{T}_d$ such that the label of each unit triangle is the greatest common divisor of its vertex labels.
For ease of reference, we denote the lower-left, lower-right, and top vertex of the puncture $\mathcal{P}$ by $A, B$, and $C$, respectively. Similarly, we denote the lower-left, lower-right, and top vertex of $\mathcal{T}_d$ by $O, P$, and $Q$, respectively. Now we select three chains of unit edges such that each edge is either in $T$ or on the boundary of a puncture of $T$. We start by choosing chains connecting $A$ to $O$, $B$ to $P$, and $C$ to $Q$, respectively, subject to the following conditions: \begin{itemize} \item The chains do not cross, that is, do not share any vertices. \item There are no redundant edges, that is, omitting any unit edge destroys the connection between the desired end points of a chain. \item There are no moves to the East or Northeast on the lower-left chain $OA$. \item There are no moves to the West or Northwest on the lower-right chain $PB$. \item There are no moves to the Southeast or Southwest on the top chain $CQ$. \end{itemize} For these directions, we envision a particle that starts at a vertex of the puncture and moves on a chain to the corresponding corner vertex of $\mathcal{T}_d$. Now we connect the chains $OA$ and $CQ$ to a chain of unit edges $OACQ$ by using the Northeast edge of $\mathcal{P}$. Similarly, we connect the chains $OA$ and $BP$ to a chain $OABP$ by using the horizontal edge of $\mathcal{P}$, and we connect $PB$ and $CQ$ to the chain $PBCQ$ by using the Northwest side of $\mathcal{P}$. These three chains subdivide $\mathcal{T}_d$ into four regions. Part of the boundary of three of these regions is an edge of $\mathcal{T}_d$. The fourth region, the central one, is the area of the puncture $\mathcal{P}$. See Figure~\ref{fig:resolve-abstract}(i) for an illustration. Now consider $T \subset \mathcal{T}_d$ as embedded into $\mathcal{T}_{d+ 2k}$ such that the original region $\mathcal{T}_d$ is identified with the triangular region $T_{d+2k} (x^k y^k)$. Retain the names $A, B, C, O, P$, and $Q$ for the specified vertices of $T$ as above. We create new chains of unit edges in $\mathcal{T}_{d+ 2k}$. First, multiply each vertex in the chain $PBCQ$ by $\frac{z^k}{y^k}$ and connect the resulting vertices to a chain $P'B'C'Q'$ that is parallel to the chain $PBCQ$. Here $P', B', C'$, and $Q'$ are the images of $P, B, C$, and $Q$ under the multiplication by $\frac{z^k}{y^k}$. Informally, the chain $P'B'C'Q'$ is obtained by moving the chain $PBCQ$ just $k$ units to the East. Second, multiply each vertex in the chain $OA$ by $\frac{z^k}{x^k}$ and connect the resulting vertices to a chain $O'A'$ that is parallel to the chain $OA$. Here $A'$ and $O'$ are the points corresponding to $A$ and $O$. Informally, the chain $O'A'$ is obtained by moving the chain $OA$ just $k$ units to the Southeast. Third, multiply each vertex in the chain $P'B'$ by $\frac{y^k}{x^k}$ and connect the resulting vertices to a chain $P^*B^*$ that is parallel to the chain $P'B'$, where $P^*$ and $B^*$ are the images of $P'$ and $B'$, respectively. Thus, $P^*B^*$ is $k$ units to the Southwest of the chain $P'B'$. Connecting $A'$ and $B^*$ by horizontal edges, we obtain a chain $O'A'B^*P^*$ that has the same shape as the chain $OABP$. See Figure~\ref{fig:resolve-abstract}(ii) for an illustration. We are ready to describe the desired triangular region $T' \subset \mathcal{T}_{d+2k}$ along with a tiling. Place lozenges and punctures in the region bounded by the chain $OACQ$ and the Northeast boundary of $\mathcal{T}_{d+2k}$ as in the corresponding region of $T$.
Similarly, place lozenges and punctures in the region bounded by the chain $P'B'C'Q'$ and the Northwest boundary of $\mathcal{T}_{d+2k}$ as in the corresponding region of $T$ that is bounded by $PBCQ$. Next, place lozenges and punctures in the region bounded by the chain $O'A'B^*P^*$ and the horizontal boundary of $\mathcal{T}_{d+2k}$ as in the exterior region of $T$ that is bounded by $OABP$. Observe that corresponding vertices of the parallel chains $BCQ$ and $B'C'Q'$ can be connected by horizontal edges. The region between two such edges that are one unit apart is uniquely tileable. This gives a lozenge tiling for the region between the two chains. Similarly, the corresponding vertices of the parallel chains $OAC$ and $O'A'C'$ can be connected by Southeast edges. Respecting these edges gives a unique lozenge tiling for the region between the chains $OAC$ and $O'A'C'$. In a similar fashion, the corresponding vertices of the parallel chains $P'B'$ and $P^*B^*$ can be connected by Southwest edges, which we use as a guide for a lozenge tiling of the region between the two chains. Finally, the rhombus with vertices $A', B^*, B'$, and $B$ admits a unique lozenge tiling. Let $\tau'$ be the union of all the lozenges we placed in $\mathcal{T}_{d+2k}$, and denote by $T'$ the triangular region that is tiled by $\tau'$. Thus, $T' \subset \mathcal{T}_{d+2k}$ has a puncture of side length $k$ at each corner of $\mathcal{T}_{d+2k}$. See Figure~\ref{fig:resolve-simple} for an illustration of this. We call the region $T'$ with its tiling $\tau'$ a \emph{resolution of the puncture $\mathcal{P}$ in $T$ relative to $\tau$} or, simply, a \emph{resolution of $\mathcal{P}$}. Observe that the tiles in $\tau'$ that were not carried over from the tiling $\tau$ are in the region that is the union of the regular hexagon with vertices $A, A', B^*, B', C'$ and $C$ and the regions between the parallel chains $OA$ and $O'A'$, $CQ$ and $C'Q'$ as well as $P'B'$ and $P^*B^*$. We refer to the latter three regions as the \emph{corridors} of the resolution. Furthermore, we call the chosen chains $OA$, $PB$, and $CQ$ the \emph{splitting chains} of the resolution. The resolution blows up each splitting chain to a corridor of width $k$. \begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/resolve-simple-1}\\ \emph{(i) The selected lozenge and puncture edges.} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/resolve-simple-2}\\ \emph{(ii) The resolution $T'$ with tiling $\tau'$.} \end{minipage} \caption{A resolution of the puncture associated to $x y^4 z^2$, given the tiling $\tau$ in Figure~\ref{fig:triregion-intro} of $T$.} \label{fig:resolve-simple} \end{figure} Finally, in order to deal with an arbitrary puncture, suppose a puncture $\mathcal{P}$ in $T$ is overlapped by another puncture of $T$. Then we cannot resolve $\mathcal{P}$ using the above technique directly as it would result in a non-triangular region. Thus, we adapt the construction. Since $T$ is balanced, $\mathcal{P}$ is overlapped by exactly one puncture of $T$ (see Theorem~\ref{thm:tileable}). Let $U$ be the smallest monomial subregion of $T$ that contains both punctures. We call $U$ the \emph{minimal covering region} of the two punctures. It is uniquely tileable, and we resolve the puncture $U$ of $T \setminus U$. Notice that the lozenges inside $U$ are lost during resolution.
However, since $U$ is uniquely tileable, they are recoverable from the two punctures of $T$ in $U$. See Figure~\ref{fig:resolve-family} for an illustration. \begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/resolve-family-1}\\ \emph{(i) The selected lozenge and puncture edges.} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=0.75]{figs/resolve-family-2}\\ \emph{(ii) The resolution $T'$ with tiling $\tau'$.} \end{minipage} \caption{Resolving overlapping punctures, given the tiling in Figure~\ref{fig:triregion-intro}.} \label{fig:resolve-family} \end{figure} \subsection{Cycles of lozenges}\label{sub:cyc}~\par We now introduce another concept. It will help us to analyze the changes when resolving a puncture. Let $\tau$ be some tiling of a triangular region $T$. An \emph{$n$-cycle (of lozenges)} $\sigma$ in $\tau$ is an ordered collection of distinct lozenges $\ell_1, \ldots, \ell_n$ of $\tau$ such that the downward-pointing triangle of $\ell_i$ is adjacent to the upward-pointing triangle of $\ell_{i+1}$ for $1 \leq i < n$ and the downward-pointing triangle of $\ell_n$ is adjacent to the upward-pointing triangle of $\ell_1$. The smallest cycle of lozenges is a three-cycle; see Figure~\ref{fig:three-cycle}. \begin{figure}[!ht] \includegraphics[scale=1]{figs/three-cycle} \caption{$T_3(x^2, y^2, z^2)$ has two tilings, both are three-cycles of lozenges.} \label{fig:three-cycle} \end{figure} Let $\sigma = \{\ell_1, \ldots, \ell_n\}$ be an $n$-cycle of lozenges in the tiling $\tau$ of $T$. If we replace the lozenges in $\sigma$ by the $n$ lozenges created by adjoining the downward-pointing triangle of $\ell_i$ with the upward-pointing triangle of $\ell_{i+1}$ for $1 \leq i < n$ and the downward-pointing triangle of $\ell_n$ with the upward-pointing triangle of $\ell_1$, then we get a new tiling $\tau'$ of $T$. We call this new tiling the \emph{twist of $\sigma$} in $\tau$. The two three-cycles in Figure~\ref{fig:three-cycle} are twists of each other. See Figure~\ref{fig:cycle-twist} for another example of twisting a cycle. A puncture is \emph{inside} the cycle $\sigma$ if the lozenges of the cycle fully surround the puncture. In Figure~\ref{fig:cycle-twist}(i), the puncture associated to $x y^4 z^2$ is inside the cycle $\sigma$ and all other punctures of $T$ are not inside the cycle $\sigma$. \begin{figure}[!ht] \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/cycle-twist-1}\\ \emph{(i) A $10$-cycle $\sigma$.} \end{minipage} \begin{minipage}[b]{0.48\linewidth} \centering \includegraphics[scale=1]{figs/cycle-twist-2}\\ \emph{(ii) The twist of $\sigma$ in $\tau$.} \end{minipage} \caption{A $10$-cycle $\sigma$ in the tiling $\tau$ (see Figure~\ref{fig:triregion-intro}(ii)) and its twist.} \label{fig:cycle-twist} \end{figure} Recall that the perfect matching sign of a tiling $\tau$ is denoted by $\msgn{\tau}$ (see Definition~\ref{def:pm-sign}). \begin{lemma} \label{lem:twist-sign} Let $\tau$ be a lozenge tiling of a triangular region $T = T_d(I)$, and let $\sigma$ be an $n$-cycle of lozenges in $\tau$. Then the twist $\tau'$ of $\sigma$ in $\tau$ satisfies $\msgn{\tau'} = (-1)^{n-1}\msgn{\tau}$. \end{lemma} \begin{proof} Let $\pi$ and $\pi'$ be the perfect matching permutations associated to $\tau$ and $\tau'$, respectively (see Definition \ref{def:pm-sign}).
Without loss of generality, assume each lozenge $\ell_i$ in $\sigma$ corresponds to the upward- and downward-pointing triangles labeled $i$. Since $\tau'$ is the twist of $\sigma$ in $\tau$, we have $\pi'(i) = i+1$ for $1 \leq i < n$ and $\pi'(n) = 1$. That is, $\pi' = (1, 2, \ldots, n) \cdot \pi$, as permutations. Hence, $\msgn{\tau'} = (-1)^{n-1}\msgn{\tau}$. \end{proof} \subsection{Resolutions, cycles of lozenges, and signs}\label{sub:rez-cyc}~\par Now we are going to establish the equivalence of the perfect matching and the lattice path sign of a lozenge tiling. We begin by describing the modification of a cycle of lozenges when a puncture is resolved. We first need a definition. It uses the starting and end points of lattice paths $A_1,\ldots,A_m$ and $E_1,\ldots,E_m$, as introduced at the beginning of Subsection~\ref{sub:nilp}. The \emph{$E$-count} of a cycle is the number of lattice path end points $E_j$ ``inside'' the cycle. Alternatively, this can be seen as the sum of the side lengths of the non-overlapping punctures inside the cycle plus the sum of the side lengths of the minimal covering regions of pairs of overlapping punctures inside the cycle. For example, the cycles shown in Figure~\ref{fig:three-cycle} have $E$-counts of zero, the cycles shown in Figure~\ref{fig:cycle-twist} have $E$-counts of $1$, and the (unmarked) cycle going around the outer edge of the tiling shown in Figure~\ref{fig:cycle-twist}(i) has an $E$-count of $1 + 3 = 4$. Now we describe the change of a cycle surrounding a puncture when this puncture is resolved. \begin{lemma} \label{lem:cycle-res} Let $\tau$ be a lozenge tiling of $T = T_d(I)$, and let $\sigma$ be an $n$-cycle of lozenges in $\tau$. Suppose $T$ has a puncture $P$ (or a minimal covering region of a pair of overlapping punctures) with $E$-count $k$. Let $T'$ be a resolution of $P$ relative to $\tau$. Then the resolution takes $\sigma$ to an $(n+kl)$-cycle of lozenges $\sigma'$ in the resolution, where $l$ is the number of times the splitting chains of the resolution cross the cycle $\sigma$ in $\tau$. Moreover, $l$ is odd if and only if $P$ is inside $\sigma$. \end{lemma} \begin{proof} Fix a resolution $T' \subset \mathcal{T}_{d+2k}$ of $P$ with tiling $\tau'$ as induced by $\tau$. First, note that if $P$ is a minimal covering region of a pair of overlapping punctures, then any cycle of lozenges must avoid the lozenges present in $P$ as all such lozenges are forcibly chosen, i.e., immutable. Thus, all lozenges of $\sigma$ are present in $\tau'$. The resolution takes the cycle $\sigma$ to a cycle $\sigma'$ by adding $k$ new lozenges for each unit edge of a lozenge in $\sigma$ that belongs to a splitting chain. More precisely, such an edge is expanded to $k+1$ parallel edges. Any two consecutive edges form the opposite sides of a lozenge (see Figure~\ref{fig:cycle-insert}). Thus, each time a splitting chain of the resolution crosses the cycle $\sigma$, we insert $k$ new lozenges. As $l$ is the number of times the splitting chains of the resolution cross the cycle $\sigma$ in $\tau$, the resolution adds exactly $k l$ new lozenges to the extant lozenges of $\sigma$. Thus, $\sigma'$ is an $(n + k l)$-cycle of lozenges in $\tau'$. \begin{figure}[!ht] \includegraphics[scale=1]{figs/cycle-insert} \caption{Expansion of a lozenge cycle at a crossing of a splitting chain.} \label{fig:cycle-insert} \end{figure} Since the splitting chains are going from $P$ to the boundary of the triangle $\mathcal{T}_d$, the splitting chains terminate outside the cycle.
Hence, if a splitting chain crosses into the cycle, it must cross back out. If $P$ is outside $\sigma$, then the splitting chains start outside $\sigma$, and so $l$ must be even. On the other hand, if $P$ is inside $\sigma$, then the splitting chains start inside $\sigma$, and so $l = 3 + 2j$, where $j$ is the number of times the splitting chains cross \emph{into} the cycle. \end{proof} Let $\tau_1$ and $\tau_2$ be tilings of $T$, and let $\pi_1$ and $\pi_2$ be their respective perfect matching permutations. Suppose $\pi_2 = \rho \pi_1$, for some permutation $\rho$. Write $\rho$ as a product of disjoint cycles whose length is at least two. (Note that these cycles will be of length at least three.) Each factor corresponds to a cycle of lozenges of $\tau_1$. If all these cycles are twisted, we get $\tau_2$. We call these lozenge cycles the \emph{difference cycles} of $\tau_1$ and $\tau_2$. Using the idea of difference cycles, we characterise when two tilings have the same perfect matching sign. \begin{corollary} \label{cor:msgn-cycle} Let $\tau$ be a lozenge tiling of $T = T_d(I)$, and let $\sigma$ be an $n$-cycle of lozenges in $\tau$. Then the following statements hold. \begin{enumerate} \item The $E$-count of $\sigma$ is even if and only if $n$ is odd. \item Two lozenge tilings of $T$ have the same perfect matching sign if and only if the sum of the $E$-counts of the difference cycles is even. \end{enumerate} \end{corollary} \begin{proof} Suppose $T$ has $a$ punctures and pairs of overlapping punctures, $P_1, \ldots, P_a$, inside $\sigma$ that are \emph{not} in a corner, i.e., not associated to $x^k$, $y^k$, or $z^k$, for some $k$. Let $j_i$ be the $E$-count of $P_i$. Similarly, suppose $T$ has $b$ punctures and pairs of overlapping punctures, $Q_1, \ldots, Q_b$, outside $\sigma$ that are \emph{not} in a corner, i.e., not associated to $x^k$, $y^k$, or $z^k$, for some $k$. Let $k_i$ be the $E$-count of $Q_i$. If we resolve all of the punctures $P_1, \ldots, P_a, Q_1, \ldots, Q_b$, then $\sigma$ is taken to a cycle $\sigma'$. By Lemma~\ref{lem:cycle-res}, $\sigma'$ has length \[ n' := n + (j_1 l_1 + \cdots + j_a l_a) + (k_1 m_1 + \cdots + k_b m_b), \] where the integers $l_1, \ldots, l_a$ are odd and the integers $m_1, \ldots, m_b$ are even. Denote by $T'$ the region obtained from $T$ by resolving its $a+b$ punctures. After merging touching punctures, it becomes a hexagon. By \cite[Theorem~1.2]{CGJL}, every tiling of $T'$ is thus obtained from any other tiling of $T'$ through a sequence of three-cycle twists, as in Figure~\ref{fig:three-cycle}. By Lemma~\ref{lem:twist-sign}, such twists do not change the perfect matching sign, so all tilings of $T'$ have the same perfect matching sign. Since twisting $\sigma'$ changes the sign by the factor $(-1)^{n'-1}$, again by Lemma~\ref{lem:twist-sign}, $n'$ must be an odd integer. Since $n'$ is odd, $n' - (k_1 m_1 + \cdots + k_b m_b) = n + (j_1 l_1 + \cdots + j_a l_a)$ is also odd. Thus, $n$ is odd if and only if $j_1 l_1 + \cdots + j_a l_a$ is even. Since the integers $l_1, \ldots, l_a$ are odd, we see that $j_1 l_1 + \cdots + j_a l_a$ is even if and only if an even number of the $j_i$ are odd, i.e., the sum $j_1 + \cdots + j_a$ is even. Notice that this sum is the $E$-count of $\sigma$. Thus, claim (i) follows. Suppose two tilings $\tau_1$ and $\tau_2$ of $T$ have difference cycles $\sigma_1, \ldots, \sigma_p$. Then by Lemma~\ref{lem:twist-sign}, $\msgn{\tau_2} = \sgn{\sigma_1} \cdots \sgn{\sigma_p} \msgn{\tau_1}$. By claim (i), $\sigma_i$ is a cycle of odd length if and only if the $E$-count of $\sigma_i$ is even.
Thus, $\sgn{\sigma_1} \cdots \sgn{\sigma_p} = 1$ if and only if an even number of the $\sigma_i$ have an odd $E$-count, which happens if and only if the sum of the $E$-counts of $\sigma_1, \ldots, \sigma_p$ is even. Hence, claim (ii) follows. \end{proof} Next, we describe the change of a lattice path permutation when twisting a cycle of lozenges. To this end, we single out certain punctures. We recursively define a puncture of $T \subset \mathcal{T}_d$ to be a \emph{non-floating} puncture if it touches the boundary of $\mathcal{T}_d$ or if it overlaps or touches a non-floating puncture of $T$. Otherwise we call a puncture a \emph{floating} puncture. We also distinguish between \emph{preferred} and \emph{acceptable directions} on the splitting chains used for resolving a puncture. Here we again use the perspective of a particle that starts at a vertex of the puncture and moves on a chain to the corresponding corner vertex of $\mathcal{T}_d$. Our convention is: \begin{itemize} \item On the lower-left chain, the preferred directions are Southwest and West; the acceptable directions are Northwest and Southeast. \item On the lower-right chain, the preferred directions are Southeast and East; the acceptable directions are Northeast and Southwest. \item On the top chain, the preferred directions are Northeast and Northwest; the acceptable directions are East and West. \end{itemize} \begin{lemma} \label{lem:lpsgn-cycle} Let $\tau$ be a lozenge tiling of $T = T_d(I)$, and let $\sigma$ be a cycle of lozenges in $\tau$. Then the lattice path signs of $\tau$ and the twist of $\sigma$ in $\tau$ are the same if and only if the $E$-count of $\sigma$ is even. \end{lemma} \begin{proof} Suppose $T$ has $n$ floating punctures. We proceed by induction on $n$ in five steps. \emph{Step $1$: The base case.} If $n = 0$, then every tiling of $T$ induces the same bijection $\{A_1,\ldots,A_m\} \to \{E_1,\ldots,E_m\}$. Thus, all tilings have the same lattice path sign. Since $T$ has no floating punctures, $\sigma$ has an $E$-count of zero. Hence, the claim is true if $n=0$. \emph{Step $2$: The set-up.} Suppose now that $n > 0$, and choose $P$ among the floating punctures and the minimal covering regions of two overlapping floating punctures of $T$ as the one that covers the upward-pointing unit triangle of $\mathcal{T}_d$ with the smallest monomial label. Let $s > 0$ be the side length of $P$, and let $k$ be the $E$-count of $\sigma$. Furthermore, let $\upsilon$ be the lozenge tiling of $T$ obtained as the twist of $\sigma$ in $\tau$. Both $\tau$ and $\upsilon$ induce bijections $\{A_1,\ldots,A_m\} \to \{E_1,\ldots,E_m\}$, and we denote by $\lambda \in \mathfrak{S}_m$ and $\mu \in \mathfrak{S}_m$ the corresponding lattice path permutations, respectively. We have to show $\lpsgn \tau = (-1)^k \lpsgn \upsilon$, that is, \[ \sgn \lambda = (-1)^k \sgn \mu. \] \emph{Step $3$: Resolutions.} We resolve $P$ relative to the tilings $\tau$ and $\upsilon$, respectively. For the resolution of $P$ relative to $\tau$, choose the splitting chains so that each unit edge has a preferred direction, except possibly the unit edges on the boundary of a puncture of $T$; this is always possible. By our choice of $P$, no other floating punctures are to the lower-right of $P$. It follows that no edge on the lower-right chain crosses a lattice path, except possibly at the end of the lattice path.
For the resolution of $P$ relative to $\upsilon$, use the splitting chains described in the previous paragraph, except for the edges that cross the lozenge cycle $\sigma$. They have to be adjusted since these unit edges disappear when twisting $\sigma$. We replace each such unit edge by a unit edge in an acceptable direction followed by a unit edge in a preferred direction so that the result has the same starting and end point as the unit edge it replaces. Note that this is always possible and that this determines the replacement uniquely. The new chains meet the requirements on splitting chains. Using these splitting chains we resolve the puncture $P$ relative to $\tau$ and $\upsilon$, respectively. The result is a triangular region $T' \subset \mathcal{T}_{d+2s}$ with induced tilings $\tau'$ and $\upsilon'$, respectively. Denote by $\sigma'$ the extension of the cycle $\sigma$ in $T'$ (see Lemma~\ref{lem:cycle-res}). Since $\tau$ and $\upsilon$ differ exactly on the cycle $\sigma$ and the splitting chains were chosen to be the same except on $\sigma$, it follows that twisting $\sigma'$ in $\tau'$ results in the tiling $\upsilon'$ of $T'$. \begin{figure}[!ht] \includegraphics[scale=1]{figs/res-comm-diag} \caption{The commutative diagram used in the proof of Lemma~\ref{lem:lpsgn-cycle}.} \label{fig:res-comm-diag} \end{figure} \emph{Step $4$: Lattice path permutations.} Now we compare the signs of $\lambda, \mu \in \mathfrak{S}_m$ with the signs of $\lambda'$ and $\mu'$, the lattice path permutations induced by the tilings $\tau'$ and $\upsilon'$ of $T'$, respectively. First, we compare the starting and end points of lattice paths in $T$ and $T'$. Resolution of the puncture identifies each starting and end point in $T$ with one such point in $T'$. We refer to these points as the \emph{old} starting and end points in $T'$. Note that the end points on the puncture $P$ correspond to the end points on the puncture in the Southeast corner of $T'$. The starting points in $T$ that are on one of the splitting chains used for resolving $P$ relative to $\tau$ and $\upsilon$ are the same. Assume there are $t$ such points. After resolution, each point gives rise to a new starting and end point in $T'$. Both are connected by a lattice path that is the same in both resolutions of $P$. Hence, in order to compare the signs of the permutations $\lambda'$ and $\mu'$ on $m+t$ letters, it is enough to compare the lattice paths between the old starting and end points in both resolutions. Retain for these points the original labels used in $T$. Using this labeling, the lattice paths induce permutations $\tilde{\lambda}$ and $\tilde{\mu}$ on $m$ letters. Again, this is the same process in both resolutions. It follows that \begin{equation}\label{eq:compare-res-signs} \sgn (\tilde{\lambda}) \cdot \lpsgn (\tau') = \sgn (\tilde{\mu}) \cdot \lpsgn (\upsilon'). \end{equation} Assume now that $P$ is a puncture. Then the end points on $P$ are indexed by $s$ consecutive integers. Since we retain the labels, the same indices label the end points on the puncture in the Southeast corner of $T'$. The end points on $P$ correspond to the points in $T'$ whose labels are obtained by multiplying by $x^s y^s$. Consider now the case where all edges in the lower-right splitting chain in $T$ are in preferred directions. Then the lattice paths induced by $\tau'$ connect each point in $T'$ that corresponds to an end point on $P$ to the end point in the Southeast corner of $T'$ with the same index.
Thus, $\sgn (\lambda) = \sgn (\tilde{\lambda})$. Next, assume that there is exactly one edge in an acceptable direction on the lower-right splitting chain of $T$. If this direction is Northeast, then the $s$ lattice paths passing through the points in $T'$ corresponding to the end points on $P$ are moved one unit to the North. If the acceptable direction was Southwest, then the edge in this direction leads to a shift of these paths by one unit to the South. In either case, this shift means that the paths in $T$ and $T'$ connect to end points that differ by $s$ transpositions, so $\sgn (\tilde{\lambda})= (-1)^{s} \sgn (\lambda)$. More generally, if $j$ is the number of unit edges on the lower-right splitting chain of $T$ that are in acceptable directions, then \[ \sgn (\tilde{\lambda}) = (-1)^{js} \sgn (\lambda). \] Next, denote by $c$ the number of unit edges on the lower-right splitting chain that have to be adjusted when twisting $\sigma$. Since each of these edges is replaced by an edge in a preferred and one edge in an acceptable direction, after twisting the lower-right splitting chain in $T$ has exactly $j+c$ unit edges in acceptable directions. It follows as above that \[ \sgn (\tilde{\mu}) = (-1)^{(j+c)s} \sgn (\mu). \] Since a unit edge on the splitting chain has to be adjusted when twisting if and only if it is shared by two consecutive lozenges in the cycle $\sigma$, the number $c$ is even if and only if the puncture $P$ is outside $\sigma$. Moreover, as the puncture $P$ has been resolved in $T'$, we conclude by induction that $\tau'$ and $\upsilon'$ have the same lattice path sign if and only if the $E$-count of $\sigma'$ is even. Thus, we get \begin{equation} \lpsgn (\upsilon') = \begin{cases} (-1)^{k-s} \lpsgn (\tau') & \text{if $P$ is inside $\sigma$}, \\ (-1)^{k} \lpsgn (\tau') & \text{if $P$ is outside $\sigma$}. \end{cases} \end{equation} \emph{Step $5$: Bringing it all together.} We consider the two cases separately: \begin{enumerate} \item Suppose $P$ is inside $\sigma$. Then $c$ is odd. Hence, the above considerations imply \begin{equation*} \begin{split} \sgn (\lambda) & = (-1)^{js}\sgn (\tilde{\lambda}) = (-1)^{js + k-s}\sgn (\tilde{\mu}) \\ & = (-1)^{js + k-s + (j+c) s} \sgn ({\mu}) \\ & = (-1)^k \sgn ({\mu}), \end{split} \end{equation*} as desired. \item Suppose $P$ is outside of $\sigma$. Then $c$ is even, and we conclude \begin{equation*} \begin{split} \sgn (\lambda) & = (-1)^{js}\sgn (\tilde{\lambda}) = (-1)^{js + k}\sgn (\tilde{\mu}) \\ & = (-1)^{js + k + (j+c) s} \sgn ({\mu}) \\ & = (-1)^k \sgn ({\mu}). \end{split} \end{equation*} \end{enumerate} Finally, it remains to consider the case where $P$ is the minimal covering region of two overlapping punctures of $T$. Let $\hat{T}$ be the triangular region that differs from $T$ only by having $P$ as a puncture, and let $\hat{\tau}$ and $\hat{\upsilon}$ be the tilings of $\hat{T}$ induced by $\tau$ and $\upsilon$, respectively. Since we order the end points of lattice paths using monomial labels, it is possible that the indices of the end points on the Northeast boundary of $P$ in $\hat{T}$ differ from those of the points on the Northeast boundary of the overlapping punctures in $T$. However, the lattice paths induced by $\tau$ and $\upsilon$ connecting the points on the Northeast boundary of $P$ to the points on the Northeast boundary of the overlapping punctures are the same. Hence the lattice path signs of $\tau$ and $\hat{\tau}$ differ in the same way as the signs of $\upsilon$ and $\hat{\upsilon}$.
Since we have shown our assertion for $\hat{\tau}$ and $\hat{\upsilon}$, it also follows for $\tau$ and $\upsilon$. \end{proof} Using difference cycles, we now characterise when two tilings of a region have the same lattice path sign. \begin{corollary} \label{cor:lpsgn-cycle} Let $T = T_d(I)$ be a non-empty, balanced triangular region. Then two tilings of $T$ have the same lattice path sign if and only if the sum of the $E$-counts (which may count some end points $E_j$ multiple times) of the difference cycles is even. \end{corollary} \begin{proof} Suppose two tilings $\tau_1$ and $\tau_2$ of $T$ have difference cycles $\sigma_1, \ldots, \sigma_p$. By Lemma~\ref{lem:lpsgn-cycle}, $\lpsgn{\tau_1} = \lpsgn{\tau_2}$ if and only if an even number of the $\sigma_i$ have an odd $E$-count. The latter is equivalent to the sum of the $E$-counts of $\sigma_1, \ldots, \sigma_p$ being even. \end{proof} Our above results imply that the two signs that we assigned to a given lozenge tiling, the perfect matching sign (see Definition~\ref{def:pm-sign}) and the lattice path sign (see Definition~\ref{def:nilp-sign}), are the same up to a scaling factor depending only on $T$. The main result of this section now follows easily. \begin{theorem} \label{thm:detZN} Let $T = T_d(I)$ be a balanced triangular region. The following statements hold. \begin{enumerate} \item Let $\tau$ and $\tau'$ be two lozenge tilings of $T$. Then their perfect matching signs are the same if and only if their lattice path signs are the same, that is, \[ \msgn (\tau) \cdot \lpsgn (\tau) = \msgn (\tau') \cdot \lpsgn (\tau'). \] \item In particular, we have that \[ |\det{Z(T)}| = |\det{N(T)}|. \] \end{enumerate} \end{theorem} \begin{proof} Consider two lozenge tilings of $T$. According to Corollaries~\ref{cor:msgn-cycle} and~\ref{cor:lpsgn-cycle}, they have the same perfect matching and the same lattice path signs if and only if the sum of the $E$-counts of the difference cycles is even. Hence using Theorems~\ref{thm:pm-matrix} and~\ref{thm:nilp-matrix}, it follows that $|\det{Z(T)}| = |\det{N(T)}|$. \end{proof} Theorem \ref{thm:detZN} allows us to move freely between the points of view of lozenge tilings, perfect matchings, and families of non-intersecting lattice paths, as needed. In particular, it implies that rotating a triangular region by $120^{\circ}$ or $240^{\circ}$ does not change the enumerations. Thus, for example, the three matrices described in Remark~\ref{rem:rotations} as well as the matrix given in Example~\ref{exa:Z-matrix} all have the same determinant, up to sign. \subsection{A single sign}\label{sub:singlesign}~\par We exhibit triangular regions such that all lozenge tilings have the same sign, that is, the signed and the unsigned enumerations are the same. This is guaranteed to happen if all floating punctures (see the definition preceding Lemma~\ref{lem:lpsgn-cycle}) have an even side length. \begin{corollary} \label{cor:same-sign} Let $T$ be a tileable triangular region, and suppose all floating punctures of $T$ have an even side length. Then every lozenge tiling of $T$ has the same perfect matching sign as well as the same lattice path sign, and so $\per{Z(T)} = |\det{Z(T)}|$. In particular, simply-connected regions that are tileable have this property. \end{corollary} \begin{proof} The equality of the perfect matching signs follows from Corollary~\ref{cor:msgn-cycle}, and the equality of the lattice path signs from Corollary~\ref{cor:lpsgn-cycle}.
Now Theorem~\ref{thm:pm-matrix} implies $\per{Z(T)} = |\det{Z(T)}|$. The second part is immediate as simply-connected regions have no floating punctures. \end{proof} \begin{remark} The above corollary, obtained here with a different approach, vastly extends \cite[Theorem~1.2]{CGJL}, where hexagons are considered. This special case was also established independently in \cite[Section~3.4]{Ke}, with essentially the same proof as~\cite{CGJL}. Corollary~\ref{cor:same-sign} can also be derived from Kasteleyn's theorem on enumerating perfect matchings~\cite{Ka}. To see this, notice that in the case where all floating punctures have even side lengths, all ``faces'' of the bipartite graph $G(T)$ have size congruent to $2 \pmod{4}$. \end{remark} We now extend Corollary~\ref{cor:same-sign}. To this end we define the \emph{shadow} of a puncture to be the region of $T$ that is both below the puncture and to the right of the line extending from the upper-right edge of the puncture. See Figure~\ref{fig:puncture-shadow}. \begin{figure}[!ht] \includegraphics[scale=1]{figs/puncture-shadow} \caption{The puncture $P$ has the puncture $Q$ in its shadow (light grey), but $Q$ does not have a puncture in its shadow (dark grey).} \label{fig:puncture-shadow} \end{figure} \begin{corollary}\label{cor:same-sign-shadow} Let $T = T_d(I)$ be a balanced triangular region. If all floating punctures (and minimal covering regions of overlapping punctures) with other punctures in their shadows have even side length, then any two lozenge tilings of $T$ have the same perfect matching and the same lattice path sign. Thus, $\per{Z(T)} = |\det{Z(T)}|$. \end{corollary} \begin{proof} Let $P$ be a floating puncture or a minimal covering region with no punctures in its shadow. Then the shadow of $P$ is uniquely tileable, and thus the lozenges in the shadow are fixed in each lozenge tiling of $T$. Hence, no cycle of lozenges in any tiling of $T$ can contain $P$. Using Corollary~\ref{cor:msgn-cycle} and Corollary~\ref{cor:lpsgn-cycle}, we see that $P$ does not affect the sign of the tilings of $T$. Now our assumptions imply that all floating punctures (or minimal covering regions of overlapping punctures) of $T$ that can be contained in a difference cycle of two lozenge tilings of $T$ have even side length. Thus, we conclude $\per{Z(T)} = |\det{Z(T)}|$ as in the proof of Corollary~\ref{cor:same-sign}. \end{proof}
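The sign bookkeeping behind the last two corollaries can be illustrated by a small schematic computation; the matrix below is chosen purely for illustration and does not arise from a specific triangular region. \begin{remark} Consider $Z = \left( \begin{smallmatrix} 1 & -1 \\ 1 & 1 \end{smallmatrix} \right)$. Its two permutation terms are $z_{11} z_{22} \cdot \sgn{(\mathrm{id})} = 1$ and $z_{12} z_{21} \cdot \sgn{(1\,2)} = (-1) \cdot 1 \cdot (-1) = 1$, so all terms carry the same sign. Consequently, $|\det Z| = 2$ agrees with the permanent of the entrywise absolute value of $Z$, which is also $2$. Loosely speaking, Corollaries~\ref{cor:same-sign} and~\ref{cor:same-sign-shadow} assert exactly this uniform-sign behaviour for the matrices $Z(T)$ of the regions considered there, with the nonzero permutation terms indexed by the lozenge tilings of $T$. \end{remark}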
\section{Introduction} \IEEEPARstart{A}{ccess} control systems are critical components of information systems that help protect information resources from unauthorized access. Various access control models and approaches have been proposed in the literature including Discretionary Access Control (DAC) \cite{sandhu1994access} \cite{harrison1976protection}, Mandatory Access Control (MAC) \cite{bell1973secure} \cite{sandhu1993lattice}, and Role-Based Access Control (RBAC) \cite{sandhu1996role}. However, with the rapid advances in newer computing and information technologies (e.g., social networks, Internet of Things (IoT), cloud/edge computing, etc.), existing access control (AC) approaches have become inadequate in providing flexible and expressive authorization services \cite{fong2011relationship}. For example, a health care environment requires a more expressive AC model that meets the needs of patients and health care providers, as well as other stakeholders in the health care ecosystem \cite{jin2009patient, karimi2017multi}. \emph{Attribute Based Access Control} (ABAC) models present a promising approach that addresses newer challenges in emerging applications \cite{hu2013guide}. An ABAC approach grants access rights to users based on attributes of entities in the system (i.e., user attributes, object attributes, and environmental conditions) and a set of authorization rules. Although organizations and developers are interested in employing the next-generation AC models, adopting such policy frameworks poses a significant challenge. Many large organizations need to grant authorization to their vast user populations distributed across disparate computing environments, including legacy systems. Each of these computing environments may have its own AC model. The manual development of a single policy for the entire organization is tedious and error-prone. \emph{Policy Mining} techniques have been proposed in the literature to address such challenges and help organizations cut the cost, time, and errors of policy development and management. Policy mining algorithms ease the migration to more recent/appropriate authorization models by completely (or partially) automating the process of constructing AC policies. Policy mining techniques were first introduced for developing RBAC policies. Kuhlmann \textit{et al.} coined the term ``role mining'' to refer to a data mining approach that constructs roles from a given permission assignment dataset \cite{kuhlmann2003role}; this work was followed by various role mining techniques, such as \cite{schlegelmilch2005role, molloy2008mining, xu2012algorithms}. Although the proposed approaches are beneficial in developing optimal sets of roles, they are not applicable to extracting ABAC policies. Xu and Stoller were the first to study the problem of mining ABAC policies from given access control matrices or logs \cite{xu2014mining, xu2015mining}. Following that, several researchers have investigated various ABAC policy mining techniques \cite{medvet2015evolutionary, iyer2018mining, cotrini2018mining}. However, these studies suffer from several limitations, as follows: \begin{itemize} \item First, the existing approaches do not support mining authorization rules with negative filters. An ABAC policy rule can be composed of a set of positive and negative filters. Negative filters are useful in scenarios where an exception needs to be expressed.
For example, a healthcare provider can express the following rule using a negative attribute filter: ``\textit{A nurse can read a patient's record except for payment purposes}.'' Using negative filters in rule expressions results in a more concise authorization policy (Section \ref{evaluation}). \item Second, some proposed approaches such as \cite{xu2014mining, xu2015mining, iyer2018mining} are unable to mine a high-quality policy when the given access log is not complete, in the sense that not every possible combination of attribute values is included in the access log (Section \ref{problem}). \item Third, the proposed approaches are unable to mine a policy from noisy access logs containing over-assignments and under-assignments \cite{medvet2015evolutionary, cotrini2018mining}. Having noisy access records is a common problem in evolving domains such as IoT or social networks \cite{marinescu2017ivd}. It is essential that an ABAC policy miner be capable of handling a reasonable amount of noise to be applicable to real-world applications. \item Last but not least, the existing approaches do not include techniques for improving the mined policy after the first round of policy extraction. In addition, in scenarios where the authorization policies may change over time (such as in social networks with the addition and removal of various applications), these approaches do not provide any guidelines for adjusting the policy. This makes practical deployment of these approaches very difficult. \end{itemize} Furthermore, none of the existing work addresses these issues in an integrated way. In this paper, we propose a machine learning based ABAC policy mining approach to address these challenges. To summarize, the primary contributions of this paper are as follows: \begin{enumerate} \item We propose an unsupervised learning based approach to extract ABAC policy rules that contain both positive and negative attribute filters as well as positive and negative relation conditions. \item The proposed policy mining approach is effective even with an incomplete set of access logs and in the presence of noise. \item As part of the unsupervised learning based approach, we propose the rule pruning and policy refinement algorithms to enhance the quality of the mined policy and to ease its maintenance. \item We propose a \emph{policy quality metric} based on policy correctness and conciseness to be able to compare different sets of mined policy rules and to select the best one based on some given criteria. \item We implement a prototype of the proposed model and evaluate it using various ABAC policies to show its efficiency and effectiveness. \end{enumerate} To the best of our knowledge, our proposed approach is the first unsupervised learning based ABAC policy mining method that can be used to extract ABAC policies with both positive and negative attribute and relationship filters. The rest of the paper is organized as follows. In Section \ref{preliminaries}, we overview the ABAC model and its policy language as well as the unsupervised learning algorithm. In Section \ref{problem}, we define the ABAC policy extraction problem, discuss the related challenges, and introduce the metrics for evaluating the extracted policy. In Section \ref{proposed}, we present the proposed ABAC policy extraction approach. In Section \ref{evaluation}, we present the evaluation of the proposed approach on various sets of policies.
We present the related work in Section \ref{relatedwork} and the conclusions and future work in Section \ref{conclusion}. \section{Preliminaries}\label{preliminaries} In this section, we overview ABAC, the ABAC policy language, and the unsupervised learning algorithm. \subsection{ABAC Model} In 2013, NIST published a ``\textit{Guide to ABAC Definition and Considerations}'' \cite{hu2013guide}, according to which, ``\textit{the ABAC engine can make an access control decision based on the assigned attributes of the requester, the assigned attributes of the object, environment conditions, and a set of policies that are specified in terms of those attributes and conditions}.'' Throughout the paper, we use \emph{user attributes}, \emph{object attributes}, and \emph{session attributes} to refer to the attributes of the requester, attributes of the object, and the environmental attributes/conditions, respectively. Accordingly, $U$, $O$, $S$, $OP$ are the sets of users, objects, sessions, and operations in a system, and user attributes ($A_u$), object attributes ($A_o$), and session attributes ($A_s$) are mappings of subject attributes, object attributes, and environmental attributes as defined in the NIST Guide \cite{hu2013guide}. $E = U \cup O \cup S$ and $A = A_u \cup A_o \cup A_s$ are the sets of all entities and all attributes in the system, respectively. \begin{definition} (\textbf{Attribute Range}). Given an attribute $a \in A$, the \emph{attribute range} $V_a$ is the set of all valid values for $a$ in the system. \end{definition} \begin{definition} (\textbf{Attribute Function}). Given an entity $e \in E$, an \emph{attribute function} $f_{a\_e}$ is a function that maps an entity to a specific value from the attribute range. Specifically, $f_{a\_e}(e, a)$ returns the value of attribute $a$ for entity $e$. \end{definition} \begin{example}\label{ex1} $f_{a\_e}(John, position) = \mathit{faculty}$ indicates that the value of attribute \emph{position} for user \emph{John} is \emph{faculty}. \end{example} \begin{example}\label{ex2} $f_{a\_e}(dep1, crs) = \{cs101, cs601, cs602\}$ indicates that the value of attribute \emph{crs} for object \emph{dep1} is the set $\{cs101, cs601, cs602\}$. \end{example} Each attribute in the system can be single-valued (atomic) or multi-valued (set-valued). In Example \ref{ex1}, \emph{position} is a single-valued attribute, while \emph{crs} in Example \ref{ex2} is a multi-valued attribute. For simplicity, we consider only atomic attributes in this work. The process of extracting an ABAC policy with multi-valued attributes is exactly the same as that with atomic attributes; however, we need to pre-process the data to convert each multi-valued attribute to a set of atomic attributes. This can be done using various techniques such as defining dummy variables \cite{suits1957use}, the 1-of-$K$ scheme \cite{bishop2006pattern}, etc. Once the policy rules are extracted, one more step converts the atomic attribute filters back to the corresponding multi-valued attribute filters. Attribute filters are used to denote the sets of users, objects, and sessions to which an authorization rule applies. \begin{definition} (\textbf{Attribute Filter}). An \emph{attribute filter} is defined as a set of tuples $\mathcal{F} = \{ \langle a, v | !v \rangle | \: a \in A$ and $v \in V_a \}$.
Here $\langle a, v \rangle $ is a positive attribute filter tuple that indicates $a$ has value $v$, and $ \langle a, !v \rangle $ is a negative attribute filter tuple that indicates $a$ has any value in its range except $v$. \end{definition} \begin{example}\label{ex3} Tuple $\langle label, !top\text{-}secret \rangle $ points to all entities in the system that do not have ``\emph{top-secret}'' as the value of their security label ``\emph{label}''. \end{example} \begin{definition} (\textbf{Attribute Filter Satisfaction}). An entity $e \in E$ satisfies an attribute filter $\mathcal{F}$, denoted as $e \models \mathcal{F}$, iff \begin{equation*} \begin{gathered} \forall \langle {a}_i, {v}_i\rangle \: \in \mathcal{F} : f_{a\_e}(e, a_i) = {v}_i \: \land \\ \forall \langle {a}_i, !{v}_i\rangle \: \in \mathcal{F} : f_{a\_e}(e, a_i) \neq {v}_i. \end{gathered} \end{equation*} \end{definition} \begin{example} Suppose $A_u = \{dept, position, courses\}$. The set of tuples $\mathcal{F_U} = \{\langle dept, CS\rangle, \langle position, \allowbreak grad\rangle\}$ denotes a user attribute filter. Here, the graduate students in the CS department satisfy $\mathcal{F_U}$. \end{example} \begin{definition} (\textbf{Relation Condition}). A \emph{relation condition} is defined as a set of tuples $\mathcal{R} = \{\langle a, b | !b\rangle | \: a, b \in A \land \: a \neq b \}$. Here $\langle a, b \rangle $ is a positive relation condition tuple that indicates $a$ and $b$ have the same values, and $ \langle a, !b \rangle $ is a negative relation condition tuple that indicates $a$ and $b$ do not have the same values. \end{definition} A relation is used in a rule to denote the equality condition between two attributes of users, objects, or sessions. Note that the two attributes in a relation condition must have the same range. \begin{definition} (\textbf{Relation Condition Satisfaction}). An entity $e \in E$ satisfies a relation condition $\mathcal{R}$, denoted as $e \models \mathcal{R}$, iff \begin{equation*} \begin{gathered} \forall \langle{a}_i, {b}_i\rangle \: \in \mathcal{R} : f_{a\_e}(e, a_i) = f_{a\_e}(e, b_i) \: \land \\ \forall \langle{a}_i, !{b}_i\rangle \: \in \mathcal{R} : f_{a\_e}(e, a_i) \neq f_{a\_e}(e, b_i). \end{gathered} \end{equation*} \end{definition} \begin{definition} (\textbf{Access Request}). An \emph{access request} is a tuple $q = \langle u, o, s, op\rangle$ where user $u \in U$ sends a request to the system to perform operation $op \in OP$ on object $o \in O$ in session $s \in S$. \end{definition} \begin{definition} (\textbf{Authorization Tuple/Access Log}). An \emph{authorization tuple} is a tuple $t = \langle q, d\rangle$ containing the decision $d$ made by the access control system for request $q$. An \emph{Access Log} $\mathcal{L}$ is a set of such tuples. \end{definition} The decision $d$ of an authorization tuple can be \emph{permit} or \emph{deny}. A tuple with the \emph{permit} decision means that user $u$ can perform operation $op$ on object $o$ in session $s$. A tuple with the \emph{deny} decision means that user $u$ cannot perform operation $op$ on object $o$ in session $s$. An access log is the union of a \emph{Positive Access Log}, $\mathcal{L^+}$, and a \emph{Negative Access Log}, $\mathcal{L^-}$, where: $$\mathcal{L^+} = \{\langle q, d\rangle | \langle q, d\rangle \: \in \mathcal{L} \land d = permit \},$$ and $$\mathcal{L^-} = \{\langle q, d\rangle | \langle q, d\rangle \: \in \mathcal{L} \land d = deny \}.$$ \begin{definition} (\textbf{ABAC Rule}).
An \emph{access rule} $\rho$ is a tuple $ \langle \mathcal{F}, \mathcal{R}, op | !op\rangle $, where $\mathcal{F}$ is an attribute filter, $\mathcal{R}$ is a relation condition, and $op$ is an operation. $!op$ is a negated operation that indicates the operation can have any value except $op$. \end{definition} \begin{example} Consider rule ${\rho}_1 = \langle \{\langle position,student\rangle, \: \: \allowbreak \langle location, campus\rangle, \langle type, article\rangle\}, \{\langle dept_u, dept_o \rangle \},\: \: \allowbreak read\rangle$. It can be interpreted as ``\textit{A student can read an article if he/she is on campus and his/her department matches the department of the article}''. \end{example} \begin{definition} (\textbf{Rule Satisfaction}). An access request $q = \langle u, o, s, op \rangle $ is said to satisfy a rule $\rho$, denoted as $q \models \rho$, iff \begin{equation*} \langle u, o, s \rangle \models \mathcal{F} \land \langle u, o, s \rangle \models \mathcal{R} \land op_{q} = op_{\rho}. \end{equation*} \end{definition} \begin{definition} (\textbf{ABAC Policy}). An ABAC policy is a tuple $\pi = \langle E, OP, A, f_{a\_e}, \mathcal{P} \rangle $ where $E$, $OP$, $A$, and $\mathcal{P}$ are the sets of entities, operations, attributes, and ABAC rules in the system and $f_{a\_e}$ is the attribute function. \end{definition} \begin{definition} (\textbf{ABAC Policy Decision}). The decision of an ABAC policy $\pi$ for an access request $q$, denoted as $d_\pi(q)$, is \emph{permit} iff: $$\exists \rho \in \mathcal{P} : q \models \rho $$ otherwise, the decision is \emph{deny}. \end{definition} If an access request satisfies a rule of the access control policy, then the decision of the system for such an access request is \textit{permit}. If the access request does not satisfy any rule in the access control policy, then the decision of the system for such an access request is \textit{deny}. TABLE \ref{tab:notations} summarizes the notations used in this paper.
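To make these semantics concrete, the following minimal Python sketch evaluates an access request against a set of ABAC rules with positive and negative attribute filters and relation conditions. It is an illustration of the definitions above, not our implementation; all identifiers are ours, and the user, object, and session attributes are merged into a single dictionary for brevity. \begin{verbatim}
def satisfies_filter(attrs, attr_filter):
    # attr_filter: list of (attribute, value, positive);
    # positive=True encodes <a, v>, positive=False encodes <a, !v>.
    for attr, value, positive in attr_filter:
        if positive != (attrs.get(attr) == value):
            return False
    return True

def satisfies_relation(attrs, relation):
    # relation: list of (a, b, positive); compares attribute values.
    for a, b, positive in relation:
        if positive != (attrs.get(a) == attrs.get(b)):
            return False
    return True

def decide(policy_rules, request):
    # Permit iff some rule is satisfied; deny-by-default otherwise.
    for attr_filter, relation, op in policy_rules:
        if (request["op"] == op
                and satisfies_filter(request["attrs"], attr_filter)
                and satisfies_relation(request["attrs"], relation)):
            return "permit"
    return "deny"

# "A student on campus can read an article of his/her own department."
rule = ([("position", "student", True), ("location", "campus", True),
         ("type", "article", True)],
        [("dept_u", "dept_o", True)],
        "read")
req = {"attrs": {"position": "student", "location": "campus",
                 "type": "article", "dept_u": "CS", "dept_o": "CS"},
       "op": "read"}
print(decide([rule], req))  # -> permit
\end{verbatim}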
\begin{table*} \centering \caption{Notations} \label{tab:notations} \begin{tabular}{cl} \toprule Notation & Definition \\ \midrule $U$, $O$, $S$, $OP$ & Sets of users, objects, sessions, and operations \\ $A_u$, $A_o$, and $A_s$ & Sets of user attributes, object attributes, and session attributes \\ $E = U \cup O \cup S$ & Set of all entities \\ $A = A_u \cup A_o \cup A_s$ & Set of all attributes \\ $V_a$ & Attribute Range: set of all valid values for $a \in A$ \\ $f_{a\_e}(e, a)$ & Attribute Function: a function that maps an entity $e \in E$ to a value from $V_a$ \\ $\mathcal{F} = \{ \langle a, v | !v \rangle | \: a \in A \land v \in V_a \}$ & Attribute Filter \\ $\mathcal{R} = \{\langle a, b | !b\rangle | \: a, b \in A \land \: a \neq b \land V_a = V_b\}$ & Relation Condition \\ $q = \langle u, o, s, op\rangle$ & Access Request \\ $t = \langle q, d\rangle$ & Authorization Tuple, showing decision $d$ made by the system for request $q$ \\ $\mathcal{L}$ & Access Log, set of authorization tuples \\ $\mathcal{L^+} = \{\langle q, d\rangle | \langle q, d\rangle \: \in \mathcal{L} \land d = permit \}$ & Positive Access Log \\ $\mathcal{L^-} = \{\langle q, d\rangle | \langle q, d\rangle \: \in \mathcal{L} \land d = deny \}$ & Negative Access Log \\ $\rho = \langle \mathcal{F}, \mathcal{R}, op|!op\rangle$ & ABAC Rule \\ $\mathcal{P}$ & Set of all policy rules \\ $\pi = \langle E, OP, A, f_{a\_e}, \mathcal{P} \rangle$ & ABAC Policy \\ $d_\pi(q)$ & The decision of an ABAC policy $\pi$ for an access request $q$ \\ $TP_{\pi|\mathcal{L}}$, $FP_{\pi|\mathcal{L}}$, $TN_{\pi|\mathcal{L}}$, and $FN_{\pi|\mathcal{L}}$ & Relative True Positive, False Positive, True Negative, and False Negative Rates \\ $ACC_{\pi|\mathcal{L}}$ & Relative Accuracy Rate \\ $F{\text -}score_{\pi|\mathcal{L}}$ & Relative F-score \\ $WSC(\pi)$ & Weighted Structural Complexity of policy $\pi$ \\ $\mathcal{Q}_{\pi}$ & Policy Quality Metric\\ \bottomrule \end{tabular} \end{table*} \subsection{Unsupervised Learning Algorithm} Unsupervised learning algorithms try to infer a function that describes the structure of unlabeled data. They are useful when little or no labeled data is available. We leverage such methods for extracting ABAC policies from access logs. In particular, given a set of authorization tuples, we employ an unsupervised learning approach to mine and extract an \emph{ABAC policy} of high quality. An unsupervised learning approach is suitable because no labeled data is available for the desired ABAC rules. ABAC policy extraction, in this case, can be considered as a mapping of authorization tuples to a set of clusters that are representative of the desired ABAC rules. Such a mapping can be expressed as a function, $h : \mathcal{X} \to \mathcal{Y}$, where: \begin{enumerate} \item $\mathcal{X}$ is a set of authorization tuples (i.e., an access log). \item $\mathcal{Y}$ is a set of numbered labels (i.e., cluster labels, each cluster corresponding to a rule of the ABAC policy $\pi$). \end{enumerate} The goal is then to learn the function $h$ with low clustering error and mine a policy of high quality. \section{Problem Definition} \label{problem} \subsection{ABAC Policy Extraction Problem} Although organizations are interested in employing an ABAC model, adopting it is a big challenge for them. The manual development of such a policy is tedious and error-prone.
\emph{Policy Mining} techniques have been proposed to address such challenges in order to reduce the cost, time, and errors of policy development/maintenance. ABAC policy mining algorithms ease the migration to the ABAC framework by completely (or partially) automating the development of ABAC policy rules. The primary input to a policy mining algorithm is the log of authorization decisions in the system. The log indicates the authorization decision (i.e., permit or deny) for any given access request by a user of the system. For ABAC policy mining, such a log is accompanied by the attributes of the entities involved in the log entries. The goal of a policy mining algorithm is to extract ABAC policy rules from access logs that have high quality with respect to some quality metrics (e.g., policy size and correctness). We define the ABAC policy extraction problem formally as follows: \begin{definition} (\textbf{ABAC Policy Extraction Problem}). Let $I = \langle E, OP, A, f_{a\_e}, \mathcal{L} \rangle$, where the components are as defined earlier; then the \emph{ABAC policy extraction problem} is to find a set of rules $\mathcal{P}$ such that the ABAC policy $\pi = \langle E, OP, A, f_{a\_e}, \mathcal{P} \rangle$ has high quality with respect to $\mathcal{L}$. \end{definition} \subsection{Challenges and Requirements} For an ABAC policy extraction approach to be applicable to a wide range of real-world scenarios, we identify the following challenges and requirements: \begin{enumerate} \item \textit{Correctness of Mined Policy}: The mined policy must be consistent with the original authorization log, in the sense that for each logged request the decision of the mined policy must match the decision recorded in the log. An inconsistent extracted policy may result in situations in which an originally authorized access is denied (\textit{more restrictive}) or an originally unauthorized access is permitted (\textit{less restrictive}) by the system. \item \textit{Complexity of Mined Policy}: The policy mining algorithm should endeavor to extract a policy that is as concise as possible. Since the policy rules need to be manipulated by human administrators, the more concise they are, the more manageable and easier to interpret they are. In addition, succinct rules are desirable as they are easier to audit and manage. \item \textit{Negative Attribute Filters}: The ABAC policy mining solution should support both positive and negative attribute filters, which result in a more concise and manageable mined policy. \item \textit{Relation Conditions}: The solution should support the extraction of relation conditions in order to generate a more concise and manageable mined policy. \item \textit{Sparse Logs}: In the real world, the access log that is input to the policy mining algorithm may be sparse, representing only a small fraction of all possible access requests. The policy mining algorithm must be able to extract useful rules even from a sparse log. \item \textit{Mining Negative Authorization Rules}: An ABAC policy can contain both positive and negative rules, which permit or deny access requests, respectively. The use of negative rules is helpful in situations where specifying exceptions to more general rules is important. Including negative policy rules would help in generating a more concise ABAC policy. Thus, the policy mining algorithm should be able to extract both positive and negative authorization rules.
\item \textit{Noisy Authorization Log}: In the real world and with complex and dynamic information systems, it is possible to have a noisy authorization log consisting of over-assignments and under-assignments. These issues occur either due to a wrong configuration of the original authorization system or improper policy updates by administrators. The policy mining algorithm should be capable of extracting meaningful rules even in the presence of an acceptable amount of noise in the input access log. \item \textit{Dynamic and Evolving Policies}: Modern information systems are often dynamic. The authorization needs of these systems and the attributes of the entities in the environment evolve rapidly. These changes will result in over-assignments or under-assignments. The proposed method should employ a mechanism to support the dynamicity of the information systems and their authorization policies and ease the maintenance of evolving systems. \end{enumerate} Our proposed approach addresses all the requirements except the sixth one. Table \ref{tab:existing_techniques} shows the challenges that are addressed by our proposed approach and how it improves upon the state-of-the-art policy mining techniques. In Section \ref{relatedwork}, we discuss the existing solutions in detail. \begin{table*} \centering \caption{State-of-the-art ABAC Rule Mining Techniques} \label{tab:existing_techniques} \begin{tabular}{lccccc} \toprule & Xu \textit{et al.} \cite{xu2015mining} & Medvet \textit{et al.} \cite{medvet2015evolutionary} & Iyer \textit{et al.} \cite{iyer2018mining} & Cotrini \textit{et al.} \cite{cotrini2018mining} & Our Proposed Approach \\ \midrule Policy Correctness & \checkmark &\checkmark & \checkmark & \checkmark & \checkmark \\ Policy Complexity & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark \\ Negative Attribute Filters & \ding{55} & \ding{55} & \ding{55} & \ding{55} & \checkmark \\ Relation Conditions & \checkmark & \checkmark & \checkmark & \ding{55} & \checkmark \\ Sparse Logs & \ding{55} & \checkmark & \ding{55} & \checkmark & \checkmark \\ Negative Authorization Rules & \ding{55} & \ding{55} & \checkmark & \ding{55} & \ding{55} \\ Noisy Authorization Log & \checkmark & \ding{55} & \ding{55} & \ding{55} & \checkmark \\ System Dynamicity & \ding{55} & \ding{55} & \ding{55} & \ding{55} & \checkmark \\ \bottomrule \end{tabular} \end{table*} \subsection{Evaluation Metrics} One of the main metrics for evaluating the quality of an extracted policy is how accurately it matches the original policy. That is, the authorization decisions made by the extracted policy for a set of access requests should be similar to the decisions made by the original policy for that set of requests. As an example, if the decision of the original policy for an access request $q$ is permit, then the decision of the mined policy for the same access request must be permit as well. If the mined policy denies the same access request, then we record this authorization tuple as a \textit{False Negative}. We define the \textit{Relative True Positive}, \textit{Relative False Positive}, \textit{Relative True Negative}, and \textit{Relative False Negative} rates, respectively, as follows: \begin{definition} (\textbf{Relative True Positive Rate}).
Given an access log $\mathcal{L}$ and an ABAC policy $\pi$, the relative true positive rate of $\pi$ regarding $\mathcal{L}$, denoted as $TP_{\pi|\mathcal{L}}$, is the portion of the positive access log for which the decision of $\pi$ is \emph{permit}: \begin{equation*} TP_{\pi|\mathcal{L}} = \dfrac{|\{ \langle q,d \rangle \in \mathcal{L}^+ | d_\pi(q) = permit \}|}{|\mathcal{L}^+|} \end{equation*} Here, $|s|$ is the cardinality of set $s$. \end{definition} \begin{definition} (\textbf{Relative False Positive Rate}). The relative false positive rate of $\pi$ regarding $\mathcal{L}$, denoted as $FP_{\pi|\mathcal{L}}$, is the portion of the negative access log for which the decision of $\pi$ is \emph{permit}: \begin{equation*} FP_{\pi|\mathcal{L}} = \dfrac{|\{ \langle q,d \rangle \in \mathcal{L}^- | d_\pi(q) = permit \}|}{|\mathcal{L}^-|} \end{equation*} \end{definition} Similarly, we calculate the relative true negative rate and false negative rate of $\pi$ regarding $\mathcal{L}$, denoted as $TN_{\pi|\mathcal{L}}$ and $FN_{\pi|\mathcal{L}}$, respectively, as follows: \begin{equation*} TN_{\pi|\mathcal{L}} = \dfrac{|\{\langle q,d \rangle \in \mathcal{L}^- | d_\pi(q) = deny \}|}{|\mathcal{L}^-|} \end{equation*} \begin{equation*} FN_{\pi|\mathcal{L}} = \dfrac{|\{ \langle q,d \rangle \in \mathcal{L}^+ | d_\pi(q) = deny \}|}{|\mathcal{L}^+|} \end{equation*} The \textit{relative precision} and \textit{relative recall} are calculated as follows: \begin{equation*} Precision_{\pi|\mathcal{L}} = \dfrac{TP_{\pi|\mathcal{L}}}{TP_{\pi|\mathcal{L}} + FP_{\pi|\mathcal{L}}} \end{equation*} \begin{equation*} Recall_{\pi|\mathcal{L}} = \dfrac{TP_{\pi|\mathcal{L}}}{TP_{\pi|\mathcal{L}} + FN_{\pi|\mathcal{L}}} \end{equation*} The relative accuracy metric, $ACC_{\pi|\mathcal{L}}$, measures the accuracy of the mined policy $\pi$ with regard to the decisions made by the original policy indicated by $\mathcal{L}$ and is defined formally as follows: \begin{definition} (\textbf{Relative Accuracy}). Given the relative true positive and negative rates, the relative accuracy of $\pi$ regarding $\mathcal{L}$, denoted as $ACC_{\pi|\mathcal{L}}$, is calculated as follows: \begin{equation*} ACC_{\pi|\mathcal{L}} = \dfrac{TP_{\pi|\mathcal{L}} + TN_{\pi|\mathcal{L}}}{TP_{\pi|\mathcal{L}} + TN_{\pi|\mathcal{L}} + FP_{\pi|\mathcal{L}} + FN_{\pi|\mathcal{L}}} \end{equation*} \end{definition} As accuracy may be misleading in unbalanced data sets \cite{accuracy_paradox} (which is very probable in the case of access logs), we use the \textbf{relative F-score} to better evaluate the mined policy: \begin{equation*} F{\text -}score_{\pi|\mathcal{L}} = 2 \cdot \dfrac{Precision_{\pi|\mathcal{L}} \cdot Recall_{\pi|\mathcal{L}}}{Precision_{\pi|\mathcal{L}} + Recall_{\pi|\mathcal{L}}} \end{equation*} Policies with a higher relative F-score are better as they are more consistent with the original access log. On the other hand, as the number of filters in each rule and the number of rules in an access control policy increase, policy intelligibility decreases and maintenance of the policy becomes harder. Hence, complexity is another key metric for evaluating the quality of a policy. \textbf{Weighted Structural Complexity (WSC)} is a generalization of policy size and was first introduced for RBAC policies \cite{molloy2010mining} and later extended for ABAC policies \cite{xu2015mining}.
WSC is consistent with usability studies of access control rules, which indicate that the more concise the policies are, the more manageable they become \cite{beckerle2013formal}. Informally, for a given ABAC policy, its WSC is a weighted sum of its elements. Formally, for an ABAC policy $\pi$ with rules $\mathcal{P}$, its WSC is defined as follows: \begin{equation*} \begin{gathered} WSC(\pi) = WSC(\mathcal{P}) \end{gathered} \end{equation*} \begin{equation*} \begin{gathered} WSC(\mathcal{P}) = \sum\limits_{\rho \in \mathcal{P}} WSC(\rho) \end{gathered} \end{equation*} \begin{equation*} \begin{gathered} WSC(\rho = \langle \mathcal{F_U}, \mathcal{F_O}, \mathcal{F_S}, \mathcal{R}, op, d \rangle ) = w_1 WSC(\mathcal{F_U}) + \\ w_2 WSC(\mathcal{F_O}) + w_3 WSC(\mathcal{F_S}) + w_4 WSC(\mathcal{R}) \end{gathered} \end{equation*} \begin{equation*} \begin{gathered} \forall s \in \{\mathcal{F_U}, \mathcal{F_O}, \mathcal{F_S}, \mathcal{R}\} : WSC(s) = |s| \end{gathered} \end{equation*} where $|s|$ is the cardinality of set $s$ and each $w_i$ is a user-specified weight. Van Rijsbergen proposes an effectiveness measure for combining two different metrics $P$ and $R$ in \cite{rijsbergen_1979} as follows: \begin{equation*} \begin{gathered} E = 1 - \dfrac{1}{\dfrac{\alpha}{P} + \dfrac{1 - \alpha}{R}} \end{gathered} \end{equation*} Given the relative F-score and WSC measures for various mined policies resulting from running different mining algorithms over an access log, it may not be straightforward to select the best algorithm and, hence, the mined policy with the highest quality. So, to be able to compare the quality of different mined ABAC policies, we combine the two metrics based on Van Rijsbergen's effectiveness measure \cite{rijsbergen_1979} and define the \textbf{Policy Quality Metric} as follows: \begin{equation*} \begin{gathered} \mathcal{Q}_{\pi} = ( \dfrac{\alpha}{F{\text -}score_{\pi|\mathcal{L}}} + \dfrac{1 - \alpha}{\Delta WSC_{\pi}})^{-1} \end{gathered} \end{equation*} Here $\alpha = \dfrac{1}{1 + \beta^2}$, where $\beta$ determines the importance of the relative F-score over policy complexity, and $\Delta WSC_{\pi}$ shows the relative reduction in complexity with regard to the complexity of the most complex mined policy. $\Delta WSC_{\pi}$ is calculated as follows: \begin{equation*} \begin{gathered} \Delta WSC_{\pi} = \dfrac{WSC_{max} - WSC(\pi) + 1}{WSC_{max}} \end{gathered} \end{equation*} $WSC_{max}$ is the weighted structural complexity of the most complex mined policy. \begin{definition} (\textbf{Most Complex Mined Policy}). The most complex mined policy is the mined policy with the highest weighted structural complexity. It is extracted by iterating through the positive access log $\mathcal{L^+}$ and adding an access control rule for each authorization tuple if it is not already included in the mined policy. The corresponding rule for each authorization tuple includes all attributes of the user, object, and session of that authorization tuple. \end{definition} Considering the equal importance of the relative F-score and the relative loss of complexity of the policy, we calculate the quality measure as follows: \begin{equation*} \begin{gathered} \mathcal{Q}_{\pi} = \dfrac{2 \cdot F{\text -}score_{\pi|\mathcal{L}} \cdot \Delta WSC_{\pi}}{F{\text -}score_{\pi|\mathcal{L}} + \Delta WSC_{\pi}} \end{gathered} \end{equation*} A mined policy with a higher F-score has a higher policy quality. On the other hand, as the complexity of a policy increases, its quality decreases. The intuition here is that once an extracted policy reaches a high F-score, adding additional rules will lead to a decrease in $\mathcal{Q}_{\pi}$. For the most complex mined policy $\pi_w$, $\Delta WSC_{\pi_w} \approx 0$, so its policy quality $\mathcal{Q}_{\pi_w}$ is very close to zero. For an empty mined policy $\pi_e$ (a policy without any rules), while $\Delta WSC_{\pi_e} \approx 1$, as it denies all the access requests, its false negative rate is one and its true positive rate is zero. So its recall is zero and, as a result, its F-score is zero as well. So the quality of the empty policy $\mathcal{Q}_{\pi_e}$ is zero, too. The most complex mined policy and the empty mined policy are the two extreme cases with policy quality equal to zero. Other mined policies between these two extremes have a policy quality higher than zero.
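As an illustration of this computation, the following minimal Python sketch implements the equal-importance case ($\alpha = 1/2$) of the policy quality metric; the numbers are toy values rather than output of our prototype. \begin{verbatim}
# Sketch: policy quality from relative rates and complexity.
def f_score(tp, fp, fn):
    precision = tp / (tp + fp) if tp + fp > 0 else 0.0
    recall = tp / (tp + fn) if tp + fn > 0 else 0.0
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

def delta_wsc(wsc_policy, wsc_max):
    # Relative complexity reduction w.r.t. the most complex policy.
    return (wsc_max - wsc_policy + 1) / wsc_max

def policy_quality(tp, fp, fn, wsc_policy, wsc_max):
    f = f_score(tp, fp, fn)
    d = delta_wsc(wsc_policy, wsc_max)
    # Harmonic mean of F-score and complexity reduction (alpha = 1/2).
    return 2 * f * d / (f + d) if f + d > 0 else 0.0

# Toy numbers: an accurate policy of moderate complexity.
print(policy_quality(tp=0.95, fp=0.05, fn=0.05,
                     wsc_policy=120, wsc_max=1000))  # ~0.91
\end{verbatim}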
\section{The Proposed Learning-based Approach} \label{proposed} Our proposed learning-based ABAC policy extraction procedure consists of the steps summarized in Figure \ref{fig:overview}. \begin{figure}[htbp] \centering \includegraphics[scale=0.5]{images/Overview.pdf} \caption{Overview of the Proposed Approach.} \label{fig:overview} \end{figure} \subsection{Data Pre-processing} As the features of our learning algorithm are categorical variables, the first step in pre-processing the access log is to convert all numerical variables to their corresponding categorical values. For example, in ABAC, environmental attributes deal with time, location, or dynamic aspects of the access control scenario. Hence, we need to pre-process and discretize such continuous variables into categorical ones (e.g., mapping the time of access to working hours and non-working hours) so that our proposed algorithm is applicable to them. We also need to handle \emph{missing values} in this step. As the frequency of each attribute value is an important factor in our rule extraction algorithm (Section \ref{rule_extraction}) for deciding if an attribute is effective or not, it is important to replace missing values in a way that does not distort the original frequency of each attribute value. For this purpose, we replace each missing value by \emph{UNK} (i.e., unknown). \subsection{Selection of Learning Algorithm} We use the \textit{K-modes algorithm} \cite{cao2009new}, which is a well-known unsupervised learning algorithm used for clustering categorical data. \textit{K-modes} has been proved effective in mining ABAC policies \cite{karimi2018unsupervised}; this algorithm uses an initialization method based on both the distance between data points and the density of data points. Using both density and distance when initializing clusters helps avoid two problems: (i) selecting outliers as new cluster centers when relying only on distance; and (ii) creating new clusters surrounding one center when relying only on density. Compared to a random initialization method, this method provides more robustness and better accuracy in the clustering process \cite{cao2009new}.
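The clustering step can be sketched with the open-source \texttt{kmodes} Python package, whose \texttt{init='Cao'} option implements the density/distance-based initialization cited above; the data layout and attribute values below are illustrative, not taken from our datasets. \begin{verbatim}
import numpy as np
from kmodes.kmodes import KModes  # pip install kmodes

# Each row: one positive authorization tuple after pre-processing
# (categorical user/object/session attribute values plus the operation).
log = np.array([
    ["student", "campus", "CS", "article",   "read"],
    ["student", "campus", "EE", "article",   "read"],
    ["faculty", "campus", "CS", "gradebook", "setScore"],
    ["faculty", "home",   "CS", "gradebook", "setScore"],
])

km = KModes(n_clusters=2, init="Cao", verbose=0)
labels = km.fit_predict(log)  # cluster label per authorization tuple
print(labels)                 # e.g. [0 0 1 1]
print(km.cluster_centroids_)  # cluster modes: one candidate rule each
\end{verbatim}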
\subsection{Parameter Tuning} In the next step, we \emph{tune the learning parameters}. There are several challenges that need to be addressed in this step, which include the following: \subsubsection{Number of Clusters (k)} One of the main challenges in unsupervised learning is determining the number of clusters, $k$. In our sample policies, as we know the number of rules in each policy, we can set the number of clusters beforehand; but in a real situation, where we do not know the number of rules in advance, making the correct choice of $k$ is difficult. One of the popular methods for determining the number of clusters in an unsupervised learning model is the \emph{Elbow Method} \cite{thorndike1953belongs,goutte1999clustering}. This method is based on the total within-group sum of squares. $k$ will be chosen as the number of clusters if adding another cluster does not give a much better modeling of the data (i.e., the elbow point of the graph); a sketch of this heuristic is given below. As a second approach, we choose the number of clusters ($k$) which gives the best modeling of the data in terms of the policy \textit{quality} metric. For this purpose, we run our clustering algorithm for different values of $k$ and calculate the accuracy of the corresponding model using 10-fold cross-validation. The value of $k$ that maximizes the accuracy of the model is selected as the final number of clusters. Note that increasing $k$ will ultimately reduce the amount of clustering error, i.e., it will increase the accuracy of the model; but by increasing the number of clusters, the number of extracted rules will also increase, resulting in more complexity (i.e., higher $\mathit{WSC}$). So it is important to find an optimal $k$ that balances policy accuracy and WSC. \subsubsection{Cluster Initialization \& Local Optima} Different cluster initializations can lead to a different set of clusters, as \emph{k}-means/\emph{k}-modes may converge to a local optimum. To overcome this issue, for a given number of clusters, $k$, we train multiple models with different cluster initializations and then select the partition with the smallest clustering error.
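Continuing the illustrative example above, the elbow heuristic can be sketched using the clustering cost reported by \texttt{kmodes} (the categorical analogue of within-group dispersion); the data and range of $k$ are illustrative. \begin{verbatim}
# Sketch: elbow heuristic over the illustrative `log` array above.
costs = {}
for k in range(1, 5):
    km = KModes(n_clusters=k, init="Cao")
    km.fit(log)
    costs[k] = km.cost_  # total mismatches of points to cluster modes
for k in sorted(costs):
    print(k, costs[k])
# Pick k at the "elbow": where the cost stops dropping sharply.
\end{verbatim}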
\subsection{Policy Rules Extraction} \label{rule_extraction} The main phase in our proposed approach is the extraction of the ABAC policy rules. In the first step, we need to collect all the authorization tuples related to each rule of the policy. We use data clustering for this purpose. We divide the access log into clusters, where the records in each cluster correspond to one AC rule in the system. This is done based on finding similar patterns between the features (i.e., attribute values) of the records (i.e., authorization tuples). In the second step, we extract the \emph{attribute filters} of such a rule. We adapt the rule extraction algorithm in \cite{karimi2018unsupervised} and extend it to extract both positive and negative attribute filters. We define \emph{effective positive attribute} and \emph{effective negative attribute} as follows: \begin{definition} (\textbf{Effective Positive (Negative) Attribute}). Let $S =\{ \langle a, v \rangle \}$ be the set of all possible attribute-value pairs in the system; we define $\langle a_j, v_j \rangle \: \in S$ ($ \langle a_j, !v_j \rangle \in S$) as an \emph{effective positive (negative) attribute} pair of $\rho_i$ corresponding to cluster $C_i$, where the frequency of occurrence of $v_j$ in the set of all the records of cluster $C_i$ is much higher (lower) than its frequency of occurrence in the original data; this is determined based on a threshold $\mathcal{T}_P$ ($\mathcal{T}_N$). The attribute expression $ \langle a_j, v_j \rangle $ ($ \langle a_j, !v_j \rangle $) is added to the attribute filters of the extracted rule $\rho_i$ for $C_i$. \end{definition} In the final step, we extract the \emph{relation conditions} of the AC rule for each cluster. This will be done based on the frequency of equality between pairs of attributes in the records of each cluster. We define \emph{effective positive relation} and \emph{effective negative relation} as follows: \begin{definition} (\textbf{Effective Positive (Negative) Relation}). Let $R = \{ \langle a, b \rangle \}$ be the set of all possible relations between pairs of attributes in the system; we define $ \langle a_j, b_j \rangle $ ($ \langle a_j, !b_j \rangle $) as an \emph{effective positive (negative) relation} pair of $\rho_i$ corresponding to cluster $C_i$, where the frequency with which $a_j$ equals $b_j$ in the records of cluster $C_i$ is much higher (lower) than the corresponding frequency in the original data; this is determined based on a threshold $\theta_P$ ($\theta_N$). The relation $ \langle a_j, b_j \rangle $ ($ \langle a_j, !b_j \rangle $) is added to the relation conditions of the extracted rule $\rho_i$ for this cluster. \end{definition} We note that the values of the thresholds $\mathcal{T}_P$, $\mathcal{T}_N$, $\theta_P$, and $\theta_N$ will be different for each data set. To find the best threshold values for each data set, we run the rule extraction algorithm for different values of the thresholds, and the values which result in the maximum accuracy over the cross-validation data set are selected. Algorithms \ref{attrExtraction} and \ref{relExtraction} show the effective attribute and effective relation extraction procedures, respectively. \begin{algorithm} \caption{Effective attribute extraction algorithm}\label{attrExtraction} \begin{algorithmic}[1] \Procedure{extractAttributeFilters}{} \Require {$C_i$, $A$, $V$, $\mathcal{L}$, $\mathcal{T}_P$, $\mathcal{T}_N$} \Ensure {$\mathcal{F}$} \State $\mathcal{F} \gets \oldemptyset$ \ForAll{$a \in A$} \ForAll{$v_j \in V_{a}$} \If {$Freq(v_j, C_i) - Freq(v_j, \mathcal{L}) > \mathcal{T}_P$} \State $\mathcal{F} \gets \mathcal{F} \: \cup \langle a, v_j \rangle $ \EndIf \If {$Freq(v_j, \mathcal{L}) - Freq(v_j, C_i) > \mathcal{T}_N$} \State $\mathcal{F} \gets \mathcal{F} \: \cup \langle a, !v_j \rangle $ \EndIf \EndFor \EndFor \Return $\mathcal{F}$ \EndProcedure \end{algorithmic} \end{algorithm} \begin{algorithm} \caption{Effective relation extraction algorithm}\label{relExtraction} \begin{algorithmic}[1] \Procedure{extractRelations}{} \Require {$C_i$, $A$, $\mathcal{L}$, $\theta_P$, $\theta_N$} \Ensure {$\mathcal{R}$} \State $\mathcal{R} \gets \oldemptyset$ \ForAll{$a \in A$} \ForAll{$b \in A$ and $b \neq a$} \If {$Freq(a= b, C_i)$ - $Freq(a=b, \mathcal{L})$>$\theta_P$} \State $\mathcal{R} \gets \mathcal{R} \: \cup \langle a, b \rangle $ \EndIf \If {$Freq(a= b, \mathcal{L})$ - $Freq(a=b, C_i)$>$\theta_N$} \State $\mathcal{R} \gets \mathcal{R} \: \cup \langle a, !b \rangle $ \EndIf \EndFor \EndFor \Return $\mathcal{R}$ \EndProcedure \end{algorithmic} \end{algorithm}
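The frequency test of Algorithm~\ref{attrExtraction} can be sketched in Python as follows, under the assumption that $Freq(v, S)$ denotes the relative frequency of value $v$ for a given attribute within the record set $S$; the helper names are illustrative. \begin{verbatim}
from collections import Counter

def rel_freq(records, attr_idx, value):
    # Relative frequency of `value` in column `attr_idx`.
    counts = Counter(r[attr_idx] for r in records)
    return counts[value] / len(records)

def effective_filters(cluster, full_log, attr_idx, values, t_pos, t_neg):
    # Positive/negative filter tuples for one attribute column.
    filters = []
    for v in values:
        diff = (rel_freq(cluster, attr_idx, v)
                - rel_freq(full_log, attr_idx, v))
        if diff > t_pos:
            filters.append((attr_idx, v, True))   # <a, v>
        elif -diff > t_neg:
            filters.append((attr_idx, v, False))  # <a, !v>
    return filters
\end{verbatim}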
\subsection{Policy Enhancement} After the first phase of policy rule extraction, we get a policy which may not be as accurate and concise as we desire. We enhance the quality of the mined policy through iterations of policy improvement steps that include \emph{rule pruning} and \emph{policy refinement}. \subsubsection{Rule Pruning} \label{pruning} During the rule extraction phase, it is possible to have two clusters that correspond to the same rule. As a result, the extracted rules of these clusters are very similar to each other. Having two similar rules in the final policy increases the complexity of the mined policy without necessarily improving its accuracy; as a result, it hurts the policy quality. To address such an issue, in the rule pruning step, we identify similar rules and eliminate the one whose removal improves the policy quality more. If eliminating neither of the two rules improves the policy quality, we keep both rules. This may happen when we have two very similar AC rules in the original policy. We measure the similarity between two rules using the Jaccard similarity \cite{jaccard1912distribution} as follows: \begin{equation*} \begin{gathered} J(S_1, S_2) = |S_1 \cap S_2| / |S_1 \cup S_2| \end{gathered} \end{equation*} Based on this, we calculate the similarity between two rules $\rho_1$ and $\rho_2$ as follows: \begin{equation*} \begin{gathered} J(\rho_1, \rho_2) = \\ \frac{\big[ \sum\limits_{\mathcal{F} \in \{\mathcal{F_U}, \mathcal{F_O}, \mathcal{F_S}\}} |\mathcal{F}_{\rho_1} \cap \mathcal{F}_{\rho_2}| + |\mathcal{R}_{\rho_1} \cap \mathcal{R}_{\rho_2}| + |op_{\rho_1} \cap op_{\rho_2}| \big]} {\big[ \sum\limits_{\mathcal{F} \in \{\mathcal{F_U}, \mathcal{F_O}, \mathcal{F_S}\}} |\mathcal{F}_{\rho_1} \cup \mathcal{F}_{\rho_2}| + |\mathcal{R}_{\rho_1} \cup \mathcal{R}_{\rho_2}| + |op_{\rho_1} \cup op_{\rho_2}| \big] } \end{gathered} \end{equation*} We consider two rules to be similar if their Jaccard similarity score is more than 0.5, which means that the size of their common elements is more than half of the size of the union of their elements. Algorithm \ref{rulePruning} shows the rule pruning procedure. \begin{algorithm} \caption{Rule Pruning algorithm}\label{rulePruning} \begin{algorithmic}[1] \Procedure{rulePruning}{} \Require {$\pi$} \Ensure {$\pi$} \State $\mathcal{P} \gets \pi.\mathcal{P}$ \State $q \gets \Call{calcQuality}{\mathcal{P}}$ \ForAll{$\rho_i \in \mathcal{P}$} \ForAll{$\rho_j \in \mathcal{P}$ and $\rho_i \neq \rho_j$} \If {$\Call{Similarity}{\rho_i, \rho_j} > 0.5$} \State $\mathcal{P}_i \gets \mathcal{P}/\rho_i$ \State $\mathcal{P}_j \gets \mathcal{P}/\rho_j$ \State $q_i \gets \Call{calcQuality}{\mathcal{P}_i}$ \State $q_j \gets \Call{calcQuality}{\mathcal{P}_j}$ \If {$q_i >= q$ and $q_i >= q_j$} \State $\mathcal{P} \gets \mathcal{P}_i$ \EndIf \If {$q_j >= q$ and $q_j >= q_i$} \State $\mathcal{P} \gets \mathcal{P}_j$ \EndIf \EndIf \EndFor \EndFor \Return $\mathcal{P}$ \EndProcedure \end{algorithmic} \end{algorithm}
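The similarity test used here (and reused by \textsc{getSimilarRules} in the refinement algorithm below) can be sketched as follows, with a rule represented as the sets of its filter tuples, relation tuples, and operation; the representation and values are illustrative, and the two rules mirror Example~\ref{exam6}. \begin{verbatim}
def jaccard(rule_a, rule_b):
    # Elements of a rule: attribute filters, relations, operation.
    ea = set(rule_a["filters"]) | set(rule_a["relations"]) | {rule_a["op"]}
    eb = set(rule_b["filters"]) | set(rule_b["relations"]) | {rule_b["op"]}
    return len(ea & eb) / len(ea | eb)

r1 = {"filters": [("position", "faculty", True)],
      "relations": [], "op": "setScore"}
r2 = {"filters": [("position", "faculty", True), ("uDept", "EE", True)],
      "relations": [], "op": "setScore"}
print(jaccard(r1, r2))  # 2/3 > 0.5, so the rules count as similar
\end{verbatim}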
To address these issues, we propose a \emph{policy refinement} procedure, which is shown in Algorithm \ref{policyrefinement}. Here, we try to refine the mined policy ($\pi_m$) based on the patterns discovered in the FN or FP records. These patterns are used to eliminate extra filters from restricted rules or to append missing filters to relaxed rules. To extract patterns from the FN or FP records, we apply our rule extraction procedure on these records to get the corresponding policies $\pi_{FN}$ and $\pi_{FP}$; here, the training data are the FN and FP records, respectively. We compare the extracted FN or FP rules with the mined policy and remove the extra filters from, or append the missing ones to, the corresponding rules. As an example, consider the FP records. Here, our goal is to extract the patterns that are common among access requests that were permitted based on the mined policy while they should have been denied based on the original policy. In each step of refinement, a rule from $\pi_m$ that is similar to a rule from $\pi_{FN}$ or $\pi_{FP}$ based on the Jaccard similarity (Section \ref{pruning}) is selected and then refined in two ways, as discussed below. \vspace{2mm} \noindent\textit{\textbf{Policy refinement based on $\pi_{FN}$}}: In the case of FN records, two situations are possible: a rule is missing from the mined policy ($\pi_m$), or one of the rules in $\pi_m$ is too restrictive. To resolve this issue, for each rule $\rho_i \in \pi_{FN}$: \begin{itemize} \item if there is a similar rule $\rho_j \in \pi_m$, then we refine $\rho_j$ as follows: \begin{equation*} \begin{gathered} \forall \mathcal{F} \in \{\mathcal{F_U}, \mathcal{F_O}, \mathcal{F_S}, \mathcal{R}\} : \quad \mathcal{F}_{\rho_j} = \mathcal{F}_{\rho_j} \setminus (\mathcal{F}_{\rho_j} \setminus \mathcal{F}_{\rho_i}) \\ \end{gathered} \end{equation*} where $\mathcal{F}$ ranges over all the filter and relation sets of a rule. So, the extra filters are removed from the restricted rule $\rho_j$ (i.e., $\mathcal{F}_{\rho_j}$ is reduced to $\mathcal{F}_{\rho_j} \cap \mathcal{F}_{\rho_i}$). \item if there is no such rule, then $\rho_i$ is the missing rule and we add it to $\pi_m$. \end{itemize} \noindent\textit{\textbf{Policy refinement based on $\pi_{FP}$}}: In the case of FP records, some filters might be missing from an extracted rule in the mined policy ($\pi_m$); so for each rule $\rho_i \in \pi_{FP}$ with a similar rule $\rho_j \in \pi_m$, we refine the mined policy as follows: \begin{equation*} \begin{gathered} \forall \mathcal{F} \in \{\mathcal{F_U}, \mathcal{F_O}, \mathcal{F_S}, \mathcal{R}\} : \quad \mathcal{F}_{\rho_j} = \mathcal{F}_{\rho_j} \cup (\mathcal{F}_{\rho_i} \setminus \mathcal{F}_{\rho_j}) \\ \end{gathered} \end{equation*} So, the missing filters are added to the relaxed rule $\rho_j$. These refinements can be done in multiple iterations until further refinement does not give a better model in terms of policy quality $\mathcal{Q}_{\pi}$.
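The core set manipulations in the two refinement steps, as well as the rule similarity check they rely on, are simple; the following is a minimal Python sketch for illustration only (the dictionary-of-sets rule representation and all function names here are ours, not part of our implementation):
\begin{verbatim}
def jaccard(rule1, rule2):
    """Jaccard similarity over the filter/relation/operation
    components of two rules. Each rule is assumed to be a dict of
    sets, e.g. {'FU': {...}, 'FO': {...}, 'FS': {...},
                'R': {...}, 'op': {...}}."""
    inter = sum(len(rule1[k] & rule2[k]) for k in rule1)
    union = sum(len(rule1[k] | rule2[k]) for k in rule1)
    return inter / union if union > 0 else 0.0

def refine_with_fn(mined_rule, fn_rule):
    # FN-based refinement: drop the extra filters of a
    # too-restrictive rule, i.e. reduce each component to its
    # intersection with the corresponding FN-rule component.
    for k in mined_rule:
        mined_rule[k] -= (mined_rule[k] - fn_rule[k])

def refine_with_fp(mined_rule, fp_rule):
    # FP-based refinement: append the filters missing from a
    # too-relaxed rule (union with the FP-rule component).
    for k in mined_rule:
        mined_rule[k] |= (fp_rule[k] - mined_rule[k])
\end{verbatim}
In each refinement pass, a mined rule would be paired with a $\pi_{FN}$ or $\pi_{FP}$ rule only when their \texttt{jaccard} score exceeds 0.5, as in the rule pruning step.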
\begin{algorithm} \caption{Policy refinement algorithm}\label{policyrefinement} \begin{algorithmic}[1] \Procedure{refinePolicy}{} \Require {$A$, $\mathcal{L}$} \Ensure {$\pi_m$} \State $\mathcal{FN} \gets \Call{getFNs}{\pi_m, \mathcal{L}}$ \State $\pi_{FN} \gets \Call{extractPolicy}{\mathcal{FN}}$ \ForAll{$\rho_i \in \pi_{FN}.\mathcal{P}$} \State $R_s \gets \Call{getSimilarRules}{\rho_i, \pi_m.\mathcal{P}}$ \If {$|R_s| = 0$} \State $\pi_m.\mathcal{P} \gets \pi_m.\mathcal{P} \cup \rho_i$ \Else \ForAll{$\rho_j \in R_s$} \ForAll{$\mathcal{F} \in \mathcal{F_U} \cup \mathcal{F_O} \cup \mathcal{F_S} \cup \mathcal{R}$} \State $\mathcal{F}_{\rho_j} \gets \mathcal{F}_{\rho_j} \backslash (\mathcal{F}_{\rho_j}\backslash \mathcal{F}_{\rho_i})$ \EndFor \EndFor \EndIf \EndFor \State $\mathcal{FP} \gets \Call{getFPs}{\pi_m, \mathcal{L}}$ \State $\pi_{FP} \gets \Call{extractPolicy}{\mathcal{FP}}$ \ForAll{$\rho_i \in \pi_{FP}.\mathcal{P}$} \State $R_s \gets \Call{getSimilarRules}{\rho_i, \pi_m.\mathcal{P}}$ \If {$|R_s| \neq 0$} \ForAll{$\rho_j \in R_s$} \ForAll{$\mathcal{F} \in \mathcal{F_U} \cup \mathcal{F_O} \cup \mathcal{F_S} \cup \mathcal{R}$} \State $\mathcal{F}_{\rho_j} \gets \mathcal{F}_{\rho_j} \cup (\mathcal{F}_{\rho_i}\backslash \mathcal{F}_{\rho_j})$ \EndFor \EndFor \EndIf \EndFor \Return $\pi_m$ \EndProcedure \end{algorithmic} \end{algorithm} \section{Experimental Evaluation} \label{evaluation} We have implemented a prototype of our proposed approach presented in Section \ref{proposed}. Here, we present our experimental evaluation. \subsection{Datasets} We perform our experiments on multiple datasets, including synthesized and real ones. The synthesized access logs are generated from two sets of ABAC policies. The first one is a manually written set of policies adapted from \cite{xu2015mining} to be compatible with our policy language. The second one is a completely randomly generated set of policies. To synthesize our input data, for each ABAC policy (e.g., the \emph{University Policy}, the \emph{Healthcare Policy}, etc.), a set of authorization tuples is generated and the outcome of the ABAC policy for each access right is evaluated. The authorization tuples with \emph{permit} as their outcome are the inputs to our unsupervised learning model. \begin{table*} \centering \caption{Details of the Synthesized and Real Policies} \label{tab:policies_details} \begin{tabular}{clcccccc} \toprule $\pi$ & Policy & $|\mathcal{P}|$ & $|A|$ & $|V|$ & $|\mathcal{L}|$ & $|\mathcal{L}^+|$ & $|\mathcal{L}^-|$ \\ \midrule $\pi_1$ & UniversityP & 10 & 11 & 45 & 2,700K & 231K & 2,468K \\ $\pi_2$ & HealthcareP & 9 & 13 & 40 & 982K & 229K & 753K \\ $\pi_3$ & ProjectManagementP & 11 & 14 & 44 & 5,900K & 505K &5,373K\\ $\pi_4$ & UniversityPN & 10 & 11 & 45 & 2,700K & 735K & 1,964K \\ $\pi_5$ & HealthcarePN & 9 & 13 & 40 & 982K & 269K & 713K \\ $\pi_6$ & ProjectManagementPN & 11 & 14 &44 & 5,900K & 960K & 4,918K\\ $\pi_7$ & Random Policy 1 & 10 & 8 & 27 & 17K &2,742 & 14K \\ $\pi_8$ & Random Policy 2 & 10 & 10 & 48 & 5,250K & 245K & 5,004K \\ $\pi_9$ & Random Policy 3 & 10 & 12 & 38 & 560K &100K &459K\\ $\pi_{10}$ & Amazon Kaggle & - & 10 & 15K & 32K & 30K & 1,897 \\ $\pi_{11}$ & Amazon UCI & - & 14 & 7,153 & 70K & 36K & 34K\\ \bottomrule \end{tabular} \end{table*} Our real datasets are built from access logs provided by Amazon in a Kaggle competition \cite{kaggle} and in the UCI machine learning repository \cite{uci_amazon_access}.
\textbf{Manual Policy - University:} This policy is adapted from \cite{xu2015mining} and controls access of different users, including students, instructors, teaching assistants, etc., to various objects (applications, gradebooks, etc.). \textbf{Manual Policy - Healthcare:} This policy is adapted from \cite{xu2015mining} and is used to control access by different users (e.g., nurses, doctors, etc.) to electronic health records (EHRs) and EHR items. \textbf{Manual Policy - Project Management:} This policy is adapted from \cite{xu2015mining} and controls access by different users (e.g., department managers, project leaders, employees, etc.) to various objects (e.g., budgets, schedules, and tasks). \textbf{Random Policies:} The authorization rules for these policies are generated completely at random from random sets of attributes and attribute values. These randomly generated policies provide an opportunity to evaluate our proposed algorithm on access logs of various sizes and with varying structural characteristics. However, we note that the performance of our algorithm on random policies might not be representative of its performance in real scenarios and over real policies. \textbf{Real Dataset - Amazon Kaggle:} The Kaggle competition dataset \cite{kaggle} includes access requests made by Amazon's employees over two years. Each record in this dataset describes an employee's request to a resource and whether the request was authorized or not. A record consists of the employee's attribute values and the resource identifier. The dataset includes more than 12,000 users and 7,000 resources. \textbf{Real Dataset - Amazon UCI:} This dataset is provided by Amazon in the UCI machine learning repository \cite{uci_amazon_access}. It includes more than 36,000 users and 27,000 permissions. Since the dataset contains over 33,000 attributes, we narrow our focus in this experiment to the 8 most requested permissions in the dataset. \textbf{Partial Datasets:} To check the efficiency of the proposed algorithm over sparse datasets, we generate sparse (partial) datasets by randomly selecting authorization tuples from the complete dataset. For example, a 10\% sparse (partial) dataset is generated by randomly selecting 10\% of the tuples from the complete access logs. \textbf{Noisy Datasets:} To check the efficiency of the proposed algorithm over noisy datasets, we generate noisy datasets by randomly reversing the decision of authorization tuples. For instance, a 10\% noisy dataset is generated by randomly reversing the decisions of 10\% of the authorization tuples in the complete access logs. For each of the manual policies, we consider two different sets of policy rules; the first one contains only positive attribute filters and relations, while the second one includes both positive and negative attribute filters and relations. We have included these policies in Appendix A. Table \ref{tab:policies_details} shows the details of the manual and random access log datasets. In this table, $|\mathcal{P}|$ shows the number of rules in the original policy, $|A|$ and $|V|$ show the numbers of attributes and attribute values, and $|\mathcal{L}|$, $|\mathcal{L}^+|$, and $|\mathcal{L}^-|$ show the number of access control tuples, the number of positive access logs, and the number of negative access logs in the given dataset, respectively. \subsection{Experimental Setup} To evaluate our proposed method, we use a computer with a 2.6 GHz Intel Core i7 and 16 GB of RAM. We use Python 3 for the mining and evaluation process.
The algorithms are highly time-efficient (the maximum running time is less than half an hour). We use the kmodes library \cite{kmodes_implementation} for clustering our data and choose the density-based (Cao) initialization \cite{cao2009new} for the k-modes algorithm. To find the optimal $k$, we apply the Silhouette method: we examine each value of $k$ in the pre-defined range $[10, 20]$, and the value that results in the highest Silhouette score is used in the final model. To generate the synthesized access log $\mathcal{L}$, we enumerate all attributes $A$ and their values $V_{a}$ to produce all possible combinations for the tuples. This method was used to generate a complete access log for the random and manual policy datasets. We generate two sets of partial datasets; the 10\% partial datasets are used to check the efficiency of the proposed approach over sparse datasets (Table \ref{tab:our_mined_policies}), and the 0.1\% partial datasets are used to compare the proposed approach with previous work (Table \ref{tab:policies_comparison}). We also generate a set of noisy datasets to check the efficiency of the proposed algorithm over noisy access logs. The results of these experiments are reported in Table \ref{tab:our_mined_policies}. For all experiments, the optimal thresholds for selecting effective attributes and relations are between 0.2 and 0.3. \subsection{Results}\label{result} We first evaluate the performance of our policy mining algorithm on complete datasets. Table \ref{tab:our_mined_policies} shows the results of these experiments. Our second set of experiments is on partial datasets. The algorithm proposed by Xu and Stoller \cite{xu2014mining} and the approach presented by Cotrini \textit{et al.} \cite{cotrini2018mining} are not able to handle the complete datasets, as these datasets are huge. To be able to compare the performance of our proposed algorithm with their work, we generated 0.1\% sparse (partial) datasets and ran all algorithms over these partial datasets. The results of these experiments are shown in Table \ref{tab:policies_comparison} and Figures \ref{fig:fscore_comparison}, \ref{fig:wsc_comparison}, and \ref{fig:quality_comparison}. The algorithm proposed by Xu and Stoller \cite{xu2014mining} and the approach presented by Cotrini \textit{et al.} \cite{cotrini2018mining} do not generate policy rules with negative attribute filters and relations; however, we report the results of their algorithms over the datasets related to policy rules including negations (policies $\pi_4$, $\pi_5$, $\pi_6$) to show how the quality of mined policies is impacted if the mining algorithm does not extract rules that include negation. \subsubsection{The F-Score of the Mined Policies} Table \ref{tab:our_mined_policies} shows the final $F{\text -}score_{\pi|\mathcal{L}}$ of our proposed approach after several rounds of refinement over all complete datasets. As we can see in Table \ref{tab:our_mined_policies}, the proposed approach achieves a high F-score across all experiments except for $\pi_6$, which is a very complex dataset with both positive and negative attribute and relation filters, including 14 attributes, 44 attribute values, and around six million access records. The final policy quality for this dataset is around 0.63, which is acceptable considering the complexity of the policy.
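Before turning to the comparison with prior work, we note that the model-selection loop described in our setup is straightforward to reproduce. The following is a minimal illustration rather than our exact implementation; it assumes the access log has been integer-encoded into a categorical matrix \texttt{X}, and the use of the Hamming metric for the Silhouette computation is our assumption here:
\begin{verbatim}
from kmodes.kmodes import KModes
from sklearn.metrics import silhouette_score

def select_k(X, k_min=10, k_max=20):
    """Fit k-modes for each candidate k (Cao initialization) and
    keep the clustering with the highest Silhouette score."""
    best = (None, -1.0, None)  # (k, score, labels)
    for k in range(k_min, k_max + 1):
        labels = KModes(n_clusters=k, init='Cao',
                        n_init=1).fit_predict(X)
        score = silhouette_score(X, labels, metric='hamming')
        if score > best[1]:
            best = (k, score, labels)
    return best  # optimal k, its score, the cluster assignments
\end{verbatim}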
Table \ref{tab:policies_comparison} and Figure \ref{fig:fscore_comparison} compare the F-scores of the policies mined by our proposed approach with those of previous work over partial datasets (with 0.1\% of the complete datasets). The F-score of the policies mined by our algorithm is very close to that of the approach proposed by Cotrini \textit{et al.} \cite{cotrini2018mining}, and our proposed approach outperforms theirs in half of the experiments. \subsubsection{The Complexity of the Mined Policies} In Table \ref{tab:our_mined_policies}, we can see the final $WSC$ of the policies mined by our proposed approach. All extracted policies have complexity lower than 100, which is much lower than that of the most complex policy for each dataset. According to \textit{Definition 17}, the most complex policy for each dataset has the same complexity as the original positive access log ($\mathcal{L^+}$). Given the numbers in Tables \ref{tab:policies_details} and \ref{tab:our_mined_policies}, the most complex policies for these scenarios are thousands of times more complex than the policies extracted by our approach. We compare the complexity of the policies mined by the different ABAC mining algorithms in Figure \ref{fig:wsc_comparison}. Among the three approaches, the algorithm of Cotrini \textit{et al.} extracts the most complex policies, with WSC greater than 1000 in some cases. The complexity of the policies mined by our algorithm is very close to that of the policies extracted by the approach proposed by Xu and Stoller \cite{xu2014mining}. \subsubsection{The Policy Quality of the Mined Policies} Finally, Table \ref{tab:our_mined_policies} shows the quality of the policies extracted by our proposed approach. Across all the datasets our algorithm was applied to, around 75\% of the cases reached a policy quality of more than 0.8, which is significant considering the huge size of the original access logs (each with more than 30K records). According to Figure \ref{fig:quality_comparison}, in most cases the policy quality of the policies mined by our proposed approach is higher than that of the policies extracted by the other ABAC mining algorithms.
\begin{table*} \centering \caption{Results of Our Proposed Approach on Various Synthesized and Real Policy Datasets} \label{tab:our_mined_policies} \begin{tabular}{ccccccccc} \toprule $\pi$ & Total Running Time (s) & Optimal $k$ & $\mathcal{P}_{mined}$ & $ACC_{\pi|\mathcal{L}}$ & $F{\text -}score_{\pi|\mathcal{L}}$ & $WSC_{orig}$ & $WSC_{mined}$ & $\mathcal{Q}_{\pi}$ \\ \midrule $\pi_1$ & 9376.556 & 15 & 20 & 97.5\% & 83.6\% & 33 & 91 & 0.91 \\ Partial $\pi_1$ (10\%) & 1994.769 & 15 & 13 & 97.29\% & 82.21\% & 33 & 54 & 0.90\\ Noisy $\pi_1$ (10\%) & 4979.56 & 10 & 8 & 96.94\% & 80\% & 33 & 28 & 0.90 \\ $\pi_2$ & 2180.745 & 18 & 18& 85.49\% & 75.93\% & 33 & 71 & 0.86 \\ Partial $\pi_2$ (10\%) & 4787.98 & 10 & 8 & 96.94\% &85.33\% & 33 & 28 & 0.92 \\ Noisy $\pi_2$ (10\%) & 7339.91 &8 & 15 & 72.22\% & 82.13\% & 33 & 27 & 0.90 \\ $\pi_3$ & 7795.44 &15 & 17&95.6\% &65.63\% & 44 & 55& 0.80\\ Partial $\pi_3$ (10\%) & 1347.29 & 6 & 10 & 95.2\% &62.24\% & 44 & 56 & 0.77 \\ Noisy $\pi_3$ (10\%) & 1912.72 & 15 & 15 & 94.47\% &62.66\% & 44 & 81 & 0.77\\ $\pi_4$ & 13662.62 & 7 &16 &86.7\% & 71.58\% & 33 & 40 &0.83\\ $\pi_5$ & 8681.64 & 15&15 & 78.11\% &62\% & 33 & 67&0.76\\ $\pi_6$ & 12905.78 & 20&17 &88.05\% &46.28\% & 44 & 80 & 0.63 \\ $\pi_7$ & 24.63 & 8 & 20 & 93\% &78.33\% &33 & 65 & 0.88\\ $\pi_8$ & 13081.20 & 10 & 14 & 99.12\% & 91.28\% & 33& 51 & 0.95\\ $\pi_9$ & 2266.68 & 8 & 16 & 92.17\% & 79.66\% &33 & 46 & 0.89\\ $\pi_{10}$ & 265.3 & 15 & 20 & 94\% & 97\% & - & 44 & 0.98\\ $\pi_{11}$ & 1010.43 & 24 & 25 & 98.49\% &99\% & - & 92 & 0.82\\ \bottomrule \end{tabular} \end{table*} \begin{table*} \centering \begin{threeparttable} \caption{Comparison of Our Proposed Approach with Previous Work on Various Synthesized and Real Policy Datasets} \label{tab:policies_comparison} \begin{tabular}{lccccccc} \toprule Mining Alg. 
& $\pi$ & Time (s) & $ACC_{\pi|\mathcal{L}}$ & $F{\text -}score_{\pi|\mathcal{L}}$ & $\mathcal{P}_{\pi_{mined}}$ & $WSC(\pi)$ & $\mathcal{Q}_{\pi}$ \\ \midrule Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{Partial $\pi_1$ (0.1\%)} & 227 & 94.74\% & 65.87\% & 10 & 34 & 0.79\\ Cotrini \textit{et al.} \cite{cotrini2018mining} & &126 & 80.74\% & 45.3\% & 132 & 508 & 0.58 \\ Proposed Approach & & 7.3 & 96\% & 74.2\% & 7 &29 & 0.85\\ \hline Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{Partial $\pi_2$ (0.1\%)} & 32645 & 64.43\% & 63.61\% & 3 & 6 & 0.78 \\ Cotrini \textit{et al.} \cite{cotrini2018mining} & &529 & 72.72\% & 64\% & 65 & 272 & 0.75\\ Proposed Approach & & 7.9 & 79.78\% & 68.23\% & 13 & 49 & 0.81 \\ \hline Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{Partial $\pi_3$ (0.1\%)} & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ \\ Cotrini \textit{et al.} \cite{cotrini2018mining} & & 3587 & 91.57\%& 54.124\%&24 &77 & 0.70\\ Proposed Approach & & 11.44 & 94.96\%&51.31\% &12 &55 &0.78\\ \hline Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{Partial $\pi_4$ (0.1\%)} & 4230 & 73.37\% & 16.1\% & 10 & 34 & 0.28 \\ Cotrini \textit{et al.} \cite{cotrini2018mining} & & 204 & 93.55\%& 88.5\% &385 & 1389 & 0.86\\ Proposed Approach & & 15 & 89.3\% & 80\% & 10 & 40 & 0.89\\ \hline Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{Partial $\pi_5$ (0.1\%)} & 45348 & 79.25\% & 73.09\% & 3 & 6 & 0.84 \\ Cotrini \textit{et al.} \cite{cotrini2018mining} & & 3587 & 86.46\%& 79.2\% & 123 &462 & 0.83 \\ Proposed Approach && 8.8 & 87.2\% & 76.3\% & 15 & 66& 0.86\\ \hline Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{Partial $\pi_6$ (0.1\%)} & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ \\ Cotrini \textit{et al.} \cite{cotrini2018mining} & & 2848 & 82.75\% & 62.66\% &31 &100 & 0.77\\ Proposed Approach & &22.67 &81.2\% &49.4\% &12 &44 & 0.66\\ \hline Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{$\pi_{10}$} & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ \\ Cotrini \textit{et al.} \cite{cotrini2018mining} & & 237 &84.25\% & 91.39\%&1055 &2431 &0.92 \\ Proposed Approach & & 265.3 & 94\% & 97\% & 20 & 44 &0.98\\ \hline Xu and Stoller \cite{xu2014mining} & \multirow{2}{*}{$\pi_{11}$} & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ & $-^*$ \\ Cotrini \textit{et al.} \cite{cotrini2018mining} & & 1345 & 70.93\%&75.64\% & 466 & 1247 &0.85 \\ Proposed Approach & & 1010.43 & 98.49\% & 99\% & 24 & 92 &0.99\\ \bottomrule \end{tabular} \begin{tablenotes} \item $*$ Xu and Stoller \cite{xu2014mining} did not terminate or produce any output for these datasets even after running for more than 24 hours.
\end{tablenotes} \end{threeparttable} \end{table*} \begin{figure}[htbp] \centering \includegraphics[scale=0.4]{images/F-score_Comparison.pdf} \caption{The F-Score of the Policies Mined by ABAC Mining Algorithms} \label{fig:fscore_comparison} \end{figure} \begin{figure}[htbp] \centering \includegraphics[scale=0.4]{images/WSC_Comparison.pdf} \caption{The Complexity of the Policies Mined by ABAC Mining Algorithms} \label{fig:wsc_comparison} \end{figure} \begin{figure}[htbp] \centering \includegraphics[scale=0.4]{images/Quality_Comparison.pdf} \caption{The Quality of the Policies Mined by ABAC Mining Algorithms} \label{fig:quality_comparison} \end{figure} \section{Related Work} \label{relatedwork} As the RBAC approach became popular, many organizations decided to equip their information systems with this more recent access control model; however, migrating from legacy access control systems to RBAC was a major obstacle for such environments. As a result, several researchers have addressed this challenge by introducing automated role extraction algorithms \cite{molloy2010mining, xu2012algorithms, kuhlmann2003role, schlegelmilch2005role, vaidya2007role, vaidya2006roleminer, zhang2007role, guo2008role, molloy2008mining, takabi2010stateminer, ni2009automating}. \emph{Role engineering} or \emph{role mining} refers to procedures that extract an optimal set of roles from given user-permission assignments. In \cite{kuhlmann2003role}, Kuhlmann and Schimpf try to discover a set of roles from user-permission assignments using clustering techniques; however, they do not show the feasibility of their proposed approach through experiments. In addition, their proposed approach lacks a metric to choose the best model based on their clustering method. The ORCA role mining tool, proposed by Schlegelmilch and Steffens, performs hierarchical clustering on user-permission assignments \cite{schlegelmilch2005role}. Their method limits the hierarchical structure to a tree, so that each permission/user is assigned to one role in the hierarchy. This limits the applicability of their approach as, in real environments, roles do not necessarily form a tree. Ni et al. propose a supervised learning approach for role mining which maps each user-permission assignment to a role using a supervised classifier (i.e., a support vector machine (SVM)) \cite{ni2009automating}. The main limitation of their approach is that the roles and some parts of the role-permission assignments are needed beforehand; hence, it is not applicable in many organizations. Vaidya \textit{et al.} are the first to formally define the Role Mining Problem (RMP) and analyze its theoretical bounds \cite{vaidya2010role}. They also propose a heuristic approach for finding a minimal set of roles for a given set of user-permission assignments. Xu and Stoller are the first to propose algorithms for mining ABAC policies from RBAC \cite{xu2013mining}, logs \cite{xu2014mining}, and access control lists \cite{xu2015mining}, plus attribute information.
Their policy mining algorithms iterate over access control tuples (generated from available information, e.g., user-permission relations and attributes) and construct candidate rules. They then generalize the candidate rules by replacing conjuncts in attribute expressions with constraints. The main limitation of these algorithms is that, being based on heuristics, they work very well for simple and small-scale AC policies; however, as the number of rules in the policy and the number of elements in each rule increase, they do not perform well. Following Xu and Stoller's method, Medvet \textit{et al.} \cite{medvet2015evolutionary} propose a multi-objective evolutionary algorithm for extracting ABAC policies. Their approach is a separate-and-conquer algorithm: in each iteration, a new rule is learned and the set of access log tuples becomes smaller. Their algorithm employs several search-optimizing features to improve the quality of the mined rules. Although their approach is a multi-objective optimization framework which incorporates requirements on both correctness and expressiveness, it suffers from the same issue as \cite{xu2015mining}. Iyer and Masoumzadeh \cite{iyer2018mining} propose a more systematic, yet still heuristic, ABAC policy mining approach based on the rule mining algorithm PRISM. It inherits the shortcomings associated with PRISM, which include dealing with the large dimensionality of the search space of attribute values and the generation of a huge number of rules. Cotrini \textit{et al.} propose an algorithm called Rhapsody for mining ABAC rules from sparse logs \cite{cotrini2018mining}. Their approach is built upon subgroup discovery algorithms. They define a novel metric, \textit{reliability}, which measures how overly permissive an extracted rule is. In addition, they propose a universal cross-validation metric for evaluating the mined policy when the input log is sparse. However, their algorithm is not capable of mining policies from logs with many attributes, as the number of extracted rules grows exponentially in the number of attributes of the system. \section{Discussion and Limitations} As mentioned in Section \ref{result}, our proposed approach achieves a practical level of performance when applied to both synthesized and real datasets. In the case of synthesized datasets, the proposed approach is capable of mining policies containing both positive and negative attribute filters from complete datasets. Our proposed approach also shows potential for use on sparse datasets. In addition, the real datasets contain a large number of attributes and attribute values, as shown in Table \ref{tab:policies_details}. The ability of our proposed approach to mine high-quality policies for these datasets shows that the numbers of attributes and attribute values have minimal impact on its effectiveness. The proposed approach is based on an unsupervised clustering algorithm. Since finding the proper number of clusters is a challenge for clustering algorithms, our approach is affected by this issue as well. The same issue also arises in finding the best thresholds for extracting effective attributes and relations. We note that, as the proposed algorithm is based on tuning multiple parameters, it is possible that it gets stuck in local optima.
For this reason, we do not claim that it will extract the policy with the highest quality in every scenario, nor do we claim that extracting rules with negative attribute filters and relations always results in a policy with higher quality (as we can see in Section \ref{result}); however, by trying more randomization in cluster initialization and a wider range of parameters, we can obtain a solution that is closer to the global optimum. In our evaluation, we used random selection to create the noisy and sparse datasets from the complete datasets. Although we ensured the same percentage of randomly selected tuples from the permitted and denied logs, guaranteeing the quality of the sampling is difficult. \section{Conclusion} \label{conclusion} In this paper, we have proposed an unsupervised learning-based approach for automating the ABAC policy extraction process. The proposed approach is capable of discovering both positive and negative attribute expressions as well as positive and negative relation conditions, while previous approaches to access control policy extraction had focused only on positive expressions. Furthermore, our work is capable of improving the extracted policy through iterations of the proposed rule pruning and policy refinement algorithms. These refinement algorithms are based on the false positive and false negative records, and they help increase the quality of the mined policy. Most importantly, we have proposed the \emph{policy quality metric}, which considers both the conciseness and the correctness of the mined policy and is important for comparing the extracted policy with the original one and for improving it as needed. We have evaluated our policy extraction algorithm on access logs generated for various sample policies and demonstrated its feasibility. Furthermore, we have shown that our approach outperforms previous works in terms of policy quality. As future work, we plan to extend our method to support numerical data and to extract negative authorization rules, while studying the effects of various conflict resolution strategies on the quality of the mined policy. \ifCLASSOPTIONcaptionsoff \newpage \fi \bibliographystyle{ieeetr}
\section{Introduction} Recommender systems mediate our access to online information and play a crucial role in online platforms \citep{schafer1999recommender, wei2007survey, zaiane2002building, liu2013soco,hariri2012context,vargas2011effects,levi2012finding}. Especially common are embedding or matrix factorization techniques. In this setting, a recommender system learns a vector representation for each item and user. Then users are recommended the items with feature vectors most similar to their own (e.g., the items that maximize the inner products between the representations)~\citep{takacs2008investigation, koren2009matrix,maneeroj2009hybrid, xue2017deep, zhang2019deep}. The promise of such techniques -- and more broadly collaborative filtering (CF) -- is that we can automatically capture user preferences and item characteristics based on the principle that similar people like similar items. Informally, if Bob and Alice both enjoy item one and Alice further enjoys item two, then Bob is very likely to enjoy item two as well. This work is based on a simple insight: collaborative filtering \textit{also} implicitly assumes that negative preferences are transitive. If Bob and Alice both enjoy item one and Alice \textit{dislikes} item two, then a CF approach will learn that Bob is likely to \textit{dislike} item two as well. While such an assumption may seem innocuous, we show that it leads to a \textit{stereotyping} problem. Suppose a user prefers a set of items that are anti-correlated in the broader population. Then these items are unlikely to be jointly recommended to this user -- regardless of that user's preferences and ratings. In other words, for users with multiple interests that are not jointly common in the general population, the recommendation system cannot jointly recommend all the items in which they are interested; the system may instead only recommend them items corresponding to one interest. We note that this problem may especially affect cultural minorities; \textit{if} other users do not like their culture-specific items, then our results imply a recommender system would not simultaneously recommend them both culture-specific items (which are disliked by others) and other items they like (that other users also like). To formalize this challenge, we first introduce a notion of \textit{joint accessibility}, which indicates when an item set of size $K$ can \textit{jointly} be accessed by a user in a top-$K$ recommendation task (in which the $K$ items with the highest predicted ratings are recommended to the user). Such a notion of joint accessibility captures a fundamental requirement for content diversity in recommender systems: is a user able to access (be recommended) any combination of top $K$ items that they might like? We show that the standard matrix factorization machinery, which models each user and item as a single-vector embedding, does not satisfy joint accessibility conditions. There can exist sets of items that will never be recommended together, even if each item can be individually recommended. In particular, such sets are combinations of items that fall outside the majority of users' preferences. We prove that such a limitation is a consequence of using a single-vector representation for each user -- if two items have representations far apart from one another, then no user vector can be close to both. Under such representations, each user is only able to access a constrained set of item combinations.
We formalize this intuition and analyze its implications theoretically through two geometric interpretations -- providing necessary and sufficient conditions for joint accessibility to hold. Moreover, we show that these conditions are not guaranteed by the standard single-vector representation for users and items used in most CF models. To mitigate this limitation, we provide an alternative multi-vector representation technique in which we represent each user using multiple vectors, and we prove that it guarantees joint accessibility. This multi-vector representation allows the recommender to learn multiple interests for each user, even if the user's combination of interests is rare. \paragraph*{Contributions and organization.} We introduce the stereotyping problem of \textit{joint accessibility} in recommender systems, and study the theoretical conditions under which it arises as well as an alternative modelling fix. In particular, \begin{enumerate} \item We formally define the notion of joint accessibility, which can be used as a general measure for auditing recommender systems. \item We study the joint accessibility condition under the standard single-vector representation for each user, and provide necessary and sufficient conditions for it to hold. We further show that these conditions can be easily violated in the single-vector representation. \item We then propose an alternative modelling fix. The proposed new modelling technique represents each user with \textit{multiple} feature vectors -- which are learnt to capture the user's diverse interests. We analyze the multi-vector representation theoretically, and show that joint accessibility generally holds in such a scheme. \item We conduct extensive experiments on real and simulated datasets, demonstrating the stereotyping problem with standard single-vector matrix factorization models. \end{enumerate} We discuss related work next. In \Cref{sec:single_vec}, we formally define joint accessibility and provide geometric conditions under which it holds for the standard single-vector matrix factorization model. \Cref{sec:multi_vec} contains both an impossibility result for accessibility with single-vector representations and our multi-vector model. \Cref{sec:exp} contains our experiments. We conclude in \Cref{sec:conc}. \section{Joint Accessibility and its Geometric Interpretation } \label{sec:single_vec} In this section, we set up the notation for matrix factorization-based recommendations and formally define \textit{joint accessibility}. We then present the main theoretical analysis for the single-vector representation of users. In particular, we provide necessary and sufficient conditions for joint accessibility through two geometric interpretations. \subsection{Basic setup and joint accessibility} We consider top-$K$ recommendation to a user with a total of $n$ items, where the user and items are represented by feature vectors with $d$ latent dimensions. In a standard matrix factorization model, each item $j \in [n]$ is associated with a unique feature vector, denoted by $\mathbf v_j \in \mathbb{R}^d$. The user is also represented by a feature vector $\mathbf u \in \mathbb{R}^d$. The system then recommends the items with the $K$ highest predicted ratings based on the feature vectors, which can be learnt from an offline dataset. The user is recommended the items with vectors most similar to their own, i.e., the items that maximize the inner product with the user's feature vector.
Formally, the score (predicted rating) of each item $j$ is\footnote{More generally, the predicted ratings may further include user and item bias terms.}: \begin{align}\label{eq:score} s(j) = \mathbf u^\top \mathbf v_j. \end{align} Given the above scoring rule, the set of items in a top-$K$ recommendation is: \[ S = \argmax_{S \subset [n],\; |S| = K} \sum_{j \in S} s(j). \] With the representation model and its scoring rule at hand, we are now ready to define the notion of \textit{joint accessibility}. In particular, joint accessibility considers whether a set of items can \textit{jointly} be accessed by a user in a recommendation. It captures a fundamental requirement on content diversity in recommender systems: each user -- provided they have shared enough data with the system for it to model their preferences well -- should be able to be recommended any combination of top $K$ items that they possibly like. Formally, let ${\cal I}_K$ be the space of all subsets of items with size $K$, i.e., ${\cal I}_K \triangleq \{S: S \subset [n],\; |S| = K\}$; we define joint accessibility for a top-$K$ recommendation to a user as follows. \begin{definition}(\textit{Joint accessibility})\label{def:joint-access} Consider a top-$K$ recommendation to a user with a total of $n$ items. The user and item vectors have latent dimension $d$. Denote the item feature vectors as $\mathbf v_j \in \mathbb{R}^d, j \in [n]$. Then the recommender system satisfies joint accessibility if and only if \[ \forall \{\mathbf v_j\}_{j=1}^n,\; \forall S\in {\cal I}_K,\; \text{ it is true that } \exists \mathbf u \in {\cal U}\; \text{ s.t. } S \text{ is recommended.} \] We further say that a \textit{given} set $S$ is jointly accessible if there exists $ \mathbf u \in {\cal U}\; \text{ s.t. } S \text{ is recommended.}$ \end{definition} Intuitively, joint accessibility of a top-$K$ recommendation means that for any item set of size $K$, there exists at least one user vector for which this set of items will be recommended. In other words, if joint accessibility of a top-$K$ recommendation is not satisfied, then there exist sets of items of size $K$ that will never be chosen jointly by the recommender system -- even if there exists a user whose history suggests they like exactly that combination of items. In the coming subsections, we formally characterize the necessary and sufficient conditions for joint accessibility to be satisfied, through two geometric interpretations. \subsection{Accessibility conditions and their geometric interpretations}\label{sec:geometric} Definition~\ref{def:joint-access} defines the joint accessibility of a diverse set of items through the existence of a feasible user vector. In the next two subsections, we show that such existence can be conveniently translated, through two geometric interpretations, into properties that are easier to visualize. These geometric interpretations provide necessary and sufficient conditions for joint accessibility for a given set of items. They further illustrate why dissimilar items in particular may not be recommended together, regardless of a user's interests. \paragraph{Convex hull interpretation} Our first geometric interpretation uses an argument based on convex hulls of vectors. This geometric interpretation extends the analysis in~\citet{dean2020recommendations} to \textit{joint} accessibility of multiple items.
\begin{figure*}[!ht] \centering \begin{tabular}{cc} \includegraphics[width=0.4\textwidth]{plots/convex_hull.pdf} & \includegraphics[width=0.43\textwidth]{plots/voronoi.pdf} \end{tabular} \caption{Geometric interpretations of joint accessibility for top-$2$ recommendations. \textit{(left)} convex hull interpretation; green vectors represent sum vectors for item pairs. \textit{(right)} Voronoi diagram interpretation; colored blocks represent Voronoi cells. }\label{fig:convexhull-voronoi} \end{figure*} \begin{theorem}(Proof in Appendix~\ref{app:proof:thm:single-vec-vertex-condition})\label{prop:single-vec-vertex-condition}(Single-vector accessibility condition) Given $n$ items with feature vectors $\mathbf v_1, \cdots \mathbf v_n \in \mathbb{R}^d$, an item set $S$ of size $K$ is accessible if and only if $\sum_{i \in S} \mathbf v_i$ is a vertex of the convex hull: $\textbf{conv}\{\sum_{i \in S} \mathbf v_i, \forall S \subseteq [n], |S| = K\}$. \end{theorem} Theorem~\ref{prop:single-vec-vertex-condition} translates accessibility of a certain set of items into a linear condition on the items' feature vectors. Intuitively, $\textbf{conv}\{\sum_{i \in S} \mathbf v_i, \forall S \subseteq [n], |S| = K\}$ is the convex hull of all the possible sums of $K$ item vectors. Theorem~\ref{prop:single-vec-vertex-condition} guarantees that, if the sum of the feature vectors of $K$ items is not a vertex of this convex hull, this set of items cannot be recommended to any user. As illustrated in Figure~\ref{fig:convexhull-voronoi} \textit{(left)}, in a top-2 recommendation with latent dimension $d=2$, the vector sums of the pair of item 1 and item 3 and of the pair of item 2 and item 4 fall inside the convex hull, so these pairs are not jointly accessible in this configuration. Note that this joint inaccessibility holds even though every \textit{individual} item can be recommended to users, even in top-1 recommendation: each of the four item vectors is a vertex of the convex hull defined by the individual item vectors. \paragraph{Voronoi diagram interpretation} The convex hull of a general set of vectors can be difficult to visualize. For instance, for top-2 recommendation as illustrated in Figure~\ref{fig:convexhull-voronoi} \textit{(left)}, we would need to visualize the sums of $O(n^2)$ pairs of item vectors and the corresponding convex hull. Therefore, we accompany the convex hull interpretation with a second geometric interpretation, which uses the Voronoi diagram. This interpretation is easier to visualize, especially when $K$ is small, though it holds in a more limited setting. Specifically, we illustrate the alternative geometric interpretation for top-2 recommendation. Let us consider item feature vectors in $\mathbb{R}^d$ and a recommendation algorithm with the scoring rule as in Eq~\eqref{eq:score}. We further assume that the feature vectors are normalized and thus lie on a sphere in $\mathbb{R}^d$. These item vectors on the sphere then define a Voronoi diagram, as follows. \begin{definition} (Spherical Voronoi diagram for item vectors, \citep{de1997computational}) Denote a unit sphere in $\mathbb{R}^d$ as $S^d$. A spherical Voronoi diagram for item vectors is defined with $n$ sites located at the $n$ item vectors, and with the Euclidean distance on the surface of the sphere. The Voronoi cell associated with the item vector $\mathbf v_k$ is the set of all points on the sphere whose distance to $\mathbf v_k$ is not greater than their distance to any other site $\mathbf v_j$, where $j \neq k$.
Two items are Voronoi neighbors if they have neighboring Voronoi cells. \end{definition} \begin{theorem}\label{thm:voronoi}(Proof in Appendix~\ref{app:proof:thm:voronoi}) Given $n$ items with normalized feature vectors $\mathbf v_1, \cdots \mathbf v_n \in \mathbb{R}^d$, an item set $S$ of size $K=2$ is accessible if and only if the items are Voronoi neighbors. \end{theorem} Theorem~\ref{thm:voronoi} provides another interpretation of joint accessibility for item pairs. To see this, Figure~\ref{fig:convexhull-voronoi} \textit{(right)} illustrates the Voronoi cells for item vectors in $\mathbb{R}^3$ by showing the two-dimensional surface of the sphere. We see that item 1 and item 3 are not Voronoi neighbors, and therefore not jointly accessible. Intuitively, if two item vectors are far apart, then no single user vector can be simultaneously close to both item vectors. If two items are Voronoi neighbors, then there exist user vectors at their joint boundary whose two closest item vectors are exactly those two items; otherwise, if the items are not Voronoi neighbors, no such user vectors exist. We further provide an observation on how joint inaccessibility persists as the item set grows. Intuitively, if joint accessibility is violated for a smaller set of items, it is also violated when more items are available to recommend: for top-$K$ recommendation, if joint accessibility is violated for a pair of values $(n, d)$, then it is also violated for $(n', d)$ for any $n'\geq n$. Therefore, guaranteeing joint accessibility with a small set of items is essential for joint accessibility over a larger set of available recommendations. \medskip Together, these geometric interpretations illustrate the stereotyping challenge. If two items are far apart (not Voronoi neighbors, or with a vector sum $\mathbf v_j + \mathbf v_k$ that is not a vertex of the convex hull), then they are not jointly accessible. Further, note that in matrix factorization-based collaborative filtering, the relative arrangement of item vectors is meaningful: two item vectors are close if the corresponding items are similarly preferred by users. Thus, if the broader user population does not similarly prefer two items, then the item vector representations will be far apart, and they cannot be recommended together for \textit{any} user; for any user who likes such dissimilar items, the recommender system can learn to recommend only one of them. In this section, we defined joint accessibility and provided conditions under which a given set $S$ of items is jointly accessible. In the next section, we turn to joint accessibility across all subsets of the items. We show that such joint accessibility does not hold in general with the single-vector representation, but can be guaranteed with an alternative representation technique. \section{Proofs} \subsection{Proof of Theorem \ref{prop:single-vec-vertex-condition}} \label{app:proof:thm:single-vec-vertex-condition} \begin{proof} The proof is a generalized version of \citet[Proposition~1]{dean2020recommendations}, which is specialized to top-1 recommendation. First, notice that a set $S$ of $K$ items is recommended as the set of top-$K$ items if and only if: \begin{align*} \sum_{i\in S} \mathbf v_{i}^\top u > \sum_{i\in S'} \mathbf v_{i}^\top u, \end{align*} for all $S' \subseteq [n], S' \neq S, |S'| = K$.
The above is equivalent to: \begin{align}\label{eq:single-vec-top-k} Q_s u > 0, \end{align} where $Q_s = \begin{pmatrix} (\sum_{i\in S} \mathbf v_{i} - \sum_{i\in S'_1} \mathbf v_{i})^\top\\ \cdots\\ \cdots \\ \left(\sum_{i\in S} \mathbf v_{i} - \sum_{i\in S'_{{n\choose k}-1}} \mathbf v_{i}\right)^\top\end{pmatrix} \in \mathbb{R}^{\left({n\choose k}-1\right) \times d}$. Therefore, the set $S$ is accessible if and only if there exists $u \in \mathbb{R}^d$ such that Eq.~\eqref{eq:single-vec-top-k} is satisfied. This is a linear constraint on $u$, and is equivalent to the feasibility of the following linear program: \begin{align*} \min \quad & \textbf{0}^\top u \\ \textnormal{s.t.} \quad & Q_su \geq \epsilon \end{align*} where $\epsilon>0$ is an arbitrarily small positive number. To analyze the feasibility of this linear program, we consider its dual: \begin{align*} \max \quad & \epsilon^\top \lambda \\ \textnormal{s.t.} \quad & Q_s^\top \lambda =0\\ &\lambda \geq 0 \end{align*} Since the dual is always feasible with the solution $\lambda = \textbf{0}$, by Clark's theorem, the primal is feasible if and only if the dual is bounded. Further notice that for any feasible solution $\lambda$, $a\lambda$ with $a\geq 0$ is also a feasible solution; letting $a \to +\infty$ makes the dual unbounded whenever $\lambda$ has nonzero entries. This means that the primal is infeasible exactly when there exists some feasible $\lambda \neq \textbf{0}$ with $Q_s^\top \lambda =0$, i.e., \begin{align*} \sum_j \lambda_j \left(\sum_{i\in S} \mathbf v_{i} - \sum_{i\in S'_j} \mathbf v_{i}\right) = 0, \quad \lambda > 0. \end{align*} By rearranging terms (and normalizing $\lambda$ to sum to one), this is equivalent to $\sum_{i\in S} \mathbf v_{i}$ being a convex combination of the $ \sum_{i\in S'_j} \mathbf v_{i}$ over all $j$. Therefore, $S$ is accessible for a user $u$ if and only if $\sum_{i \in S} \mathbf v_i$ is a vertex of the convex hull: $\textbf{conv}\{\sum_{i \in S} \mathbf v_i, \forall S \subseteq [n], |S| = K\}$. \end{proof} \subsection{Proof of Theorem~\ref{thm:voronoi}}\label{app:proof:thm:voronoi} \begin{proof} First, assume that items $\mathbf v_1$ and $\mathbf v_2$ are the top-2 recommendations for some user $\mathbf u$. Then by definition, for all $j \in [n]$, $j \neq 1, 2$, we have $\mathbf u \cdot \mathbf v_j \leq \mathbf u \cdot \mathbf v_1$, and $\mathbf u \cdot \mathbf v_j \leq \mathbf u \cdot \mathbf v_2$. Without loss of generality, assume that $\mathbf v_1$ is the top-1 recommendation for $\mathbf u$. Then, by the construction of the Voronoi diagram, $\mathbf u$ is in the Voronoi cell of $\mathbf v_1$. Suppose that $\mathbf v_2$ is not in a neighboring Voronoi cell of $\mathbf v_1$. Then, there must exist another item vector $\mathbf v_3$, such that $\mathbf v_3$ has a Voronoi cell neighboring that of $\mathbf v_1$, and the shortest arc between $\mathbf v_1$ and $\mathbf v_2$ crosses the cell of $\mathbf v_3$. Denote the shortest arc from $\mathbf v_1$ to $\mathbf v_2$ on the sphere as $\ell$. If $\mathbf v_3$ is on $\ell$, we immediately get $d(\mathbf u, \mathbf v_3) < d(\mathbf u, \mathbf v_2)$. If $\mathbf v_3$ is not on $\ell$, consider any point $\mathbf v_4$ that is in the Voronoi cell of $\mathbf v_3$ and is on $\ell$. Then, by the triangle inequality, \begin{align*} d(\mathbf u, \mathbf v_3) &< d(\mathbf u, \mathbf v_4) + d(\mathbf v_4, \mathbf v_3)\\ & < d(\mathbf u, \mathbf v_4) + d(\mathbf v_4, \mathbf v_2)\\ &= d(\mathbf u, \mathbf v_2). 
\end{align*} Therefore, $\mathbf u$ is closer to $\mathbf v_3$ than to $\mathbf v_2$. This contradicts the fact that $\mathbf v_2$ is the second closest item vector to $\mathbf u$. On the other hand, consider two item vectors $\mathbf v_1$ and $\mathbf v_2$ that are Voronoi neighbors. Then, any user vector $\mathbf u$ on the intersection of the two Voronoi cells has a top-2 recommendation of $\{\mathbf v_1, \mathbf v_2\}$, which completes the proof. \end{proof} \subsection{Proof of Theorem~\ref{thm:single-vec-impossibility}}\label{app:proof:thm:single-vec-impossibility} \begin{proof} We prove the theorem by providing an example, which is illustrated in Figure~\ref{fig:convexhull-voronoi} \textit{(left)}. Specifically, consider $d=2$ and four items with feature vectors $(2,4), (-2,2), (-3, -1), (3,-3)$. It is immediate to see that, for top-1 recommendations, the four item vectors are all vertices of the convex hull $\textbf{conv}\{\sum_{i \in S} \mathbf v_i, \forall S \subseteq [n], |S| = 1\}$. Therefore, by Theorem~\ref{prop:single-vec-vertex-condition}, each item is individually accessible. Now we consider a top-2 recommendation. The convex hull $\textbf{conv}\{\sum_{i \in S} \mathbf v_i, \forall S \subseteq [n], |S| = 2\}$ is the convex hull of the six possible item pairs, which correspond to the vector sums: \[(0,6), (-1,3), (5,1), (-5, 1), (1,-1), (0,-4).\] Therefore, the pair of item 1 and item 3 (corresponding to vector sum $(-1,3)$) and the pair of item 2 and item 4 (corresponding to vector sum $(1,-1)$) are not jointly accessible. Hence, with the single-vector representation, individual accessibility of the items is not sufficient for joint accessibility. This completes the proof. \end{proof} \subsection{Proof of Theorem \ref{thm:multi-vec-access}} \label{app:proof:thm:multi-vec-access} \begin{proof} First, given that each item is individually accessible, by Theorem~\ref{prop:single-vec-vertex-condition} with $K=1$, we have that for any item $j \in [n]$, there exists a user vector $\mathbf u_j \in \mathbb{R}^d$ such that \[ \mathbf u_j \cdot \mathbf v_j \geq \mathbf u_j \cdot \mathbf v_{j'}, \] for any $j' \in [n], j' \neq j$. For convenience, let us call $\mathbf u_j$ the representative user vector for item $j$. Moreover, consider an arbitrary set of $K$ items, with item feature vectors $\mathbf v_1, \cdots, \mathbf v_K$ and their representative user vectors $\mathbf u_1, \cdots, \mathbf u_K$. Then there exist constants $c_1, \cdots, c_K$ such that \[ c_1\mathbf u_1\cdot \mathbf v_1 = \ldots = c_K\mathbf u_K\cdot \mathbf v_K. \] Now we show that the set of user vectors $\{c_i\mathbf u_i\}, i = 1, \cdots, K$ forms a multi-vector user representation such that the items ${\cal S} = \{\mathbf v_1, \cdots, \mathbf v_K\}$ are jointly accessible. Consider any other set of $K$ items with item feature vectors ${\cal S}' = \{\mathbf v'_1, \cdots, \mathbf v'_K\}$. Denote the predicted ratings for sets ${\cal S}$ and ${\cal S}'$ as $r({\cal S})$ and $r({\cal S}')$. The scoring rule as in Eq~\eqref{eq:score-multi} yields: \begin{align*} r({\cal S}) &= \sum_{i=1}^K \max_{j \in [m]} c_j\mathbf u_j\cdot \mathbf v_i\\ &= \sum_{i=1}^K c_i\mathbf u_i\cdot \mathbf v_i\\ &\geq \sum_{i=1}^K \max_{j \in [m]} c_j\mathbf u_j\cdot \mathbf v'_i \\ &= r({\cal S}') \end{align*} which completes the proof. \end{proof} \section{Additional Experimental Details} In this section, we include further empirical results for the synthetic and ML10M datasets.
Figure \ref{fig:empsyn64} shows the accessibility of items in the synthetic setting with a $64$-dimensional matrix factorization model. Figure \ref{fig:empml64} shows the accessibility of items in the ML10M case with a $64$-dimensional matrix factorization model. The results are qualitatively similar to Figures \ref{fig:empsyn} and \ref{fig:empml}, which indicates that the stereotyping problem is not merely caused by an underspecified model. \begin{figure}[!ht] \centering \subfloat[How similar the top two items recommended to a user are to each other, as a function of how similar the user's two true vectors are.]{ \includegraphics[width=0.445\linewidth]{plots/simsquared_oracle_f892b5715793a27.png} \label{fig:similarityuser} } \qquad \subfloat[Same plot as~\Cref{fig:empiricalappearanceprob}, with 64-dimensional vectors.]{ \includegraphics[width=0.445\linewidth]{plots/probappearance_51221d8b8c6ceb7.png} \label{fig:syn64empir}} \caption{Supplemental figures for the synthetic experiment.} \label{fig:empsyn64} \end{figure} \begin{figure}[!ht] \centering \subfloat{ \includegraphics[width=0.46\linewidth]{plots/probappearance_bca912125522212.png} \label{fig:empiricalappearanceml64}} \qquad \subfloat{ \includegraphics[width=0.46\linewidth]{plots/impossibility_209d196cc8b29fc.png} \label{fig:trainedimpossibilityml64}} \caption{Same plots as in \Cref{fig:empml}, except with 64-dimensional vectors.} \label{fig:empml64} \end{figure} \subsection*{Related work}\label{sec:related_work} Recommender systems have been extensively studied, with many successful industrial applications. We discuss the most closely related work under three broad verticals below. \paragraph{Collaborative filtering and matrix factorization.} Collaborative filtering (CF) is one of the most widely used methods for building large-scale recommender systems~\citep{herlocker2004evaluating,schafer2007collaborative}. CF-based recommender systems create recommendations based on what users similar to a given user have liked in the past. At a high level, the similarity metric across users is based on user rating histories, such that the ratings from like-minded users can be used to predict the ratings of the user of interest; it can also be derived from other items that are likely to be paired with the items which the user of interest has liked in the past. Across many practical implementations of CF-based approaches, a key tool for computing the similarities, either across users or across items, is matrix factorization~\citep{koren2009matrix, xue2017deep,takacs2008investigation, luo2014efficient, yu2012scalable, hernando2016non, wu2018dual}, which provides an efficient way to generate a \textit{single} latent feature vector for each user and item for computing similarities. We illustrate limitations of this approach, and in particular of the single-vector representation of users; we then consider an alternative \textit{multi-vector} user representation, to enable more pairs of items to be recommended together to users. \paragraph{Content diversity in recommender systems.} A primary goal of traditional recommender systems is to achieve high prediction accuracy. However, recent works have illustrated the pitfalls and limitations of this focus.
To this end, our work complements a rich line of research on diversity in recommender systems~\citep{fleder2007recommender, cen2020controllable, moller2018not, helberger2018exposure, lathia2010temporal, candillier2011diversity, kunaver2017diversity, di2017adaptive, gravino2019towards, antikacioglu2017post}, which analyzes how recommender systems may fail to show users diverse content and explores alternative methods to improve content diversity. However, most of the prior works assume that each user is represented by a single vector, and consider diversity in the sense of \textit{individual} items. In this paper, we emphasize that such a single-vector embedding can fundamentally limit the ability of the recommender system to capture a diverse set of interests for each user, and we propose a new metric for diversity in terms of accessibility to \textit{sets} of items. We further show how increasing a user's preference for one item could, in traditional models, decrease the likelihood that they receive recommendations for a different item. Most related (and the exception to the single-vector assumption) is~\citet{cen2020controllable}, who explore a similar multi-vector user representation framework to ours. They train such vectors using modular-based neural networks and provide empirical evidence that such multi-vector representations can better capture the multiple interests of each user. Our paper theoretically uncovers one of the mechanisms behind such a solution's benefits, and in the process highlights the heterogeneous effects of single-vector representations on users: users who like sets of items that are together uncommon among other users -- even if all the items individually are common -- may face especially poor recommendations. \paragraph{Biases in recommender systems.} More broadly, our work is related to the active line of recent works studying biases across different user groups, and long-term impacts such as filter bubbles, glass-ceiling effects, and user agency in recommender systems~\citep{nguyen2014exploring, teppan2015decision, dean2020recommendations, ge2020understanding, mendoza2020evaluating, bellogin2017statistical, jiang2019degenerate, haim2018burst}. These prior works analyze causes of biases theoretically or provide empirical evidence for such biases. Most related is~\citet{dean2020recommendations}, who introduce a measure of reachability and consider user agency in interactive recommender systems; reachability considers whether a \textit{single} item can be recommended to a user, and they study the concept for matrix factorization-based recommender systems through a geometric framework. Our joint accessibility measure generalizes the notion of reachability from a single item to combinations of multiple items, and is thus able to capture the multiple interests of users. \section{Accessibility with Single and Multi-vector Representations} \label{sec:multi_vec} In this section, we consider joint accessibility across all subsets of the items. We first do so for the standard matrix factorization framework, where each user is represented by a single vector, formalizing the impossibility examples from the previous section. Then, we propose an alternative representation model for mitigating the stereotyping problem in joint accessibility. This new model represents each user with multiple vectors and, in particular, can capture each user's diverse interests. We then analyze the multi-vector representation theoretically, providing sufficient conditions for joint accessibility.
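As an aside, the vertex condition of Theorem~\ref{prop:single-vec-vertex-condition} is straightforward to check numerically. The following minimal Python sketch (an illustration only; the function name is ours) enumerates all $K$-subset vector sums and reports which subsets are jointly accessible; on the four-item example used in the proof of the impossibility theorem below, it confirms that every item is individually accessible while two pairs are not:
\begin{verbatim}
import itertools
import numpy as np
from scipy.spatial import ConvexHull

def jointly_accessible_sets(V, K):
    """Return the K-subsets of items whose vector sums are
    vertices of the convex hull of all K-subset sums (the
    accessibility condition of the vertex theorem)."""
    subsets = list(itertools.combinations(range(len(V)), K))
    sums = np.array([V[list(s)].sum(axis=0) for s in subsets])
    hull = ConvexHull(sums)
    return {subsets[i] for i in hull.vertices}

# Items 1-4 from the impossibility example (0-indexed here).
V = np.array([[2, 4], [-2, 2], [-3, -1], [3, -3]], dtype=float)
print(jointly_accessible_sets(V, 1))  # all four singletons appear
print(jointly_accessible_sets(V, 2))  # (0, 2) and (1, 3) are absent
\end{verbatim}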
Note that, as pointed out by \citet{dean2020recommendations}, accessibility to a single item is not always guaranteed: an item may be inaccessible if its vector does not lie on the convex hull defined by the individual item vectors. In this section, we thus ask: \textit{(when) does accessibility of all individual items imply that all subsets of items are jointly accessible?} \subsection{Impossibility result with single-vector representation} Our first result formalizes the examples from the previous section: under a standard matrix factorization framework, individual accessibility does not in general imply joint accessibility. \begin{theorem}\label{thm:single-vec-impossibility}(Proof in Appendix~\ref{app:proof:thm:single-vec-impossibility}) Consider single-vector user representation, with $n$ items, and top-$K$ recommendation. Then, there exist item feature vectors $\mathbf v_1, \cdots \mathbf v_n \in \mathbb{R}^d$ such that every item is individually accessible, but there exists a set of $K$ items that is not jointly accessible. \end{theorem} Theorem~\ref{thm:single-vec-impossibility} shows a fundamental limitation of the single-vector user representation: it severely restricts the joint accessibility of sets of items. This is especially problematic if a user has diverse preferences over items whose feature vectors are not similar. In the next subsection, we propose an alternative representation to overcome this problem. \subsection{Multi-vector representation} From the geometric analysis in Section~\ref{sec:geometric}, we see that the limitation of joint accessibility depends crucially on the single-vector representation: the same user vector cannot be simultaneously close to two far apart vectors, and so such a representation is restricted in capturing multiple interests of the users. Therefore, our new framework uses multiple vectors to represent each user. Consider a top-$K$ recommendation to a user with a total of $n$ items, where the user and item vectors have latent dimension $d$. In a multi-vector representation model, we associate a user with $m$ feature vectors, $\{\mathbf u_1, \ldots, \mathbf u_m\} \subset \mathbb{R}^d$. The system then recommends the top $K$ items based on the feature vectors. An important change is that the new scoring rule takes the \textit{maximum} predicted rating over all user vectors for an item as the final prediction. Formally, the score of each item $j$ is \begin{align}\label{eq:score-multi} s(j) =\max_{i\in [m]} \mathbf u_i^\top \mathbf v_j. \end{align} Given the above scoring rule, the set of items in a top-$K$ recommendation are those with the $K$ highest scores: $S \triangleq \argmax_{S \subset [n],\; |S| = K} \sum_{j \in S} s(j)$. With such a representation and with $m \geq K$, individual accessibility guarantees joint accessibility. \begin{theorem}(Proof in Appendix~\ref{app:proof:thm:multi-vec-access})\label{thm:multi-vec-access} Consider $m$-vector user representations and the recommender system scoring rule as in Eq~\eqref{eq:score-multi}, with $n$ items, and top-$K$ recommendation. Suppose $m \geq K$. Then, for any item feature representations $\mathbf v_1, \cdots \mathbf v_n \in \mathbb{R}^d$, if every item is individually accessible, then every set of size $K$ items is jointly accessible.
\end{theorem} The result follows almost immediately from the definitions: if there exist individual user vectors such that each given item is recommended, then for any set of items of size $K$ there exists a set of user vectors of size $K$ such that the whole set is recommended. \medskip In this section, we have shown a stark difference between the standard matrix factorization framework and one in which each user is represented by multiple vectors. However, one limitation of our theoretical results is that they show that the problem with single vectors \textit{may} occur, not that it necessarily does so. Thus, in the next section, we turn to studying empirically when and if joint accessibility holds. \section{Experiments}\label{sec:exp} In the previous section, we theoretically characterized when a set of items may not be jointly accessible, showing that the single-vector user representation enables such inaccessibility far more than when each user is represented by multiple vectors. In this section, we empirically illustrate that our findings are not just a theoretical concern, showing that standard matrix factorization techniques lead to joint accessibility issues in practice. In \Cref{sec:empmethods}, we outline our empirical setting. Results are in \Cref{sec:empresults_inaccesibility}. \subsection{Empirical setting and methods} \label{sec:empmethods} We first illustrate our insights and results using a synthetically generated dataset, and then using the MovieLens 10M dataset (ML10M) \citep{harper2015movielens}. The synthetic setting allows us to observe ground truth preferences, and thus to illustrate exactly which items are not jointly accessible and in turn which users receive insufficiently diverse recommendations. The ML10M setting then demonstrates that our insights extend to real recommendation settings. We generate the synthetic dataset using a modified version of the \texttt{latent-static} environment from the RecLab simulation platform \citep{krauth2020offline}. \paragraph{Synthetic data generation.} In the synthetic setting, we sample \num{10677} items and \num{69878} users, matching the numbers from ML10M. As the ground truth, we represent each item as a \num{64} dimensional vector uniformly sampled from the unit sphere. To illustrate exactly how the single-vector representation fails when users have diverse (potentially ``opposing'') interests, we construct for each user a ground truth representation as two \num{64} dimensional vectors uniformly sampled from the unit sphere. That is, \[ \mathbf u_{i, k} = \frac{Z_{i, k}}{\|Z_{i, k}\|_2} \quad \mathbf v_j = \frac{Z_j}{\|Z_j\|_2}, \] where $\mathbf u_{i, k}$ is the $k$-th (for $k\in \{1, 2\}$) vector representing user $i$ and $\mathbf v_j$ is the vector representing item $j$, and $Z_{i, k}$ and $Z_j$ are vectors drawn from $\mathcal{N}(0, I_d)$. Then, the corresponding true rating between a user-item pair is \[r_{i, j} = \max_{k \in \{1, 2\}}\langle \mathbf u_{i, k}, \mathbf v_j \rangle.\] For training data, we uniformly sample 10 million user-item pairs without replacement and obtain their corresponding ratings, with additive Gaussian noise of mean zero and standard deviation \num{0.01}. \paragraph{Model training.} In both the MovieLens and synthetic settings we train matrix factorization models using the standard alternating least squares (ALS) algorithm \citep{hu2008collaborative}. In each setting, we model each user and item as a single vector of dimension $32$ or $64$.
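
For concreteness, the ground-truth generation just described, together with the single-vector scoring used by the trained model and the multi-vector scoring rule of Eq~\eqref{eq:score-multi}, can be sketched as follows (a minimal sketch in Python with NumPy; all variable and function names are ours, and the RecLab-specific machinery is omitted):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
d, n_items, n_users = 64, 10677, 69878

def unit(z):
    # Normalize (batches of) vectors to the unit sphere.
    return z / np.linalg.norm(z, axis=-1, keepdims=True)

V = unit(rng.standard_normal((n_items, d)))     # item vectors v_j
U = unit(rng.standard_normal((n_users, 2, d)))  # two true vectors per user

def true_rating(i, j):
    # Ground truth: max over the user's two vectors.
    return np.max(U[i] @ V[j])

def noisy_rating(i, j):
    # Training observation: true rating plus N(0, 0.01^2) noise.
    return true_rating(i, j) + rng.normal(0.0, 0.01)

def top_k_single(u, K=2):
    # Single-vector scoring: s(j) = u . v_j.
    return np.argsort(V @ u)[-K:]

def top_k_multi(U_i, K=2):
    # Multi-vector scoring rule: s(j) = max_i u_i . v_j.
    return np.argsort((V @ U_i.T).max(axis=1))[-K:]
\end{verbatim}
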
We tune the regularization parameter using $10$-fold cross validation and grid search. In the synthetic setting, we further constrain vectors to have norm $1$, to match the ground truth. \paragraph{Recommendations.} After obtaining the trained user and item vectors, for each dataset we select a subset of the \num{400} most popular individual items for which to carry out our analysis (for computational reasons, as our metrics calculations are quadratic in the number of items). For this subset and all users, we calculate a predicted rating between each user $i$ and item $j$, as the dot product of the given item and user vectors, $u_i \cdot v_j$. Finally, we find the pair of items that would be recommended to each user with top-2 recommendations, i.e., the two items with the highest predicted rating for that user. For the synthetic dataset, we further find the pair of items that an oracle recommender would recommend to the user, i.e., the top two items in the user's true ranking over the items. \subsection{Analysis and results} \label{sec:empresults_inaccesibility} The goal of our experimental analysis is to illustrate our insights regarding joint accessibility and the stereotyping challenge: first, that sets of items are not (and cannot be) jointly recommended together even if every individual item can be recommended; and second, that the sets that are inaccessible are in particular those that are anti-correlated in the training data, so that recommendations do not reflect the underlying diversity of a given user's preferences. Note that, by construction of the synthetic dataset, if recommendations were made according to the oracle, there would not be a strong relationship between how similar two item vectors are and how often they're recommended together; an optimal multi-vector model could recommend dissimilar items together. In contrast, in both datasets using the trained single-representation vectors, we indeed observe that only items that are similar are in practice recommended together. We illustrate this in several ways, starting with the synthetic setting where we can also compare to what an oracle recommender would do. \subsubsection{Synthetic setting} First, we observe that the single-vector model does not effectively make item pairs accessible to users. Even restricting attention to $400$ items (and thus \num{79800} unique item pairs), the users combined were only recommended \num{28451}---$35.7\%$---of the item pairs (even as all $400$ items were recommended individually). In contrast, an oracle multi-vector recommender would show these users \num{42729}---$53.5\%$---unique item pairs (note that if every user was shown a different pair, we would see \num{69878} unique pairs). \begin{figure}[!ht] \centering \subfloat[][How often pairs of items with the given similarity are recommended together. For example, items $j,k$ with true similarity $\mathbf v_j \cdot \mathbf v_k = 1$ are on average recommended together for 1 user in the training set, using either the oracle recommendation or the trained system. ]{ \includegraphics[width=0.43\linewidth]{plots/probappearance_a7be4add6140d66.png} \label{fig:empiricalappearanceprob}} \qquad \subfloat[][How often pairs of items with a given similarity are recommended together for a user vector constructed specifically to maximize the ratings for that pair.
With a single vector per user, not all item pairs are accessible.]{ \includegraphics[width=0.43\linewidth]{plots/impossibility_f892b5715793a27.png} \label{fig:trainedimpossibility}} \caption{Empirical accessibility results for our synthetic setting.} \label{fig:empsyn} \end{figure} Figure~\ref{fig:empiricalappearanceprob} further shows the relationship between how similar two items are, and how \textit{often} they're empirically recommended to users in a top-2 recommendation in the dataset. On average, using a single-vector recommender, the most similar items are recommended together more than 10 times more often than are the least similar items. In contrast, using the oracle recommender -- which uses multiple vectors per user -- dissimilar items are recommended together almost as often as are similar items. The above measures are based on the users present in our synthetic dataset (recall that we sample the user vectors uniformly at random). Our theoretical results, on the other hand, concern whether there exists \textit{any} user vector such that a given pair of items can be recommended together as the top two items. To demonstrate such inaccessibility on our synthetic dataset, for each pair of items we (approximately) find the user vector \textit{best} suited to recommend those two items, using the following heuristic. We wish to find a user vector that maximizes the ratings for the given items while minimizing the ratings of other items; thus, we solve the following least squares problem, for item pair $j,k$: \begin{align} u^* &= {\arg\min}_{u} \| \mathbf{V} u - r \|, \label{eqn:theoraccesslsheuristic}\\ \text{where}\,\,\,\,\,\,\,\,\, r_\ell &= \begin{cases} 1 & \ell \in \{j, k\} \\ 0 & \text{otherwise} \end{cases} \nonumber \end{align} and $\mathbf{V}$ is the learned set of item vectors. In other words, we construct a user vector $u^*$ for a hypothetical user that likes items $j,k$ and nothing else. Then, we check whether the given two items are indeed recommended to the user represented by the calculated user vector with top-2 recommendations. A pair is declared inaccessible if they are not the top two items even for a user constructed especially for these two items. Figure~\ref{fig:trainedimpossibility} shows the results. We find that dissimilar pairs of item vectors are indeed sometimes declared inaccessible by our heuristic; even if a user likes those two items and no other items, that user is not recommended those two items with single-vector matrix factorization. Recall that, theoretically, there does exist a multi-vector representation per user such that every pair would be accessible -- and so an oracle recommender would achieve perfect accessibility according to this metric. The above metrics are \textit{item} centered: which pairs of items can or cannot be recommended together. However, recall that a core insight of our work is that users with joint interests that are rare in the population -- even if their individual interests are all popular -- will necessarily receive recommendations that do not match their diverse interests. We now turn to a user-centric measure, shown in \Cref{fig:similarityuser} in the Appendix. Consider how diverse a given user's interests are compared to the general population, as measured by the dot product of their two true user vectors. We find that the more diverse their true interests, the more diverse are their recommendations by the oracle recommender (similarity of recommendations is increasing in similarity of user vectors).
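
As an aside, the heuristic in \Cref{eqn:theoraccesslsheuristic} is straightforward to implement; the following minimal sketch (Python with NumPy; function names are ours) constructs the best-suited user vector for a given item pair and checks whether the pair survives a top-2 recommendation:
\begin{verbatim}
import numpy as np

def best_user_vector(V, j, k):
    # Solve min_u ||V u - r|| with r = 1 on items j, k and 0 elsewhere.
    r = np.zeros(V.shape[0])
    r[[j, k]] = 1.0
    u_star, *_ = np.linalg.lstsq(V, r, rcond=None)
    return u_star

def pair_accessible(V, j, k):
    # Declare the pair accessible if j, k are the top-2 items for u*.
    u_star = best_user_vector(V, j, k)
    top2 = np.argsort(V @ u_star)[-2:]
    return {j, k} == set(top2)
\end{verbatim}
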
Returning to the user-centric comparison: with a single trained vector per user, users with diverse interests do not receive correspondingly diverse recommendations. Furthermore, recommendations using the single-vector model are overall far more similar than are recommendations with the oracle recommender. The Appendix contains the same results but for $64$-dimensional user vectors. In general, we find that the empirical accessibility results (with randomly drawn users) do not change with the dimension (even up to $128$-dimensional user vectors) -- in practice, with a standard training algorithm for single vectors, joint accessibility does not improve with the dimension, even as error metrics such as RMSE do -- in other words, over-parametrization does not help in practice. On the other hand, our heuristic to determine theoretical accessibility as in \Cref{eqn:theoraccesslsheuristic} does predominantly find suitable user vectors -- suggesting that increasing the dimension alongside finding new methods to train user vectors may also increase accessibility in practice. \subsubsection{MovieLens dataset} \begin{figure}[!ht] \centering \subfloat{ \includegraphics[width=0.43\linewidth]{plots/probappearance_382545798ec8fbf.png} \label{fig:empiricalappearanceml}} \qquad \subfloat{ \includegraphics[width=0.43\linewidth]{plots/impossibility_9d196cc8b29fc32.png} \label{fig:trainedimpossibilityml}} \caption{Empirical accessibility results for ML10M. Each subplot shows the same result as the corresponding subplot in \Cref{fig:empsyn}. Here, vector similarity is defined as cosine similarity, to account for the fact that vectors may have differing norms.} \label{fig:empml} \end{figure} \Cref{fig:empml} contains the corresponding results for ML10M, though of course we do not have an oracle recommender system against which to compare. We find that the results are qualitatively similar to those using our synthetic dataset, or even more extreme. Only \num{6433}---$8.1\%$---of pairs are recommended together to the training set users, and now the most similar items are recommended together almost $100$ times more often than are the least similar items (even as $89\%$ of the items can be individually recommended). Restricting to items that are individually recommended, for most pairs of items we can find no user vector that recommends that pair together. Together, these empirical results demonstrate that joint accessibility -- especially of items deemed dissimilar by the user population -- is a challenge in practice. \section{Conclusion} \label{sec:conc} We have studied a stereotyping problem in matrix factorization-based collaborative filtering algorithms with regard to users' accessibility to a diverse set of items -- standard recommenders cannot serve users with joint interests that are anti-correlated in the general population. We formalize this challenge by introducing a new notion of \textit{joint accessibility}, which describes whether a set of items can \textit{jointly} be recommended to a user. We provide intuitive geometric interpretations of joint accessibility, and show that it can be violated when each user is represented by a single vector. We further propose an alternative modelling approach, which is designed to capture the diverse multiple interests of each user using a \textit{multi}-vector representation. Extensive experimental results on real and simulated datasets demonstrate the stereotyping problem with standard single-vector matrix factorization models.
We note, however, that we do not consider how to \textit{train} such a multi-vector model. Future work should take up this challenge, to enable jointly accessible recommender systems in the real world. As recommender systems are increasingly employed in numerous online platforms and high-stakes environments, it is necessary to scrutinize them to ensure that these systems will not perpetuate, exacerbate, or create new inequity problems. Aiming to make recommender algorithms themselves intrinsically fairer, more inclusive, and more equitable plays an important role in achieving that goal. In this work, we inspect one aspect of such impact, namely the stereotyping of diverse user interests, and provide a new analysis of how the joint accessibility of items can be limited in a standard matrix factorization-based recommender system. More generally, our analysis emphasizes the importance of model and algorithm choice to the system's downstream societal impact.
\section{Introduction} Linear isotropic elasticity (LIE) describes the mechanical response of macroscopic molecular solids assuming matter to be continuous and rotationally invariant. These assumptions are not met at the microscopic scale. Indeed, the elastic properties of small polycrystalline~\cite{Mullen1997} or amorphous~\cite{Wittmer2002} samples exhibit large sample-to-sample fluctuations. Similar size fluctuations characterize the elastic response in the plastic regime, where they have been extensively investigated (see, e.g.~\cite{Sethna2017}). The elastic response fluctuations vanish as the linear size of a sample increases and LIE becomes more accurate. Accordingly, LIE's validity depends on the ratio between the linear system size, $L$, and a microscopic elastic length scale, $\xi_E$. What sets this length scale? And how does the validity of LIE depend on $L/\xi_E$? These questions have been separately addressed in amorphous or polycrystalline materials. For amorphous solids, extensive simulations have investigated the convergence of the elastic response to linear isotropic elasticity in model Lennard-Jones like systems. Tanguy et al.~\cite{Tanguy2002} found the stress anisotropy to decrease exponentially with the system size with a decay length of the order of $65$ particle diameters, which is a possible estimate of $\xi_E$. This length scale has been associated with the correlation length of the non-affine particle displacements induced by external deformations, which is also, typically, of the order of several diameters~\cite{Tanguy2002, Wittmer2002,Leonforte2005}. Subsequent work~\cite{Tsamados2009} showed that the eigenvalues of the stiffness tensor evaluated over a coarse-graining length scale $w$ converge to their asymptotic limit as a power-law not complying with the central limit theorem expectation, and possibly dependent on the degree of structural order~\cite{Cakir2016}. We note, however, that these results may depend on the chosen definition of coarse-grained elastic quantities~\cite{Mizuno2013}. For polycrystals, the question of how the validity of LIE depends on $\xi_E/L$ has not been addressed. Previous works, indeed, mostly investigated how the elastic properties relate to those of the individual grains in the limit $L/\xi_E \gg 1$, e.g. through the Voigt~\cite{Voigt1889} or Reuss~\cite{Reuss1929} averages or more refined approaches~\cite{Mavko2009, Avellaneda1996}. In polycrystals, the length scale $\xi_E$ is heuristically identified with the typical grain size~\cite{Chaikin2010}, despite concerns on the connection between structural and elastic length scales~\cite{Nagel}. In this paper, we investigate the emergence of LIE in materials with different degrees of structural disorder, from amorphous to polycrystalline, produced via large-scale three-dimensional numerical simulations of the cooling process of liquid samples, at different cooling rates (Sec.~\ref{sec:model}). We demonstrate in Sec.~\ref{sec:lie} that deviations from LIE scale with the linear size $L$ of the system as $(L/\xi_E)^{-3/2}$, where $\xi_E$ is an elastic correlation length. This result implies that finite-size effects act as a random perturbation to the stiffness matrix, as we discuss in Sec.~\ref{sec:perturbation}.
We further show in Sec.~\ref{sec:structural} that the correlation length $\xi_E$, which grows as the cooling rate decreases, (i) corresponds to a structural correlation length $\xi_S$ which for polycrystalline materials coincides with the grain size and (ii) controls the size dependence of the pressure and anisotropy of the stress tensor. Finally, in Sec.~\ref{sec:local} we study the correlation of locally defined stress and compliance tensors. We show that these tensors are characterized by long-ranged anisotropic correlations, confirming previous findings~\cite{Lemaitre2014,Wu2015a,Lemaitre2015,Lemaitre2018}, and show that the decay of these correlations is governed by the elastic length scale $\xi_E$. \section{Numerical model and protocols \label{sec:model}} We perform large-scale numerical simulations of monodisperse spherical particles of diameter $\sigma$ interacting via the Hertzian potential, $v(r)= \frac{2}{5}\epsilon(1-r/\sigma)^{5/2}$ for $r < \sigma$, $v(r) = 0$ otherwise. We fix the volume fraction to $\phi=0.74$, a value at which the ground state is an fcc crystal~\cite{Pamies2009}, and prepare solid samples by quenching equilibrated liquid configurations to low temperature, using periodic boundary conditions. We mimic quenches to temperatures well below the melting one, $T_m$, by first cooling the system to $T_l \simeq 0.8 T_m$ at rate $\Gamma$, and then minimizing the energy via the conjugate-gradient algorithm. The cooling rate affects the ordering properties of the resulting configuration, which is amorphous at large $\Gamma$, and polycrystalline at small $\Gamma$, as apparent from Fig.~\ref{fig:cooling}. For each cooling rate $\Gamma$ and number of particles $N$, in the range $500$ to $1$ million, we prepare $50$ independent samples. All data reported in the following are averaged over these samples. \begin{figure}[t!] \centering \includegraphics[angle=0,width=0.48\textwidth]{Cooling.eps} \caption{ We illustrate in the right panel the dependence of the pressure on the temperature, for different cooling rates. Energy minimization of the $T=7\times10^{-4}$ configuration brings the system into solid states with different degrees of disorder. The left panels illustrate example radial distribution functions and snapshots of these solids, for $N=131072$. The color of a particle identifies its local crystal structure~\cite{Ackland2006,Stukowski2010}, fcc (green), hcp (red), bcc (blue), icosahedral (yellow), none (gray). In this work, we investigate the elastic properties of these solids, in the linear response regime. \label{fig:cooling} } \end{figure} \section{Emergence of Linear isotropic elasticity~\label{sec:lie}} According to LIE, in three dimensions, the stress-strain relation $\hat {\mathbf \sigma} = \hat {\bf C} \hat {\bf \epsilon}$ is \begin{equation} \begin{pmatrix} \sigma_{1} \\ \sigma_{2} \\ \sigma_{3} \\ \sigma_{4} \\ \sigma_{5} \\ \sigma_{6} \end{pmatrix} = \begin{pmatrix} \lambda + 2\mu & \lambda & \lambda & 0 & 0 & 0\\ \lambda & \lambda + 2\mu & \lambda & 0 & 0 & 0\\ \lambda & \lambda & \lambda + 2\mu & 0 & 0 & 0\\ 0 & 0 & 0 & 2\mu & 0 & 0\\ 0 & 0 & 0 & 0 & 2\mu & 0\\ 0 & 0 & 0 & 0 & 0 & 2\mu \end{pmatrix} \begin{pmatrix} \epsilon_{1} \\ \epsilon_{2} \\ \epsilon_{3} \\ \epsilon_{4} \\ \epsilon_{5} \\ \epsilon_{6} \end{pmatrix} \label{eq:lietensor} \end{equation} Here, the suffix 1-6 indicates $xx,yy,zz,xy,xz,yz$ so that, e.g., $c_{14}$ stands for $c_{xxxy}$.
The parameters $\lambda = \frac{\nu E}{(1+\nu)(1-2\nu)}$ and $\mu = G = \frac{ E}{2(1+\nu)}$ are the Lam\'e constants, and $E$, $G$ and $\nu$ are the Young modulus, shear modulus and Poisson ratio, respectively. If LIE holds, therefore, the six invariants (eigenvalues) of the stiffness matrix $\hat {\bf C}$ are $2\mu$, with multiplicity five, and $3\lambda +2\mu$, with multiplicity one. However, in finite systems rotational invariance is broken and hence $\hat {\bf C}$ is a symmetric matrix with entries depending on the reference frame. A frame-independent evaluation of LIE's validity~\cite{Tsamados2009} is thus obtained by comparing the invariants of $\hat {\bf C}$ with those predicted by LIE. To evaluate the stiffness matrix, we impose on each configuration a strain deformation followed by energy minimization. We perform this operation for the six deformation modes, $d(\epsilon_{\alpha\beta})$. In the linear response regime, which we have checked to occur for strains $d(\epsilon_{\alpha\beta}) \lesssim 10^{-7}$, this allows evaluating the stiffness matrix $c_{\alpha\beta\gamma\delta}$ from the changes in the stress tensor, $d(\sigma_{\alpha\beta})$, \begin{equation} c_{\alpha\beta\gamma\delta}(N) = \frac{d(\sigma_{\alpha\beta})}{d (\epsilon_{\gamma\delta})}. \label{eq:ds} \end{equation} The subsequent diagonalization of the stiffness matrix yields six eigenvalues, which we indicate with $c_1 \leq \ldots \leq c_5 \leq b$. \begin{figure}[t!] \centering \includegraphics[angle=0,width=0.48\textwidth]{FigEigScaling.eps} \caption{ System size dependence (a) of the largest eigenvalue of the stiffness matrix for different cooling rates, and (b) of the other $5$ eigenvalues $c_1,\ldots,c_5$ for $\Gamma = 10^{-7}$. Panel (c) illustrates the approach of each average $\langle c_i \rangle$ to the common asymptotic value $2\mu$ on increasing the system size, for $\Gamma = 10^{-7}$ and $\Gamma = 10^{-8}$. Data are averaged over $50$ realizations for each system size and cooling rate. Panel (d) illustrates that the average deviation of the 5 eigenvalues $\langle c_i \rangle$ from their asymptotic limit scales as $\left(N_E/N\right)^{1/2}$. This allows defining an elastic length scale $\xi_E = N_E^{1/3}$ which grows as $\Gamma$ decreases, as illustrated in the inset. \label{fig:eigs} } \end{figure} We observe the sample average of the largest eigenvalue $\langle b \rangle$ to become asymptotically size independent, $\langle b \rangle-(3\lambda+2\mu)\propto N^{-k_b}$ with $k_b \simeq 0$, as illustrated in Fig.~\ref{fig:eigs}a. $\langle b \rangle$ decreases with $\Gamma$, a finding explained by considering that, at constant volume, ordered systems have a smaller pressure, as in Fig.~\ref{fig:cooling}. When the effect of pressure is filtered out by investigating $\langle b \rangle/\langle P \rangle$, ordered systems turn out to be stiffer than disordered ones. At each $\Gamma$, the five eigenvalues $\langle c_i\rangle$ approach a common limiting value $2\mu$ as the system size increases. We find, in particular, that $c_1$ and $c_2$ approach the asymptotic value from below, $c_4$ and $c_5$ from above, while $c_3 \simeq 2\mu$ regardless of the system size. As an example, we illustrate the size dependence of the eigenvalues in Fig.~\ref{fig:eigs}b, for $\Gamma = 10^{-7}$.
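
For reference, the measurement and diagonalization procedure described above amounts to the following (a minimal sketch in Python with NumPy; the strain-and-minimize measurement of the stress changes is represented by a placeholder, and all names are ours):
\begin{verbatim}
import numpy as np

def stress_change(configuration, mode, eps=1e-7):
    # Placeholder: strain the sample along deformation mode 'mode'
    # (one of the six modes), minimize the energy, and return the
    # six-component change of the stress tensor.
    raise NotImplementedError

def stiffness_matrix(configuration, eps=1e-7):
    # Finite-difference estimate of C, built column by column.
    C = np.column_stack([stress_change(configuration, m, eps) / eps
                         for m in range(6)])
    return 0.5 * (C + C.T)  # symmetrize against numerical noise

def lie_deviation(C, lam, mu):
    # Frame-independent comparison of the sorted eigenvalues of C
    # with the LIE prediction {2*mu (five-fold), 3*lam + 2*mu}.
    eig = np.linalg.eigvalsh(C)  # ascending order
    target = np.array([2 * mu] * 5 + [3 * lam + 2 * mu])
    return np.abs(eig - target) / target
\end{verbatim}
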
The eigenvalues approach their common asymptotic limit as \begin{equation} |\langle c_i\rangle-2\mu| = 2\mu \left(\frac{N}{{N}_E}\right)^{-k_c}, \label{eq:pl} \end{equation} with $k_c = 1/2$ and ${N}_E$ slightly dependent on the considered eigenvalue, as illustrated for $\Gamma = 10^{-7}$ and $\Gamma = 2\times 10^{-8}$ (data scaled by a factor $5$) in panel c. For each cooling rate, we also compute $\langle\hspace{-0.05cm}\langle |\langle c_i\rangle-2\mu| \rangle\hspace{-0.05cm}\rangle$, where $\langle\cdot \rangle$ denotes an average over different realizations, and $\langle\hspace{-0.05cm}\langle\cdot \rangle\hspace{-0.05cm}\rangle$ averages over the different eigenvalues. Fig.~\ref{fig:eigs}d shows that this quantity scales as $N^{-1/2}$ for $N > N_E$, with $N_E$ increasing as the cooling rate decreases. We remark that $N_E$ can be identified with the disorder parameter introduced by fluctuating elasticity theory~\cite{SchirmacherPRL, SchirmacherEPL2006, Marruzzo2013a,Shivam2}. Furthermore, we notice that these findings are in line with previous results on the dependence of the sample-to-sample fluctuations of the elastic constants on the system size~\cite{Mizuno2016a,Mizuno2016b,kapteijns2020elastic}. However, these results represent a significant departure from previous findings~\cite{Tsamados2009} on the dependence of the stiffness matrix's eigenvalues on a coarse-graining length scale, $w$. Indeed, this previous work, in two spatial dimensions, found the largest eigenvalue to approach its asymptotic limit as $w^{-2}$, and the other two as $w^{-0.87}$. By associating the exponents to volume and surface effects~\cite{Tsamados2009}, the scalings should be $w^{-3}$ and $w^{-2}$ in three spatial dimensions, corresponding to $k_b = 1.5$ and $k_c=1$, in marked contrast with our findings, $0$ and $0.5$, respectively. \section{Size effects as perturbations~\label{sec:perturbation}} We rationalize our findings by considering that the stress change resulting from an applied deformation is \begin{equation} d (\sigma_{\alpha\beta}) = d (\epsilon_{\gamma\delta}) c_{\alpha\beta\gamma\delta}(N) = \frac{\rho}{N} \sum_i d (r_\alpha f_\beta)_i, \label{eq:dc} \end{equation} where $r_\alpha$ and $f_\beta$ are the $\alpha$ and $\beta$ components of the distance and the interaction force of the particles involved in bond $i$, respectively, a bond corresponding to an interparticle interaction. Since the strain is given, each matrix element $c_{\alpha\beta\gamma\delta}(N)$ is the average of $\propto N$ numbers. If the contributions $d (r_\alpha f_\beta)_i$ are asymptotically uncorrelated, then by the central limit theorem each matrix element is Gaussian distributed with average $c_{\alpha\beta\gamma\delta}$, its expected value in the thermodynamic limit, and standard deviation scaling as $N^{-1/2}\propto L^{-d/2}$, in $d$ spatial dimensions. Indeed, we observe in Fig.~\ref{fig:pscaling} that the distributions of the matrix elements collapse on a Gaussian curve when appropriately scaled. We remark that these collapses only occur asymptotically, for $N > N_E$, implying the existence of short-ranged spatial correlations between the contributions of the different contacts to the stiffness matrix. \begin{figure}[t!] \centering \includegraphics[angle=0,width=0.48\textwidth]{{FigDist_1e07}.eps} \caption{ Panels (a) and (c) illustrate the distribution of the tensor elements $c_{11}, c_{22}, c_{33}$ and $c_{44}, c_{55}, c_{66}$, respectively, for different system sizes, at cooling rate $10^{-7}$.
These distributions are respectively collapsed in (b) and (d). The full lines are Gaussian fits to the $N=65536$ data. \label{fig:pscaling} } \end{figure} These findings imply that, for $N > N_E$, the stiffness matrix of a given realization is \begin{equation} {\hat {\bf C}}(N) = {\hat {\bf C}}(\infty) + \frac{1}{\sqrt{N}} {\hat {\bf R}}, \label{eq:lieN} \end{equation} where ${\hat {\bf C}}(\infty)$ is as in Eq.~\ref{eq:lietensor}, and ${\hat {\bf R}}$ is a Hermitian random matrix, with some given probability distribution and norm. Finite-size effects, therefore, are equivalent to a random perturbation of the asymptotic stiffness matrix. Matrix perturbation theory~\cite{Stewart1990} then implies that each eigenvalue of ${\hat {\bf C}}(N)$ differs from its asymptotic limit by an amount proportional to the spectral norm of the perturbation, which scales as $N^{-1/2}$, as we have observed. This theoretical interpretation allows rationalizing the results of Fig.~\ref{fig:eigs}, where we investigate how the averages of the {\it sorted} eigenvalues of the perturbed matrix approach their asymptotic values. In a given realization, eigenvalue $b$, which is the largest, equals $b(N) = b_\infty + x$, where $x$ is a random number of zero mean and standard deviation $\propto N^{-1/2}$. The average over different configurations is therefore $\langle b \rangle(N) =b_\infty$: the average has no size dependence, i.e. $k_b = 0$, consistent with our observation in panel a. The other five eigenvalues coincide in the thermodynamic limit. At any finite $N$, noise splits their values, and the eigenvalues equal $c_i = c_\infty + x_i$, $i = 1,\ldots,5$, where $x_i$ are random variables of zero mean and standard deviation $\propto N^{-1/2}$. Since we sort the eigenvalues, $x_i < x_{i+1}$, we have $|\langle c_i \rangle - c_\infty| \propto N^{-k_c}$, with $k_c = 1/2$, for $i \neq 3$. Conversely, for $i = 3$ we predict $\langle c_i \rangle = c_\infty$. All of these predictions are in agreement with our findings in Fig.~\ref{fig:eigs}. In two dimensions, where the stiffness matrix has three eigenvalues, we predict $k_b = 0$, and $k_c=1/2$ for $i=1,2$. This prediction for $k_c$ is in rough agreement with previous results~\cite{Tsamados2009}, which have reported $2k_c = 0.87$. \section{Mechanical and structural length scales~\label{sec:structural}} \subsection{Elastic length scale} The above results imply that the emergence of LIE is characterized by a typical size $N_E$, to which we associate a length scale $\xi_E := N_E^{1/d}$. For $N>N_E$, the probability distributions of the different matrix elements are Gaussian, and Eq.~\ref{eq:pl} holds. This length scale measures the spatial correlation of the different contacts' contributions to the stiffness matrix, $d(r_\alpha f_\beta)_i$. Here, we extract this length scale via the linear regression fits shown in Fig.~\ref{fig:eigs}d. The length scale $\xi_E$ grows as the cooling rate $\Gamma$ decreases and the system becomes more ordered. It varies from $\xi_E \simeq 4\sigma$ at $\Gamma = 10^{-7}$ to $\xi_E \simeq 15\sigma$ at $\Gamma = 10^{-8}$. \subsection{Structural length scale} In polycrystalline materials, which are agglomerates of randomly oriented grains, $\xi_E$ is expected to correspond to the typical grain size. In amorphous materials, $\xi_E$ may reflect a structural length scale that is difficult to define. Since the correlation between mechanical and geometrical properties of solids is debated~\cite{Nagel}, it is also possible that $\xi_E$ does not have a structural interpretation.
Here, we investigate the correlation between the elastic and the structural properties of our systems by associating to each particle its Steinhardt~\cite{Steinhardt1983} order parameters, $q_{lm}(i)=\frac{1}{N_b(i)}\sum_j Y_{lm}(\hat {\bf r_{ij}})$, where the sum runs over all $N_b(i)$ neighbors of particle $i$, and $Y_{lm}(\hat {\bf r_{ij}}) = Y_{lm}(\theta_{ij},\psi_{ij})$ are the spherical harmonics. We identify the neighbours through a Voronoi tessellation. The scalar product $s_{ij} = \sum_{m=-6}^6 q_{6m}(i) q_{6m}^*(j)$ measures the correlation between the structures surrounding particles $i$ and $j$~\cite{Lechner2008}. Hence, the decay of the correlation function \begin{equation} S(r) = \frac{\sum_i \sum_j s_{ij} \delta(r-r_{ij})}{\sum_i \sum_j \delta(r-r_{ij})} \label{eq:S} \end{equation} allows estimating a structural correlation length. We find the correlation function $S(r)$ to decay exponentially, $S(r) = \exp(-r/\xi_S)$, with a characteristic structural length scale $\xi_S$ depending on the cooling rate, as shown in Fig.~\ref{fig:length}a. Deviations from the exponential behaviour result from finite-size effects. The elastic length scale $\xi_E$ and the structural length scale $\xi_S$ turn out to be proportional, as illustrated in Fig.~\ref{fig:length}b. This result demonstrates a close connection between structural and elastic properties, equally valid in our polycrystalline and disordered systems. \begin{figure}[t!] \centering \includegraphics[angle=0,width=0.48\textwidth]{FigLength.eps} \caption{ Structural correlation function (a), Eq.~\ref{eq:S}, for different cooling rates and $N = 524288$; for $\Gamma = 10^{-8}$, we also consider a larger $N$ value, as indicated. The exponential decay of the correlation functions defines a structural length scale, $\xi_S$. Panel (b) shows that $\xi_S$, as well as the length scales $\xi_P$ and $\xi_R$ associated respectively with the pressure and with the stress anisotropy, are proportional to $\xi_E$. \label{fig:length} } \end{figure} \subsection{Stress length scale} Microscopically, $\xi_E$ is the correlation length between the contributions of different interparticle contacts to the stiffness matrix, $d (r_\alpha f_\beta)_i$, Eq.~\ref{eq:dc}. One may, therefore, wonder if the contributions $(r_\alpha f_\beta)_i$ of the contacts to the stress are similarly correlated. We investigate this issue focusing on the dependence of the average pressure $\langle P \rangle$ on the system size. Fig.~\ref{fig:stress}a illustrates that the average pressure exponentially approaches its asymptotic value as $N$ increases. This allows defining a typical size $N_P$, and hence a typical pressure length scale $\xi_P := N_P^{1/d}$, which we show to be proportional to $\xi_E$ in Fig.~\ref{fig:length}b. We remark here that, for $\Gamma = 10^{-8}$, the pressure dependence on $N$ is too weak to allow for a reliable estimation of $\xi_P$. Furthermore, we evaluate the degree of anisotropy of the stress tensor through the parameter $R = \sqrt{2 J_2}/P$, where $J_2$ is the second invariant of the deviatoric stress. Regardless of the cooling rate, $\langle R \rangle$ asymptotically scales as $(N/N_R)^{-1/2}$, as we illustrate in Fig.~\ref{fig:stress}b. The corresponding length scale $\xi_R := N_R^{1/d}$ is also proportional to $\xi_E$, as we illustrate in Fig.~\ref{fig:length}b. \begin{figure}[!!t] \centering \includegraphics[angle=0,width=0.48\textwidth]{FigStress.eps} \caption{ The average pressure approaches a limiting value as the system size increases (a).
The size dependence is well described by an exponential law, $P = P_0 + \Delta P e^{-N/N_P}$ (lines). The averaged stress anisotropy parameter asymptotically scales as $(N/N_R)^{-1/2}$ (b). \label{fig:stress} } \end{figure} \section{Local elasticity~\label{sec:local}} \begin{figure}[b!] \centering \includegraphics[angle=0,width=0.48\textwidth]{FigSpherical.eps} \caption{ (a) Spherical map of the correlation function of the particle-level stress, $\sigma_{xy}({\bf 0})\sigma_{xy}({\bf r})$, for $|{\bf r}| \simeq 1.5$. Data are normalized using their standard deviation. (b) As in (a), but for the correlation function of the component $c_{44}$ of the particle-level stiffness matrix. \label{fig:spherical} } \end{figure} We now consider the possibility of extracting the elastic length scale via the direct study of the local elastic properties, rather than resorting to finite-size investigations. To this end, we associate to each particle stress and elasticity tensors. We define the stress tensor of particle $i$ as $\sigma_{\alpha\beta}^{(i)} = \frac{\rho}{2}\sum_j^{(i)} (r_\alpha f_\beta)_j$ where the sum is over all interaction forces involving particle $i$. We define a particle-level stiffness tensor $c^{(i)}_{\alpha\beta\gamma\delta}$ as $d\sigma_{\alpha\beta}^{(i)}/d(\epsilon_{\gamma\delta})$. These two definitions, and in particular the adoption of a uniform strain, ensure that the macroscopic stress and stiffness tensors emerge as the averages of the local ones. We illustrate in Fig.~\ref{fig:spherical} spherical maps of the correlation functions of the local shear stress, $\langle \sigma_{xy}(r)\sigma_{xy}(0)\rangle - \langle \sigma_{xy}\rangle^2$ (a), and of $c_{44}$, which we will refer to as the local shear modulus $\mu$, $\langle \mu(r)\mu(0)\rangle-\langle \mu \rangle^2$ (b), at $r \simeq 1.5$, for an $N=131072$ particle system in a disordered state, as obtained using the fastest of our cooling rates. The standard deviation of the correlations at the considered radial distance is used as a normalization factor. In accordance with previous results~\cite{Lemaitre2014,Wu2015a,Lemaitre2015,Lemaitre2018}, this investigation reveals Eshelby-like quadrupolar anisotropic correlations both in the stress and in the local shear modulus. \begin{figure}[t!] \centering \includegraphics[angle=0,width=0.48\textwidth]{FigCorr3d.eps} \caption{ Correlation function of the particle-defined $\sigma_{xy}$ for different cooling rates, plotted as a function of $r$ (a) and of $r/\xi_E$ (c). Analogous results for the correlation function of the particle-defined $c_{44}$ are in panels (b) and (d), respectively. The correlation functions are averaged taking into consideration the quadrupolar symmetry of the fields. Symbols are as in Fig.~\ref{fig:stress}. \label{fig:corr3d} } \end{figure} We investigate the radial dependence of the observed stress correlations through an angle-averaged correlation function~\cite{Tong2020}, $C_{\sigma_{xy}}(r) = -\frac{1}{2\pi}\int_0^\pi d\phi \int_0^{2\pi}d\theta [\langle \sigma_{xy}(r)\sigma_{xy}(0)\rangle - \langle \sigma_{xy}\rangle^2]$. The correlation function $C_{\mu}(r)$ of the local shear modulus is similarly defined. Fig.~\ref{fig:corr3d}a illustrates that $C_{\sigma_{xy}}(r) \propto r^{-3}$, after a transient, regardless of the cooling rate. A similar result holds for the local shear modulus's correlation function, as illustrated in panel (b).
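
In practice, such an angle-averaged correlation function can be estimated by radially binning products of the particle-level fields; the following minimal sketch (Python with NumPy; all names are ours, and periodic boundary conditions as well as the quadrupolar weighting are omitted for brevity) illustrates the procedure:
\begin{verbatim}
import numpy as np

def radial_correlation(pos, field, r_max, n_bins=100):
    # Angle-averaged two-point correlation of a particle-level field,
    # C(r) = <f(0) f(r)> - <f>^2, estimated by binning pair distances.
    f = field - field.mean()
    corr = np.zeros(n_bins)
    counts = np.zeros(n_bins)
    for i in range(len(pos)):
        d = np.linalg.norm(pos - pos[i], axis=1)  # distances to particle i
        mask = (d > 0) & (d < r_max)
        bins = (d[mask] / r_max * n_bins).astype(int)
        np.add.at(corr, bins, f[i] * f[mask])
        np.add.at(counts, bins, 1.0)
    r = (np.arange(n_bins) + 0.5) * r_max / n_bins
    return r, corr / np.maximum(counts, 1.0)
\end{verbatim}
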
The results in Fig.~\ref{fig:corr3d}(a) and (b) confirm the existence of long-ranged anisotropic correlations~\cite{Lemaitre2014, Wu2015a, Lemaitre2015, Lemaitre2018} in the stress and stiffness fields of amorphous materials. When the correlation functions are plotted versus the radial distance scaled by the elastic length scale, as in Figs.~\ref{fig:corr3d}(c) and (d), data for different cooling rates collapse in the asymptotic regime, within our numerical uncertainty. This result indicates that the correlation functions asymptotically decay as $(r/\xi_E)^{-3}$, demonstrating how the elastic length scale can be evaluated from the analysis of locally defined elastic quantities. We finally remark that self-averaging, the scaling of the fluctuations of the elastic properties with $N^{-1/2}$ (Fig.~\ref{fig:eigs}), holds because these long-ranged correlations are anisotropic in space: positive and negative contributions cancel when evaluating the fluctuations via a volume integral of the correlation function. \section{Conclusions} Our results establish that the emergence of isotropic linear elasticity is governed by the central limit theorem, which sets in for systems larger than a typical elastic length scale. The existence of a finite correlation length in the elastic properties is in general agreement, e.g., with the assumptions of fluctuating elasticity theory~\cite{SchirmacherPRL, SchirmacherEPL2006, Marruzzo2013a}, as well as with the size dependence of the shear modulus reported in previous works~\cite{Mizuno2016a,Mizuno2016b,Lerner2019,kapteijns2020elastic}. The degree of disorder does not qualitatively affect this scenario, but influences the value of the elastic length scale. Specifically, the elastic length scale grows with the degree of ordering and can be identified with the grain size in polycrystalline materials. We have further demonstrated that the elastic length scale, which we have derived via a finite-size scaling investigation, can alternatively be measured via the study of the spatial correlation of locally defined elastic properties. Both the finite-size scaling and the real-space investigations indicate that the correlations of the elastic properties reflect those of the frozen-in stress. This is a result of practical significance, as correlations in the stress are easier to investigate than correlations in the local elastic constants. We suspect that the structural correlation function we have introduced may be inappropriate in the presence of polydispersity or non-radially symmetric interaction potentials, in which cases it is not apparent which structural correlation function relates to the elastic response. Possibly, in these cases structural correlations could be more meaningfully evaluated indirectly, by studying the correlations of the elastic properties. This appears a promising direction to extract a static length scale in disordered materials, whose relevance to, e.g., the glass transition problem~\cite{Karmakar} or the plastic response~\cite{Sethna2017} needs to be systematically explored. In this regard, it is interesting to contrast our results with size-scaling studies of the fluctuations of the shear modulus in systems whose crystallization is severely inhibited. These studies considered systems first thermalised at a parent temperature $T_p$, and then brought to an energy minimal configuration. The parent temperature, therefore, qualitatively plays the role of our cooling rate.
While we have observed that the elastic length scale grows as a system is better annealed, being correlated with the grain size, these previous studies have conversely found it to decrease~\cite{Rainone2020,Shivam2}. Recent results~\cite{Gonzalez-Lopez2021,Gonzalez-Lopez2021a, Shivam2} have also shown that, in attractive systems, the elastic length scale is affected by the range of the attractive interaction. Hence, depending on the features of the underlying energy landscape, annealing might increase or decrease the elastic length scale above which isotropic linear elasticity sets in. \begin{acknowledgments} We acknowledge support from the Singapore Ministry of Education through the Academic Research Fund Tier 1 (2019-T1-001-03), and are grateful to the National Supercomputing Centre (NSCC) of Singapore for providing the computational resources. \end{acknowledgments}
\section{Introduction} Since the early days of quantum field theory (QFT) 1+1 dimensional models have attracted much attention. They have been extremely valuable to develop general ideas and intuition about the structure of QFT. The oldest and perhaps most popular of these 1+1 D models bear the names of Thirring \cite{Thirring} -- Dirac fermions interacting with a local current-current interaction -- and Schwinger \cite{Schwinger} -- quantum electrodynamics with fermions. The models originated in particle physics and therefore, in order to have Lorentz invariance, were considered mainly on infinite space ${\bf R}$ (see e.g.\ \cite{CHu,CW,CRW} and references therein). Then one has to deal with infrared (infinite space volume) divergences in addition to singularities coming from the ultraviolet (short distances). In case the fermions are massless, both models are soluble \cite{Schwinger,Klaiber,swieca} and a very detailed picture of their properties can be obtained. Another related model originated from solid state physics and is due to Luttinger \cite{Luttinger} -- massless Dirac fermions on spacetime $S^1\times{\bf R}$ interacting with a non-local current-current interaction (Lorentz invariance is nothing natural to ask for in solid state physics). The Luttinger model shows that an interacting fermion system in one space dimension need not behave qualitatively like free fermions but rather has properties similar to a boson system. Such behaviour is generic for 1+1 D interacting fermion models and is denoted as Luttinger liquid in solid state physics, in contrast to the Landau liquids common in 3+1 D. Considering the Luttinger model on compact space has the enormous technical advantage that infrared (IR) problems are absent, and one can concentrate on the short distance (UV) properties which are rather simple due to the non-locality of the interaction. In fact, this allows a construction of the interacting model on the Fock space of {\em free} fermions \cite{MattisLieb,HSU,CH} and one can directly make use of mathematical results from the representation theory of the affine Kac-Moody algebras. Such an approach was recently given for QCD with massless fermions \cite{LS3}. As shown by Manton \cite{Manton}, the Schwinger model on compact space $S^1$ allows a complete understanding of the UV divergences and anomalies and their intriguing interplay with gauge invariance and vacuum structure. In the present paper we study the extension of the Luttinger model obtained by coupling it to a dynamical electromagnetic field. For vanishing Luttinger (4-point) interaction our model therefore reduces to the Schwinger model as studied by Manton \cite{Manton}, and for vanishing electric charge to the Luttinger model \cite{MattisLieb}. Since our approach is in Minkowski space and provides a direct construction of the field-- and observable algebras of the model on a physical Hilbert space, it is conceptually quite different from the path integral approach, and we believe it adds to the physical understanding of these models. The plan of the paper is as follows. In Section 2 the construction of the model is given. To fix notation, we first summarize the classical Hamiltonian formalism. We then construct the physical Hilbert space and discuss the non-trivial implications of anomalies (Schwinger terms) and gauge invariance. In Section 3 the model is solved by bosonization, and a method for calculating all Green functions is explained. As an example, the equal-time 2-point functions are given.
In Section 4 we comment on regularization and renormalization in our setting. We discuss the limit to the Thirring-Schwinger model where the 4-point interaction becomes local and space infinite. We end with a short summary in Section 5. A summary of the mathematical results needed and some details of calculations are deferred to the appendix. \section{Constructing the model} \subsection{Notation} Spacetime is the cylinder with $x=x^1\in \Lambda\equiv [-L/2,L/2]$ the spatial coordinate and $t=x^0\in{\bf R}$ time. We have one Dirac Fermion field $\psi_{\sigma}(\vec{x})$ and one Photon field $A_\nu(\vec{x})$ (here and in the following, $\sigma,\sigma'\in\{+,-\}$ are spin indices, $\mu,\nu\in\{0,1\}$ are spacetime indices, and $\vec{x}=(t,x),\vec{y}=(t',y)$ are spacetime arguments). The action defining the Luttinger-Schwinger model is\footnote{unless otherwise stated, repeated indices are summed over throughout the paper}$^,$ \footnote{$\partial_\nu\equiv\partial/\partial x^\nu$; our metric tensor is $g_{\mu\nu}=diag(1,-1)$} \begin{eqnarray} \label{1} {\cal S} = \int d^2\vec{x}\left(-\frac{1}{4}F_{\mu\nu}(\vec x)F^{\mu\nu}(\vec x) + \bar\psi(\vec x) \gamma^\nu\left(-{\rm i}\partial_\nu + e A_\nu(\vec x)\right)\psi(\vec x)\right) \nonumber \\ \nopagebreak - \int d^2\vec{x}\int d^2\vec{y}\, j_\mu (\vec x)v(\vec x -\vec y) j^\mu(\vec y) \end{eqnarray} where $F_{\mu\nu}=\partial_\mu A_\nu-\partial_\nu A_\mu$ and $\gamma^\nu\equiv (\gamma^\nu)_{\sigma\sigma'}$ are Dirac matrices which we take as $\gamma^0=\sigma_1$ and $\gamma^1={\rm i}\sigma_2$, and $\gamma_5=-\gamma^0\gamma^1=\sigma_3$ ($\sigma_i$ are Pauli spin matrices). As usual, the fermion currents are $ j_\nu = \bar\psi\gamma_\nu\psi, $ and we assume the 4--point interaction to be instantaneous (local in time) \begin{equation} v(\vec x-\vec y) = \delta(t-t')V(x-y) \end{equation} where the interaction potential is parity invariant, $V(x)=V(-x)$. As in the case of the Luttinger model \cite{HSU} we will also have to assume that this potential is not `too strong', or more precisely that the Fourier coefficients \alpheqn \begin{equation} \label{condition} W_k = \frac{1}{8\pi}\int_{\Lam}{\rm d}{x}\, V(x)\ee{-{\rm i} kx} = W_{-k}= W_k^*,\quad k\frac{L}{2\pi} \in{\bf Z} \end{equation} of the potential obey the conditions \begin{equation} \label{condition1} -1-\frac{e^2}{\pi k^2} < W_k < 1\quad\forall k\quad \mbox{ and }\quad \sum_k |kW_k^2|<\infty . \end{equation} \reseteqn {}From the action \Ref{1} we obtain the canonical momenta $\Pi_{A_0(x)} \simeq 0$, $\Pi_{A_1(x)} = F_{01}(x) = E(x)$ etc.\ (here and in the following, we set $t=0$ and make explicit the dependence on the spatial coordinate only) resulting in the Hamiltonian ($\psi^*\equiv \bar\psi\gamma^0$) \begin{eqnarray} \label{4} H=\int_{\Lam} {\rm d}{x} \left( \frac{1}{2} E(x)^2 + \psi^*(x)\gamma_5\left(-{\rm i}\partial_1 + e A_1(x)\right) \psi(x)\right) + 4\int_{\Lam} {\rm d}{x} {\rm d}{y} \, \rho^+(x)V(x-y)\rho^-(y), \end{eqnarray} and the Gauss' law \begin{equation} \label{5} G(x) = -\partial_1 E(x) + e \rho(x) \simeq 0 \, . \end{equation} We introduce chiral fermion currents \begin{equation} \rho^\pm(x) = \psi^*(x)\frac{1}{2}(1\pm\gamma_5)\psi(x) \label{chiralcu} \end{equation} so that the fermion charge-- and momentum densities $\rho=j^0$ and $j=j^1$ can be written as \begin{eqnarray} \rho(x) &=& \rho^+(x) + \rho^-(x)\nonumber \\ \nopagebreak j(x) &=& \rho^+(x) - \rho^-(x). \end{eqnarray} \subsection{Observables} \label{obs} The observables of the model are all gauge invariant operators.
These operators leave physical states invariant. The ground state expectation values of these operators are the Green functions we are interested in. For later reference we write down the action of static gauge transformations, i.e.\ differentiable maps $\Lambda\to{\rm U}(1), x\mapsto \ee{{\rm i} \alpha(x)}$, \begin{eqnarray} \label{gaugetrafo} \psi_\sigma(x)&\to& \ee{{\rm i} \alpha(x)}\psi_\sigma(x) \nonumber \\ \nopagebreak A_1(x) &\to& A_1(x) - \frac{1}{e}\frac{\partial \alpha(x)}{\partial x} \\ E(x) &\to& E(x)\, . \nonumber \end{eqnarray} These obviously leave our Hamiltonian and Gauss' law invariant. We note that every gauge transformation can be decomposed into a {\em small} and a {\em large} gauge transformation, $ \alpha(x)=\alpha_{small}(x) + \alpha_{large}(x), $ where \begin{equation} \alpha_{large}(x) = n\frac{2\pi x}{L} \quad (n\in{\bf Z}), \quad \alpha_{small}\left(-\frac{L}{2}\right)= \alpha_{small}\left(\frac{L}{2}\right) \end{equation} with $n=\frac{\alpha(L/2)-\alpha(-L/2)}{2\pi}$. The large gauge transformations correspond to $\pi_1(S^1)={\bf Z}$ and play an important role in the following, as expected from general arguments \cite{Jackiw}. It is important to note that Gauss' law \Ref{5} requires physical states only to be invariant under small (but {\em not} under large) gauge transformations. All gauge invariant objects which one can construct from $A_1(x)$ (at fixed time) are functions of \begin{equation} Y= \frac{1}{2\pi} \int_{\Lambda}{\rm d} y A_1(y) \, . \end{equation} In fact, $Y$ above is only invariant with respect to small gauge transformations and changes by multiples of $1/ e$ under the large ones. Thus the quantity which is invariant under all gauge transformations is $\ee{{\rm i} 2\pi e Y}$ which is equal to the Wilson line (holonomy) \begin{equation} \label{Wilson} W[A_1] = \ee{{\rm i} e\int_{\Lambda}{\rm d} y A_1(y)} \: . \end{equation} The fermion fields are not gauge invariant, but by attaching parallel transporters to them one obtains field operators \begin{equation} \label{chi} \chi_\sigma(x) = \ee{{\rm i} e\int_r^x{\rm d} y A_1(y) }\psi_\sigma(x)\, , \quad r\in\Lambda \end{equation} which obviously are invariant under all (small and large) gauge transformations \Ref{gaugetrafo} with $\alpha(r)=0$; $r$ is a spatial point which we can choose arbitrarily. Note that these fields also obey CAR but are {\em not} antiperiodic: they obey $\chi_\sigma(L/2)= -W[A_1]\chi_\sigma(-L/2)$ where $W[A_1]$ is the Wilson line above. Bilinears of these operators are the meson operators $$ M_{\sigma\sigma'}(x,y) = \chi^*_\sigma(x)\chi_{\sigma'}(y) \, . $$ These are invariant under all static gauge transformations and thus can be used as building blocks of the Green functions we are interested in. \subsection{The quantum model} In the following we find it convenient to work in Fourier space. We introduce the following useful notation. Fourier space for even (periodic) functions is \alpheqn \begin{equation} \label{a} \Lambda^*\equiv \left\{\left. k=\frac{2\pi}{L} n \right| n\in{\bf Z}\right\}\quad \, . \end{equation} As we use fermions with odd (anti--periodic) boundary conditions we also need \begin{equation} \Lambda^*_{odd}\equiv \left\{\left. k=\frac{2\pi}{L} \left(n+\frac{1}{2}\right) \right| n\in{\bf Z}\right\}.
\end{equation} {}For functions $\hat f$ on Fourier space we write \begin{equation} \hat{\int}_{\dLam} \hat{\rm d} k \hat f(k) \equiv \sum_{k\in\Lambda^*} \frac{2\pi}{L} \hat f(k) \end{equation} \reseteqn and similarly for $\Lambda^*_{odd}$ (we will use the same symbols $\delta$ and $\hat\delta$ also in the latter case). Then the appropriate $\delta$-function satisfying $\hat{\int}_{\dLam}\hat{\rm d} q\, \hat{\delta}(k-q)\hat f(q)=\hat f(k)$ is $\hat{\delta}(k-q) \equiv \frac{L}{2\pi} \delta_{k,q}$. {}For the Fourier transformed operators we use the following conventions, \alpheqn \begin{equation} \label{10a} \hat\psi^{}_{\sigma}(q) = \int_{\Lam} \frac{{\rm d}{x}}{\sqrt{2\pi}} \psi^{}_{\sigma}(x) \ee{-{\rm i} qx},\quad \hat\psi^{*}_{\sigma}(q)=\hat\psi^{}_{\sigma}(q)^* \quad (q\in\Lambda^*_{odd}) \end{equation} (as mentioned, we use anti--periodic boundary conditions for the fermions), \begin{equation} \label{A1} \hat A_1(k) = \int_{\Lam} \frac{{\rm d}{x}}{2\pi} A_1(x) \ee{-{\rm i} kx} \quad (k\in\Lambda^*) \end{equation} and in the other cases \begin{equation} \label{other} \hat Y(k) = \int_{\Lam} {\rm d}{x}\, Y(x) \ee{-{\rm i} kx}\quad (k\in\Lambda^*)\quad \mbox{ for $Y=E,\rho^{\pm},\rho,j,V$} \end{equation} \reseteqn Following \cite{HSU} we also find it convenient to introduce $W_k=\hat V(k)/8\pi$ (cf.\ \Ref{condition}). With that, the non-trivial C(A)CR in Fourier space are \begin{eqnarray} \label{fcacr} \ccr{\hat A_1(p)}{\hat E(k)} &=& {\rm i}\hat{\delta}(k+p) \nonumber \\ \nopagebreak \car{\hat\psi_{\sigma}(q)}{\hat\psi^*_{\sigma'}(q')} &=& \delta_{\sigma\sigma'}\hat{\delta}(q-q') . \end{eqnarray} The essential physical requirement determining the construction of the model and implying a non-trivial quantum structure is positivity of the Hamiltonian on the physical Hilbert space. It is well-known that this forces one to use a non-trivial representation of the field operators of the model. The essential simplification in (1+1) dimensions (not possible in higher dimensions) is that one can use a quasi-free representation for the fermion field operators corresponding to ``filling up the Dirac sea'' associated with the {\em free} fermion Hamiltonian, and for the photon operators one can use a naive boson representation. This will be verified for our model for the class of potentials $V$ obeying (\ref{condition},b). So the full Hilbert space of the model is ${\cal H} = {\cal H}_{\rm Photon}\otimes {\cal H}_{\rm Fermion}$. For ${\cal H}_{\rm Photon}$ we take the boson Fock space generated by boson field operators $b^{*}(k)$ obeying CCR \alpheqn \begin{equation} \ccr{b(k)}{b^*(p)} = \hat{\delta}(k-p)\quad\mbox{etc.} \end{equation} and a vacuum\footnote{Note that the term ``vacuum'' here and in the following does {\em not} mean that this state has anything to do with the ground state of the model; it is just one convenient state from which all other states in the Hilbert space can be generated by applying the field operators.} $\Omega_{\rm P}$ such that \begin{equation} b(k)\Omega_{\rm P} = 0 \quad \forall k\in\Lambda^* . \end{equation} \reseteqn We then set \alpheqn \begin{equation} \label{photon} \hat A_1(k) = \frac{1}{s}\left(b(k) + b^*(-k)\right) ,\quad \hat E(k) = -\frac{{\rm i} s}{2}\left(b(k)-b^*(-k)\right) \end{equation}\reseteqn where $ s^4 = \pi e^2 $ (the reason for choosing this factor $s$ will become clear later).
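Note that the representation \Ref{photon} reproduces the CCR \Ref{fcacr} for any value of $s$: using $\ccr{b(k)}{b^*(p)}=\hat{\delta}(k-p)$ one computes \[ \ccr{\hat A_1(p)}{\hat E(k)} = -\frac{{\rm i}}{2}\left( \ccr{b(p)}{-b^*(-k)} + \ccr{b^*(-p)}{b(k)} \right) = -\frac{{\rm i}}{2}\left( -\hat{\delta}(p+k) - \hat{\delta}(k+p)\right) = {\rm i}\hat{\delta}(k+p) \, , \] and moreover $\hat A_1(k)^* = \hat A_1(-k)$ and $\hat E(k)^* = \hat E(-k)$, as required for the Fourier modes of real fields.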
We will use below normal ordering $\xx\cdots\xx$ of bilinears in the Photon field operators with respect to the vacuum $\Omega_{\rm P}$, for example $\xx b(k)b^*(p)\xx\, = b^*(p)b(k)$. {}For ${\cal H}_{\rm Fermion}$ we take the Fermion Fock space with vacuum $\Omega_{\rm F}$ such that \begin{eqnarray} \label{11} \frac{1}{2}(1 \pm \gamma_5) \hat\psi(\pm q) \,\Omega_{\rm F} &=& \frac{1}{2}(1 \mp \gamma_5) \hat\psi^*(\mp q) \,\Omega_{\rm F} =0 \quad \forall q > 0\, . \end{eqnarray} The presence of the Dirac sea requires normal-ordering $\normal{\cdots}$ of the Fermion bilinears such as $\hat H_0=\hat{\int}_{\dLamF}\hat{\rm d} q \normal{q\,\hat\psi^*(q)\gamma_5\hat \psi(q)}$ and $\hat \rho^\pm$ (\ref{chiralcu}). This modifies their naive commutator relations following from the CAR as Schwinger terms show up \cite{GLrev,CR,A}. In our case, the relevant commutators are: \begin{eqnarray} \ccr{\hat\rho^\pm(k)}{\hat\rho^{\pm}(p)}&=& \pm k\hat{\delta}(k+p)\,, \nonumber\\ \ccr{\hat\rho^\pm(k)}{\hat\rho^{\mp}(p)} &=& 0 \label{12} \, , \\ \ccr{\hat H_0}{\hat\rho^\pm(k)} &=& \pm k \hat\rho^\pm(k) \nonumber \, . \end{eqnarray} We note that \begin{equation} \label{vacF} \hat\rho^+(k)\Omega_{{\rm F}} = \hat\rho^-(-k)\Omega_{{\rm F}} = 0\quad\forall k>0 \end{equation} which together with \Ref{12} shows that the $\hat\rho^+(k)$ (resp. $\hat\rho^-(k)$) give a highest (resp. lowest) weight representation of the Heisenberg algebra. We can now write the Gauss' law operators in Fourier space as \begin{equation} \label{14} \hat G(k) = -{\rm i} k\hat E(k) + e\hat\rho(k), \end{equation} so eqs. \Ref{12} imply \[ \ccr{\hat G(k)}{\hat\rho^\pm(p)} = \pm k e\hat{\delta}(k+p). \] Due to the presence of the Schwinger terms, these Fermion currents no longer commute with the Gauss' law generators, hence they are not gauge invariant and not observables of the model. To obtain Fermion currents obeying the classical relations (without Schwinger terms), we note that $ \ccr{\hat G(k)}{\hat A_1(p)} = k\hat{\delta}(k+p),$ hence the operators \begin{equation} \label{16} \tilde\rho^\pm(k) \equiv \hat\rho^\pm(k) \pm e\hat A_1(k) \end{equation} commute with the Gauss' law generators and are thus the observables of the model corresponding to the chiral Fermion currents on the quantum level. Recalling that normal ordering is unique only up to finite terms, it is natural to regard the $\tilde \rho^\pm(k)$ as the fermion currents obtained by a {\em gauge covariant normal ordering} preserving the classical transformation properties under gauge transformations. Indeed, these currents can be shown to be identical to those obtained by the gauge invariant point splitting method. Similarly, the naive Hamiltonian $\hat H=\hat H_1+\hat H_2$, \begin{eqnarray*} \hat H_1 &=& \hat H_0 +\hat{\int}_{\dLam}\hat{\rm d} k \xx \left( \frac{1}{4\pi}\hat E(k)\hat E(-k) + e\hat A_1(k)\hat j(-k)\right)\xx \\ \hat H_2 &=& \hat{\int}_{\dLam}\hat{\rm d} k \, \hat\rho^+(k)W_k \hat\rho^-(-k) \end{eqnarray*} is not gauge invariant: $\hat H_1$ -- which is the naive Hamiltonian of the Schwinger model -- obeys \[ \ccr{\hat G(k)}{\hat H_1} = 2ke^2 \hat A_1(k) \] and therefore becomes gauge invariant only after adding a photon mass term \cite{Manton} \[ \hat{\int}_{\dLam}\hat{\rm d} k \, e^2 \hat A_1(k)\hat A_1(-k) \] (note that in position space this mass term has the usual form $\frac{e^2}{2\pi}\int_{\Lam}{\rm d}{x}A_1(x)^2$, i.e.\ the photon mass--squared is $e^2/\pi$).
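The equality of the two forms of the mass term follows from our Fourier convention \Ref{A1} by Parseval's identity: from $A_1(x) = \frac{2\pi}{L}\sum_{k\in\Lambda^*}\hat A_1(k)\ee{{\rm i} kx}$ we get \[ \int_{\Lam}{\rm d}{x}\, A_1(x)^2 = \frac{(2\pi)^2}{L}\sum_{k\in\Lambda^*}\hat A_1(k)\hat A_1(-k) = 2\pi\, \hat{\int}_{\dLam}\hat{\rm d} k\, \hat A_1(k)\hat A_1(-k) \, , \] so that indeed $\frac{e^2}{2\pi}\int_{\Lam}{\rm d}{x}A_1(x)^2 = \hat{\int}_{\dLam}\hat{\rm d} k\, e^2 \hat A_1(k)\hat A_1(-k)$.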
Also the Luttinger--interaction term $\hat H_2$ becomes gauge invariant only if one replaces the non--gauge invariant currents $\hat\rho^\pm$ by the gauge invariant $\tilde\rho^\pm$ ones. Thus we obtain the gauge invariant Hamiltonian of the Luttinger--Schwinger model as follows, \begin{eqnarray} H= \hat H_0 + \hat{\int}_{\dLam}\hat{\rm d} k \xx\left( \frac{1}{4\pi} \hat E(k) \hat E(-k) + e\hat A_1(k) \hat j(-k) + e^2 \hat A_1(k)\hat A_1(-k) \right.\nonumber \\ \nopagebreak\left.\frac{}{}\!\!\!+ \left[\hat\rho^+(k)+e\hat A_1(k)\right] W_k\left[\hat\rho^-(-k)-e\hat A_1(-k)\right]\right)\xx. \end{eqnarray} We can now explain the choice (\ref{photon},b) for the representation of the Photon field: the factor $s$ is determined such that the free Photon Hamiltonian is equal to $\hat{\int}_{\dLam}\hat{\rm d} k \sqrt{\frac{e^2}{\pi}}\, b^*(k)b(k)$. \subsection{Bosonization} Kronig's identity\footnote{in the modern literature this is often referred to as (special case of the) Sugawara construction} allows us to rewrite the free Hamiltonian as $ \hat H_0 = \frac{1}{2}\hat{\int}_{\dLam}\hat{\rm d} k \, \xx \left( \hat\rho^+(k)\hat\rho^+(-k) \right.$ $\left.+ \hat\rho^-(k) \hat\rho^-(-k) \right) \xx $ (cf.\ Appendix A for the precise definition of normal ordering; for simplicity of notation we do not distinguish the normal ordering symbol for the photon fields and the fermion currents). With that, it follows from eq.\ \Ref{16} that \begin{equation} \label{20} H = \hat{\int}_{\dLam}\hat{\rm d} k\, \xx \left(\frac{1}{2}\left( \tilde\rho^+(k)\tilde\rho^+(-k) + \tilde\rho^-(k) \tilde\rho^-(-k) \right) + \frac{1}{4\pi}\hat E(k) \hat E(-k) + \tilde\rho^+(k) W_k \tilde\rho^-(-k)\right)\xx \end{equation} which is now explicitly gauge invariant. \section{Solution of the model} \subsection{Gauge Fixing} The only gauge invariant degree of freedom of the Photon field at fixed time is the holonomy $\int_\Lambda{\rm d}{x} A_1(x)$ and one can gauge away all Fourier modes $\hat A_1(k)$ of the gauge field except the one for $k=0$. Thus we can impose the gauge condition \begin{equation} \label{22} \hat A_1(k) = \delta_{k,0} Y \makebox{, \qquad } A_1(x)=\frac{2\pi}{L}Y \end{equation} and solve the Gauss' law $\hat G(k)\simeq 0$ (cf.\ eq.\ \Ref{14}) as \begin{equation} \hat E(k) \simeq \frac{e\hat\rho(k)}{{\rm i} k} \quad {\mbox{for $k\neq 0$}}. \end{equation} This determines all components of $E$ except the one conjugate to $Y$: $\hat E(0) = \frac{L}{2\pi} \frac{\partial}{{\rm i} \partial Y}$. After that, we are left with the ($k=0$)--component of Gauss' law, {\em viz.} \begin{equation} eQ_0\simeq 0,\quad Q_0 = \hat\rho(0) = \hat\rho^+ (0) + \hat\rho^- (0)\, . \end{equation} Inserting this into \Ref{20} gives the Hamiltonian of the model on the physical Hilbert space ${\cal H}_{\rm phys}={\cal L}^2({\bf R},{\rm d}Y)\otimes {\cal H}'_{\rm Fermion}$ (where ${\cal H}'_{\rm Fermion}$ is the zero charge sector of the fermionic Fock space): \begin{eqnarray} \label{24} H = -\frac{L}{8\pi^2} \frac{\partial^2}{\partial Y^2} + \frac{\pi}{L}\left(\left(\hat\rho^+(0) + eY\right)^2 + \left(\hat\rho^-(0) - eY\right)^2 + \left(\hat\rho^+(0) + eY\right) 2 W_0 \left(\hat\rho^-(0) - eY\right) \right) + \nonumber \\ \nopagebreak \hat{\int}_{\dLam\backslash \{0\}}\hat{\rm d} k\, \xx \left( \frac{e^2}{4\pi k^2} \hat\rho(k)\hat\rho(-k) + \frac{1}{2}\left( \hat\rho^+(-k)\hat\rho^+(k) + \hat\rho^-(k)\hat\rho^-(-k) \right) + \hat\rho^+(k)W_k \hat\rho^-(-k)\right)\xx.
\end{eqnarray} \subsection{Diagonalization of the Hamiltonian} \label{zeromode} {}Following \cite{HSU} we now write \begin{equation} H = \frac{2\pi}{L}\sum_{k\geq 0} h_k. \end{equation} We introduce boson creation-- and annihilation operators \alpheqn \begin{equation} \label{crho} c(k) = \left\{\bma{cc} \frac{1}{\sqrt{|k|}}\hat\rho^+(k) & \mbox{ for $k>0$}\\ \frac{1}{\sqrt{|k|}}\hat\rho^-(k) & \mbox{ for $k<0$} \end{array}\right. \end{equation} obeying the usual CCR \begin{equation} \ccr{c(k)}{c^*(p)} = \hat{\delta}(k-p)\quad \mbox{etc.} \end{equation} We then get for $h_{k>0}$ \begin{equation} h_k =\left(k + \frac{e^2}{2\pi k}\right)\left(c^*(k)c(k)+c^*(-k)c(-k) \right) + \left( kW_k + \frac{e^2}{2\pi k}\right)\left(c^*(k)c^*(-k)+c(k)c(-k) \right). \end{equation} \reseteqn {}For $k=0$ we introduce the quantum mechanical variables \alpheqn \begin{eqnarray} \label{qmv} P &=& \left( \hat\rho^+(0) -\hat\rho^-(0) + 2eY \right)\, , \nonumber \\ \nopagebreak X &=& {\rm i}\frac{L}{2\pi}\frac{1}{2e} \frac{\partial}{\partial Y} \end{eqnarray} obeying Heisenberg relations, $\ccr{P}{X}=-{\rm i} L/2\pi$, which allow us to write $h_0$ as the Hamiltonian of a harmonic oscillator, \begin{equation} \label{zeromom0} h_0= \frac{e^2}{\pi} X^2 + \frac{1}{4}(1-W_0)\, P^2 + \frac{1}{4} (1+W_0)\, Q_0^2 -\frac{1}{2}\sqrt{\frac{e^2}{\pi}}\frac{L}{2\pi} \end{equation} \reseteqn (the last term stems from normal ordering $\,\xx\cdots\xx\,$ and is irrelevant for the following). We can now solve the model by diagonalizing its decoupled Fourier modes $h_k$ separately, with the help of a boson Bogoliubov transformation preserving the CCR, \begin{equation} \label{BT} C(k) = \cosh(\lambda_k) c(k) + \sinh(\lambda_k) c^*(-k) \end{equation} where $\lambda_k=\lambda_{-k}$. This leads to \alpheqn \begin{equation} h_k = \omega_k\left(C^*(k)C(k) + C^*(-k)C(-k)\right) -2\eta_k \frac{L}{2\pi} \end{equation} if we choose \begin{equation} \tanh(2\lambda_k) = \frac{2\pi k^2 W_k + e^2}{2\pi k^2 + e^2} \label{th}\, . \end{equation} Then \begin{equation} \omega_k^2 = k^2(1-W_k^2) + \frac{e^2}{\pi}(1-W_k) \end{equation} and \begin{equation} \eta_k = \frac{1}{2}\left(|k| + \frac{e^2}{2\pi|k|} - \omega_k \right) \quad (k\neq 0) . \end{equation} \reseteqn The zero--momentum piece $h_0$ is just a harmonic oscillator and can be written as \alpheqn \begin{equation} \label{zeromom} h_0 = \omega_0 C^*(0) C(0) + \frac{1}{4} (1+W_0)\, Q_0^2 - \eta_0\frac{L}{2\pi} \end{equation} with \begin{equation} \label{Cnull} C(0) = \frac{1}{\sqrt 2}\left(rX + \frac{1}{r}{\rm i} P\right),\quad r^4=\frac{e^2}{\pi}\frac{4}{1-W_0}, \end{equation} energy--squared \begin{equation} \omega_0^2 = \frac{e^2}{\pi}(1- W_0) \end{equation} and zero point energy \begin{equation} \eta_0 =\frac{1}{2}\left(\sqrt{\frac{e^2}{\pi}} - \sqrt{\frac{e^2}{\pi}(1-W_0)}\, \right). \end{equation} \reseteqn Thus we get the Hamiltonian in the following form \alpheqn \begin{equation} H=\hat{\int}_{\dLam}\hat{\rm d} k \omega_k C^*(k)C(k) -L E_0 \end{equation} with the ground state energy density given by \begin{equation} E_0=\frac{1}{2\pi}\hat{\int}_{\dLam}\hat{\rm d} k\, \eta_k. \end{equation} \reseteqn (Note that for large $|k|$, $ \eta_k = \frac{1}{2}\left( \frac{1}{2}|k W_k^2| + \frac{e^2}{2\pi |k|}W_k\right)\left(1+{\cal O}\left(\frac{1}{|k|}\right)\right), $ hence $E_0$ is finite due to our assumptions \Ref{condition1} on the potential.)
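The values of $\lambda_k$ and $\omega_k$ above come from the standard two--mode Bogoliubov computation: writing $h_k = A_k\left(c^*(k)c(k)+c^*(-k)c(-k)\right) + B_k\left(c^*(k)c^*(-k)+c(k)c(-k)\right)$ with $A_k = k+\frac{e^2}{2\pi k}$ and $B_k = kW_k+\frac{e^2}{2\pi k}$, the transformation \Ref{BT} removes the off--diagonal terms precisely when $\tanh(2\lambda_k)=B_k/A_k$, which is condition \Ref{th}, and then $\omega_k=\sqrt{A_k^2-B_k^2}$. Indeed, \[ A_k^2-B_k^2 = (A_k-B_k)(A_k+B_k) = k(1-W_k)\left( k(1+W_k)+\frac{e^2}{\pi k}\right) = k^2(1-W_k^2)+\frac{e^2}{\pi}(1-W_k) \, , \] while the constant $-2\eta_k\frac{L}{2\pi}$, $\eta_k=\frac{1}{2}(A_k-\omega_k)$, arises from reordering via $\ccr{C(k)}{C^*(k)}=\hat{\delta}(0)=\frac{L}{2\pi}$. Note also that the first condition in \Ref{condition1} guarantees $|B_k|<A_k$, so that $\lambda_k$ is well-defined and $\omega_k^2>0$.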
We now construct the unitary operator ${\cal U}$ implementing the Bogoliubov transformation \Ref{BT}, i.e.\ \begin{equation} C(k) = {\cal U} c(k){\cal U}^* \quad \forall k\in\Lambda^* . \end{equation} It is easy to see that operators ${\cal U}_k$ satisfying $C(\pm k) = {\cal U}_k c(\pm k){\cal U}_k^*$ for all $k>0$ are given by \alpheqn \begin{equation} \label{cU} {\cal U}_k = \ee{S_k},\quad S_k = \lambda_k \left( c(k)c(-k) - c^*(k)c^*(-k) \right) \end{equation} which are unitary since the operators $S_k$ are skew-hermitian.\footnote{i.e.\ ${\rm i} S_k$ is selfadjoint} Thus, \begin{equation} {\cal U} = \ee{S},\quad S=\sum_{k>0}S_k . \end{equation} This operator $S$ can be shown to exist and to define an anti-selfadjoint operator if and only if \begin{equation} \sum_{k>0}|k||\lambda_k|^2<\infty \label{24c}, \end{equation}\reseteqn and therefore (\ref{24c}) is necessary and sufficient for the unitary operator ${\cal U}$ to exist. This latter condition is equivalent to the second one in \Ref{condition1} and thus fulfilled by assumption. Note that \begin{equation} \label{hdiag} {\cal U}^* H {\cal U} = \frac{2\pi}{L}h_0 + \frac{2\pi}{L}\sum_{k>0} \left( \omega_k\left( c^*(k)c(k) + c^*(-k)c(-k)\right) -2\eta_k\frac{L}{2\pi}\right) \equiv H_D \end{equation} and therefore ${\cal U}$ is the unitary operator diagonalizing the non--zero modes of our Hamiltonian. \subsection{Gauge invariant states} \label{GND} By the gauge fixing above we reduced the Hilbert space from ${\cal H}$ to ${\cal H}_{{\rm phys}}$ containing all states invariant under {\em small} gauge transformations, i.e. of the form $\ee{{\rm i}\alpha(x)}$ with $\alpha(L/2)=\alpha(-L/2)$. There are, however, still large gauge transformations present which are generated by $\ee{\ii2\pi x/L}$. It is important to note that physical states need not be invariant under these latter transformations, but it is useful to construct states with simple transformation properties. This is the origin of the $\theta$--vacuum. The large gauge transformation $\ee{\ii2\pi x/L}$ acts on the fields as follows \begin{eqnarray} \label{large} \psi(x)&\stackrel{R}{\to}& \ee{{\rm i} 2\pi x/L}\psi(x) = (R_+R_-)^{-1} \psi(x)(R_+R_-)\, ,\nonumber \\ \nopagebreak eY&\stackrel{R}{\to}& eY - 1 \end{eqnarray} where $R_\pm$ are the implementers of $\ee{{\rm i} 2\pi x/L}$ in the chiral sectors of the fermions and are discussed in detail in Appendix A. The large gauge transformation $R$ obviously generates a group ${\bf Z}$, $n\to R^n$, and we denote this group as ${\bf Z}_R$. Our aim is to construct the states in ${\cal H}_{{\rm phys}}$ which carry an irreducible representation of ${\bf Z}_R$, and especially the ground states of our model. We start by recalling that the Fermion Fock space can be decomposed into sectors of different chiral charges $\hat\rho^\pm(0)$, $${\cal H}_{{\rm Fermion}} =\bigoplus_{n_+,n_-\in{\bf Z}}{\cal H}^{(n_+,n_-)}$$ where $$ {\cal H}^{(n_+,n_-)}=\left\{\left. \Psi\in {\cal H}_{{\rm Fermion}}\right| \hat\rho^\pm(0)\Psi = n_\pm \Psi \right\} = R_+^{n_+}R_-^{-n_-}{\cal H}^{(0,0)} $$ (for a more detailed discussion see Appendix A).
Thus, \begin{equation} {\cal H}_{{\rm phys}}= {\cal L}^2({\bf R},{\rm d} Y)\otimes {\cal H}_{{\rm Fermion}}' \end{equation} where \begin{equation} {\cal H}_{{\rm Fermion}}' = \bigoplus_{n\in{\bf Z}}{\cal H}^{(n,-n)}, \quad {\cal H}^{(n,-n)} = (R_+ R_-)^n{\cal H}^{(0,0)} \end{equation} is the zero charge subspace of the Fermion Fock space and we use the Schr\"odinger representation for the physical degree of freedom $Y = \int_{\Lam} {\rm d}{x} A_1(x)/2\pi$ of the photon field as discussed in the last subsection. ${\cal H}_{{\rm phys}}$ can therefore be spanned by states \alpheqn \begin{equation} \label{tetn} \Psi(n) = \phi\mbox{$(Y+\frac{n}{e}) $} (R_+R_-)^n \Psi,\quad \phi \in {\cal L}^2({\bf R},{\rm d} Y),\, \Psi\in{\cal H}^{(0,0)} \end{equation} which, under a large gauge transformation \Ref{large}, transform as \begin{equation} \Psi(n)\stackrel{R}{\to} \Psi(n-1). \end{equation} \reseteqn Thus the states transforming under an irreducible representation of ${\bf Z}_R$ are given by \begin{equation} \label{tet} \Psi^\theta= \sum_{n\in{\bf Z}}\ee{{\rm i}\theta n}\Psi(n) \stackrel{R}{\to} \ee{{\rm i}\theta}\Psi^\theta . \end{equation} It is easy to calculate the inner products of these states, \begin{equation} <\Psi_1^\theta,\Psi_2^{\theta'}>= 2\pi\delta_{2\pi}(\theta-\theta') <\Psi_1,\Psi_2>_{{\rm F}}(\phi_1,\phi_2)_{{\cal L}^2} \end{equation} ($2\pi\delta_{2\pi}(\theta)=\sum_{n\in{\bf Z}}\ee{{\rm i} n\theta}$, since $<(R_+R_-)^n\Psi_1,(R_+R_-)^m\Psi_2> = \delta_{n,m}<\Psi_1,\Psi_2>_{{\rm F}}$; $<\cdot,\cdot>_{{\rm F}}$ and $<\cdot,\cdot>_{{\cal L}^2}$ are the inner products in ${\cal H}_{{\rm Fermion}}$ and ${\cal L}^2({\bf R},{\rm d} Y)$, respectively). Thus the states $\Psi^\theta$ actually are not elements in ${\cal H}_{{\rm phys}}$ (they do not have a finite norm). In our calculation of Green functions below we find it useful to use the notation \begin{equation} \label{reg} <\Psi_1^\theta,\Psi_2^{\theta}>_\theta \equiv <\Psi_1,\Psi_2>_{{\rm F}}(\phi_1,\phi_2)_{{\cal L}^2} \end{equation} which can be regarded as a redefinition of the inner product using a simple multiplicative regularization (dropping the infinite term $2\pi\delta_{2\pi}(0)$). We now construct the ground states of our model. As expected, the quantum mechanical variables $P,X$ \Ref{qmv} describing the zero mode $h_0$ of the Hamiltonian have a simple representation on the $\theta$-states \Ref{tet}, \begin{eqnarray} P\Psi^\theta = \sum_{n\in{\bf Z}}\ee{{\rm i}\theta n} 2e\mbox{$(Y+\frac{n}{e})$} \phi\mbox{$(Y+\frac{n}{e}) $}(R_+R_-)^n\Psi \, ,\nonumber \\ \nopagebreak X\Psi^\theta = \sum_{n\in{\bf Z}}\ee{{\rm i}\theta n} \frac{{\rm i}}{2e}\frac{L}{2\pi}\frac{\partial}{\partial Y} \phi\mbox{$(Y+\frac{n}{e}) $} (R_+R_-)^n\Psi. \end{eqnarray} Thus the ground states of $h_0$ annihilated by $C(0)$ are of the form \Ref{tetn} with \begin{equation} \label{gst} \phi_0(Y) = \left(\frac{\pi}{4e^2\alpha}\right)^\frac{1}{4} \exp\left(-\alpha(2eY)^2\right) \end{equation} where $ \label{alpha} \alpha=\frac{1}{L}\sqrt{\frac{\pi^3}{2e^2}(1-W_0)}, $ and the other eigenstates are the harmonic oscillator eigenfunctions $\phi_n\propto C^*(0)^n\phi_0$. From $C(k)={\cal U} c(k){\cal U}^*$ and $c(k)\Omega_{{\rm F}}=0$ it is clear that the ground state of all $h_{k>0}$ is ${\cal U} \Omega_{{\rm F}}$.
We conclude that the ground states of our model obeying $H \Psi_0^\theta = -L E_0\Psi_0^\theta$ are given by \begin{equation} \label{vac} \Psi_0^\theta= \sum_{n\in{\bf Z}}\ee{{\rm i}\theta n} \phi_0\mbox{$(Y+\frac{n}{e}) $}(R_+R_-)^n {\cal U}\Omega_{{\rm F}}. \end{equation} \subsection{Gauge invariant Green functions} \label{green} The observables of our model now are operators on ${\cal H}_{\rm phys}$ where $\int_\Lambda {\rm d} x A_1(x)$ is represented by $2\pi Y$. We recall that the fully gauge invariant field operators are the $\chi$, \Ref{chi}, which are represented in the present gauge fixed setting by $$\chi_\sigma(x)=\ee{{\rm i} 2\pi e Y(x-r)/L}\psi_\sigma(x).$$ These operators depend on the $r\in\Lambda$ chosen. Bilinears such as meson operators are, however, independent of $r$ and give rise to translation invariant equal time Green functions. Moreover, on the quantum level not only the Wilson line $W[A_1]$ \Ref{Wilson} but actually even \begin{equation} e \int_\Lambda{\rm d} x A_1(x) + \mbox{$\frac{1}{2}$} Q_5 \equiv w[A_1] \end{equation} is gauge invariant (note that $W[A_1] = \ee{{\rm i} w[A_1]}$). This operator is represented by $e Y+\mbox{$\frac{1}{2}$} Q_5 = P/2$ (cf. \Ref{qmv}). The gauge invariant equal time Green functions of the model are the ground state expectation values of products $(\cdots)$ of meson operators and functionals $F[P,X]$ of the zero mode operators $P$, $X$. Since we only consider $(\cdots)$ which are also invariant under large gauge transformations, the transition amplitudes $\left<\Psi_1^\theta,(\cdots)\Psi_2^{\theta'} \right>$ are always proportional to $2\pi\delta_{2\pi}(\theta-\theta')$. Thus the Green functions we consider can be defined as \begin{equation} \label{greenf} \left< \Psi_0^\theta, F[P,X] \, \chi^*_{\sigma_1}(x_1)\chi_{\tau_1}(y_1) \cdots \chi^*_{\sigma_N}(x_N) \chi_{\tau_N}(y_N)\Psi_0^\theta \right>_\theta \end{equation} (note that $\left< \Psi_0^\theta, \Psi_0^\theta \right>_\theta =1$, cf. \Ref{reg}). {}Following \cite{HSU} it is useful to define {\em interacting fermion fields} \alpheqn \begin{equation} \label{intf} \Psi_\sigma(x) = {\cal U}^* \psi_\sigma(x)\, {\cal U} \end{equation} such that (\ref{greenf}) becomes \begin{equation} \mbox{Eq.\ \Ref{greenf}} = \left<\Omega^\theta, F[P,X]\, \Psi^*_{\sigma_1}(x_1) \Psi_{\tau_1}(y_1)\cdots \Psi^*_{\sigma_N}(x_N) \Psi_{\tau_N}(y_N) \Omega^{\theta}\right>_\theta \end{equation} where \begin{equation} \label{freevac} \Omega^\theta= \sum_{n\in{\bf Z}}\ee{{\rm i}\theta n} \phi_0\mbox{$(Y+\frac{n}{e}) $}(R_+R_-)^n \Omega_{{\rm F}} \end{equation} \reseteqn is the $\theta$--state constructed from the free fermion vacuum. The strategy to calculate Green functions of the model using bosonization techniques is the following: the relation \Ref{k2} of Appendix A can be used to move the operators $R_\pm$ and combine them to some power of $(R_+R_-)$. The operators $Q_\pm$ when applied to physical states become simple ${\bf C}$--numbers: $Q_\pm (R_+R_-)^n = (R_+R_-)^n (\pm n+Q_\pm)$ for all integers $n$, and $Q_\pm\Omega_{\rm F} = 0$. For the exponentials of boson operators we use the decomposition into creation and annihilation parts outlined in A.4. The normal ordering procedure gives a product of exponentials of commutators which are (${\bf C}$-number) functions.
For the correlation functions of meson operators $\chi_\sigma^*(x)\chi_{\sigma'}(y)$ we obtain: \alpheqn \begin{eqnarray} \left<\Psi_0^\theta,\chi^*_{\pm}(x) \chi_{\pm}(y)\Psi_0^{\theta} \right>_\theta &=& \ee{-\frac{\pi}{4L}m(x-y)^2} \ee{\Delta(x-y)}g_0^\pm(x-y) \label{2p1}\, , \\ \left<\Psi_0^\theta,\chi_{\pm}^*(x) \chi_{\mp}(y)\Psi_0^{\theta} \right>_\theta &=& \ee{\mp {\rm i}\theta} \ee{-i\frac{2\pi}{L}(x-y)} \ee{-\frac{\pi m}{4L}((x-y)+\frac{2}{m})^2} C(L) \ee{D(x-y)} \label{2p2} \, . \end{eqnarray} \reseteqn with \begin{eqnarray} \Delta(x) &=& \sum_{k>0}\frac{2\pi}{Lk}\sinh^2(\lambda_k)[\ee{{\rm i} kx }+ \ee{-{\rm i} kx }-2] \nonumber\, , \\ D(x) &=& -\sum_{k>0} \frac{\pi}{Lk} \sinh(2\lambda_k) [ \ee{{\rm i} kx }+ \ee{-{\rm i} kx }-2] \label{DeDC}\, , \\ C(L) &=& \frac{1}{L}\exp[\sum_{k>0} \frac{2\pi}{kL}(\sinh(2\lambda_k)-2\sinh^2(\lambda_k))]\,. \nonumber \end{eqnarray} where $g_0^\pm(x)=\frac{1}{L}\frac{e^{\mp i\frac{\pi}{L}x}}{1-e^{\pm i\frac{2\pi}{L} (x\pm i\varepsilon)}}$ is the 2-point function of free fermions, and the Schwinger mass is renormalized to $m^2=\frac{e^2}{\pi(1-W_0)}$. Note that the Green function \Ref{2p2} depends on $\theta$ and is non--zero due to chiral symmetry breaking as in the Schwinger model. As expected, for vanishing electromagnetic coupling, $e=0$, this Green function vanishes (due to the factor $\ee{-\pi/(mL)}$ appearing in (\ref{2p2})). {}From (\ref{2p2}) we can calculate the chiral condensate by setting $x=y$, and in the limit $L\to\infty$ we obtain \begin{equation} \lim_{L\to\infty} \left<\Psi_0^\theta,\chi_{\pm}^*(x) \chi_{\mp}(x)\Psi_0^{\theta} \right>_\theta = \lim_{L\to\infty} \ee{\mp {\rm i}\theta} \ee{-\frac{\pi}{mL}} C(L) = \ee{\mp {\rm i}\theta} C \end{equation} with a constant $C$ which can be calculated in principle from eq.\ (\ref{DeDC}). In the special case of the Schwinger model ($W_k=0$), $C$ can be computed and we recover the well--known result $C_{W_k=0}=\frac{m}{4\pi}\ee{\gamma}$ where $\gamma=0.577\ldots$ is Euler's constant (see e.g.\ \cite{SaWi,Hoso}). \section{Multiplicative regularization and the Thirring-Schwinger model} We recall that the Thirring model is formally obtained from the Luttinger model in the limits \begin{equation} \label{LT} L\to \infty, \quad V(x)\to g \delta(x) \end{equation} i.e.\ when the interaction becomes local and space becomes infinite. The first limit amounts to removing the IR cut--off of our model. By inspection it can easily be done in all Green functions. The second limit in \Ref{LT} is non--trivial: we recall that condition \Ref{condition1} on the Luttinger potential requires sufficient decay of the Fourier modes $W_k$ of the interaction, and this is violated in the Thirring model where $W_k=W_0$ is independent of $k$. This latter condition was necessary for the interacting model to be well--defined on the Hilbert space of the non--interacting model. A better understanding can be obtained by explicitly performing the limit \Ref{LT} in the present setting. The idea is to find a family of Luttinger potentials $\{ V_\ell(x)\}_{\ell>0}$ becoming local for $\ell\downarrow 0$, i.e.\ for all $\ell>0$ the condition \Ref{condition1} is fulfilled and $\lim_{\ell\downarrow 0} V_\ell(x)=g\delta(x)$. Then for all $\ell>0$ everything is well-defined on the free Hilbert space and one can work out in detail how to regularize such that the correlation functions make sense for $\ell\downarrow 0$.
We note that a direct construction of the Thirring model in a framework similar to the one here has been completed in \cite{CRW}. This construction seems to be, however, different from the one outlined below. {}For the case of the Luttinger-Schwinger model we split the function $\Delta(x)$ into a part corresponding to the pure Luttinger model and a part which describes the additional Schwinger coupling, i.~e.~$\Delta(x)=(\Delta(x)-\Delta^{e=0}(x))+\Delta^{e=0}(x)$. The limit $W_k=W_0={\rm const.}$ exists for $\Delta-\Delta^{e=0}$. As $L\to\infty$, the sum in (\ref{DeDC}) turns into an integral and we obtain \begin{eqnarray} \Delta(x)-\Delta^{e=0}(x)=&&\!\!\!\!\!\!\! \frac{1}{\sqrt{1-W_0^2}} \int_0^\infty {\rm d}k\, \left( \frac{1}{\sqrt{k^2+\mu^2}}-1\right)(\cos (kx) -1)+ \nonumber\\ && \!\!\!\!\!\!\! \sqrt{\frac{1+W_0}{1-W_0}}\, \int_0^\infty {\rm d}k\, \frac{\mu^2}{k^2}\frac{1}{\sqrt{k^2+\mu^2}} (\cos (kx) -1) \, . \end{eqnarray} The first integral becomes $K_0(|\mu x|)+\ln\frac{|\mu x|}{2}+\gamma$ and the expression in the last line is a second integral $(n=2)$ of $K_0$ defined iteratively by Ki$_n(x)=\int_x^\infty $Ki$_{n-1}(t) \, dt$, Ki$_0=K_0$ \cite{Abram}. Moreover, we introduced a new mass by $\mu^2=e^2/(\pi(1+W_0))$. Note that the singularities at the origin of the Bessel function are removed by the additional terms, consistent with $\Delta(0)=0$. No regularization has been necessary so far. Renormalization comes along with $\Delta^{e=0}$. We choose a Luttinger-interaction such that $(1-W_k^2)^{-1/2}-1=2a^2 e^{-\ell k}$ where $\ell$ defines the range of the interaction. For this choice we find \begin{eqnarray} \Delta^{e=0}(x)=2a^2\ln\left|\frac{\ell}{x+i\ell}\right| \end{eqnarray} and obviously the Thirring limit makes sense only if one removes the singular part $\ln \ell$ which can be done by a wave function renormalization of the form \begin{eqnarray} \chi_\pm(x) \to \tilde\chi_\pm(x)=Z^{1/2}(a,\ell)\chi_\pm(x) \quad \makebox{with} \quad Z^{1/2}(a,\ell)=\ell^{-a^2} \, . \label{Th3} \end{eqnarray} A similar discussion holds for the chirality mixing correlation function. The 2-point functions of the Thirring-Schwinger model therefore become \alpheqn \begin{eqnarray} \langle\Psi_0^\theta,\tilde\chi_\pm^*(x)\tilde\chi_\pm(0)\Psi_0^\theta\rangle_\theta &=&e^{\Delta_{\rm reg}(x)}g_0^\pm(x)\, ,\\ \left<\Psi_0^\theta,\tilde\chi_{\pm}^*(x) \tilde\chi_{\mp}(0)\Psi_0^{\theta} \right>_\theta &=& \ee{\mp {\rm i}\theta} C_{\rm reg}\, \ee{D_{\rm reg}(x)} \, . \end{eqnarray} \reseteqn If we define $\tau_0$ by $\tanh(2\tau_0)=W_0$ we can write \begin{eqnarray} \Delta_{\rm reg}(x)&\!=&\!\cosh (2\tau_0)\left[ K_0(|\mu x|)+\ln\frac{|\mu x|}{2}+\gamma\right] +\nonumber\\ &&\!\!\frac{1}{2}e^{2\tau_0}\left[1-\frac{\pi}{2}|\mu x|-{\rm Ki}_2(|\mu x|)\right]+ (\cosh (2\tau_0)-1)\ln |x| \, , \nonumber\\ D_{\rm reg}(x) &\!=&\! -\sinh (2\tau_0)\left[ K_0(|\mu x|)+\ln\frac{|\mu x|}{2}+\gamma\right] - \\ &&\!\! \frac{1}{2}e^{2\tau_0}\left[1-\frac{\pi}{2}|\mu x|-{\rm Ki}_2(|\mu x|) \right] \, ,\nonumber\\ \ln C_{\rm reg} &\!=&\! \gamma+\ln\frac{1}{2\pi}+e^{-2\tau_0}\ln \frac{\mu}{2} \nonumber \, . \end{eqnarray} We checked that all Green functions of the Thirring-Schwinger model have a well-defined limit after the wave function renormalization.
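The expression for $\Delta^{e=0}$ can be checked by an elementary integral: in the pure Luttinger case ($e=0$) eq.\ \Ref{th} reduces to $\tanh(2\lambda_k)=W_k$, so our choice of interaction gives $2\sinh^2(\lambda_k)=\cosh(2\lambda_k)-1=(1-W_k^2)^{-1/2}-1=2a^2\ee{-\ell k}$, and in the limit $L\to\infty$ the sum in \Ref{DeDC} becomes \[ \Delta^{e=0}(x) = 2a^2\int_0^\infty \frac{{\rm d}k}{k}\, \ee{-\ell k}\left(\cos (kx)-1\right) = -a^2\ln\left(1+\frac{x^2}{\ell^2}\right) = 2a^2\ln\left|\frac{\ell}{x+i\ell}\right| \, , \] which makes the singular part $2a^2\ln\ell$ removed by \Ref{Th3} explicit.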
We would like to stress that this procedure can be naturally interpreted as a low--energy limit of the Luttinger--Schwinger model: if one is interested only in Green functions describing correlations of far--apart fermions, the precise form of the Luttinger interaction $V(x)$ should be irrelevant and only the total interaction strength $g = \int{\rm d} x\, V(x)$ should matter. Thus as far as these correlators are concerned, they should be equal to the ones of the Thirring model corresponding to this coupling $g$. \section{Conclusion} We formulated and solved the Luttinger-Schwinger model in the Hamiltonian formalism. Structural issues like gauge invariance, the role of anomalies and the structure of the physical states were discussed in detail. The necessary tools for computing all equal time correlation functions were prepared and illustrated by calculating the 2--point Green functions. From this the chiral condensate and critical exponents were computed. We could also clarify how the non-trivial short-distance behavior of the Thirring-Schwinger model arises in a limit from the Luttinger-Schwinger model. \app \section*{Appendix A: Bosons from fermions and vice versa} In this appendix we summarize the basics for the bosonization used in the main text to solve the Luttinger--Schwinger model. Bosonization has been known in the physics literature for quite some time \cite{CR,PressSegal,Kac,Mickelsson}; for a discussion of the older history see \cite{HSU}. We consider the fermion Fock space ${\cal H}_{\rm Fermion}$ generated by the fermion field operators from the vacuum $\Omega_{\rm F}$ as described in the main text. We note that ${\cal H}_{\rm Fermion}={\cal H}_{+}\otimes{\cal H}_{-}$ where ${\cal H}_{\pm}$ are generated by the left-- and right--handed chiral components $\hat\psi_+$ and $\hat\psi_-$ of our Dirac fermions. Bosonization can be formulated for the chiral components $\hat\psi_\pm$ separately as it leaves the two chiral sectors ${\cal H}_{\pm}$ completely decoupled. For our purpose it is more convenient to treat both chiral sectors together. \subsection*{A.1 Structure of fermion Fock space} We start by introducing two unitary operators $R_\pm$ which are defined up to an irrelevant phase factor (which we will leave unspecified) by the following equations, \begin{equation} \hat\psi_\pm(k)R_\pm = R_\pm \hat\psi_\pm (k - \frac{2\pi}{L}) \end{equation} and $R_\pm$ commutes with $\hat\psi_\mp$. A proof of existence and an explicit construction of these operators can be found in \cite{Ruij}. Here we just summarize their physical meaning and special properties. It is easy to see that $R_\pm$ are just the implementers of Bogoliubov transformations given by the {\em large gauge transformations} $\psi_\pm(x)\mapsto \ee{{\rm i} 2\pi x/L}\psi_\pm(x)$ and $\psi_\mp(x)\mapsto \psi_\mp(x)$, hence $R_+R_-$ and $R_+R_-^{-1}$ implement the vector-- and the axial large gauge transformations $\ee{{\rm i} 2\pi x/L}$ and $\ee{{\rm i}\gamma_5 2\pi x/L }$, respectively. These have non--trivial winding number\footnote{ the w.n. of a smooth gauge transformation $\Lambda\to{\rm U}(1)$, $x\mapsto \ee{{\rm i}\alpha(x)}$ is the integer $\frac{1}{2\pi}\left(\alpha(L/2)-\alpha(-L/2)\right)$. } and change the vacuum $\Omega_{\rm F}$ to states containing (anti-) particles. The latter follows from the commutator relations with the chiral fermion currents \begin{eqnarray} (R_\pm)^{-1} \hat\rho^\pm (k) R_\pm = \hat\rho^\pm (k) \pm \delta_{k,0} \label{k2}\, .
\end{eqnarray} The essential point of bosonization is that the total Hilbert space ${\cal H}_{\rm Fermion}$ can be generated from $\Omega_{\rm F}$ by the chiral fermion currents $\hat\rho^{\pm}(k)$ and $R_\pm$. More precisely, for all pairs of integers $n_+,n_-\in{\bf Z}$ we introduce the subspaces ${\cal D}^{(n_+,n_-)}$ of ${\cal H}_{{\rm Fermion}}$ containing all linear combinations of vectors \begin{equation} \label{lin} \hat\rho^+(k_1) \cdots \hat\rho^+(k_{m_+}) \hat\rho^-(q_1)\cdots \hat\rho^-(q_{m_-} ) R_+^{n_+} R_-^{-n_-} \Omega_F \end{equation} where $m_\pm\in{\bf N}_0$ and $k_i,q_i\in\Lambda^*$. The basic result of the boson--fermion correspondence is the following {\bf Lemma:} The space \begin{equation} {\cal D} \equiv \bigoplus_{n_+,n_-\in{\bf Z}} {\cal D}^{(n_+,n_-)} . \end{equation} is dense in ${\cal H}_{\rm Fermion}$ (for a proof see e.g.\ \cite{CR}). {\em Remark:} This Lemma gives the following picture of the structure of the Fock space ${\cal H}_{\rm Fermion}$: It splits into {\em superselection sectors} ${\cal H}^{(n_+,n_-)}$ (which are the closure of ${\cal D}^{(n_+,n_-)}$) containing the eigenstates of the chiral charges $Q_\pm$ with eigenvalues $n_\pm$. The fermion currents $\hat\rho^\pm(k)$ leave all these sectors invariant, and the operators $R_\pm$ intertwine different sectors, $R_+: {\cal H}^{(n_+,n_-)}\to {\cal H}^{(n_+ +1,n_-)}$ and $R_-: {\cal H}^{(n_+,n_-)}\to {\cal H}^{(n_+,n_- - 1)}$. \subsection*{A.2 Kronig's identity} The basic formula underlying the solution of our model is \begin{equation} \label{kronig} \hat H_0 = \frac{\pi}{L}\left( Q_+^2 + Q_-^2 \right) + \frac{2\pi}{L} \sum_{k>0}\left( \hat\rho^+(-k)\hat\rho^+(k) + \hat\rho^-(k) \hat\rho^-(-k) \right) . \end{equation} It expresses the free Dirac Hamiltonian in terms of bilinears of the chiral fermion currents. \subsection*{A.3 Boson--fermion correspondence} The boson--fermion correspondence provides explicit formulas for the fermion operators $\psi_\pm(x)$ in terms of operators $\hat\rho^\pm(k)$ and $R_\pm$, \aalpheqn \begin{equation} \label{limit} \psi_\pm(x) = \lim_{\varepsilon\searrow 0} \psi_\pm(x;\varepsilon) \end{equation} (this limit can e.g. be understood in the weak sense for states in ${\cal D}$), where \begin{eqnarray} \psi_\pm(x;\varepsilon) = \frac{1}{\sqrt{L}} S_\pm(x) \normal{\exp(K_\pm(x;\varepsilon))} \label{bfc} \end{eqnarray} with \begin{equation} \label{S} S_\pm(x)= \ee{\pm {\rm i} \pi x Q_\pm/L } (R_\pm)^{\mp 1}\ee{\pm {\rm i} \pi x Q_\pm/L } = \ee{\mp \pi{\rm i} x/L} (R_\pm)^{\mp 1}\ee{\pm {\rm i} 2\pi x Q_\pm/L } \end{equation} and \begin{equation} \label{Kpm} K_\pm(x;\varepsilon) =\mp \frac{2\pi}{L} \sum_{k\in\Lambda^*\backslash\{0\}} \frac{\hat\rho^\pm(-k)}{k}\ee{-{\rm i} kx}\ee{-\varepsilon|k|} = - K_\pm(x;\varepsilon)^*. \end{equation} \areseteqn More explicitly, the normal ordering $\normal{\cdots}$ is with respect to the fermion vacuum $\Omega_{{\rm F}}$ (cf.\ \Ref{vacF}), \aalpheqn \begin{equation} \normal{\exp(K_\pm(x;\varepsilon))} = \exp(K^{(-)}_\pm(x;\varepsilon)) \exp(K^{(+)}_\pm(x;\varepsilon)) \end{equation} where \begin{equation} K^{(\sigma)}_\pm(x;\varepsilon) = \sigma\frac{2\pi}{L}\sum_{k>0} \frac{\hat\rho^\pm(\pm \sigma k)}{k}\ee{\pm\sigma {\rm i} kx}\ee{-\varepsilon|k|}, \quad \sigma=+,- \end{equation} \areseteqn is such that $K_\pm = K_\pm^{(-)}+K_\pm^{(+)}$ and $K_\pm^{(+)}\Omega_{{\rm F}} = 0$ (cf.\ \Ref{vacF}).
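As a consistency check of the boson--fermion correspondence \Ref{bfc}, note that the CAR imply $\ccr{\hat\rho^\pm(k)}{\psi_\pm(x)}=-\ee{-{\rm i} kx}\psi_\pm(x)$, and the bosonized expression reproduces this relation: for $k\neq 0$, eqs.\ \Ref{12} and \Ref{Kpm} give \[ \ccr{\hat\rho^\pm(k)}{K_\pm(x;\varepsilon)} = \mp \frac{2\pi}{L}\, \frac{\pm k}{k}\,\frac{L}{2\pi}\, \ee{-{\rm i} kx}\ee{-\varepsilon|k|} = -\ee{-{\rm i} kx}\ee{-\varepsilon|k|} \, , \] which, being a ${\bf C}$--number, yields $\ccr{\hat\rho^\pm(k)}{\normal{\exp(K_\pm(x;\varepsilon))}} = -\ee{-{\rm i} kx}\ee{-\varepsilon|k|} \normal{\exp(K_\pm(x;\varepsilon))}$, while for $k=0$ the factor $S_\pm(x)$ accounts for the shift of the charge $Q_\pm$, by \Ref{k2}.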
\subsection*{A.4 Interacting fermions} {}From the definition of the interacting fermion fields $\Psi(x)$ (\ref{intf}) and the representation of free fermions in terms of bosons, we are led to investigate the interacting kernel $\tilde K_\pm(x)={\cal U}^* K_\pm(x) {\cal U}$: $$ \tilde{K}_\pm(x) =\mp \frac{2\pi}{L} \sum_{k\in\Lambda^*\backslash\{0\}} \frac{1}{k} \left( \cosh(\lambda_k)\, \hat\rho^\pm(-k) - \sinh(\lambda_k)\, \hat\rho^\mp(-k) \right) \ee{-{\rm i} kx}\ee{-\varepsilon|k|}\, . $$ It is convenient to write \aalpheqn \begin{equation} \tilde{K}_\pm = \tilde{K}_\pm^{(+)} + \tilde K_\pm^{(-)},\quad \tilde{K}_\pm^{(\sigma)} = {K\! c}_\pm^{(\sigma)} - {K\! s}_\mp^{(\sigma)} \end{equation} where the upper index refers to the creation-- ($\sigma=-$) and annihilation-- ($\sigma=+$) parts of operators and \begin{eqnarray} {K\! c}^{(\sigma)}_\pm(x) &=& \sigma\sum_{k>0}\frac{2\pi}{Lk}\cosh(\lambda_k) \hat\rho^\pm(\pm\sigma k)\ee{\mp\sigma {\rm i} kx }\ee{-\varepsilon k} \, , \nonumber \\ \nopagebreak {K\! s}^{(\sigma)}_\pm(x) &=& \sigma\sum_{k>0}\frac{2\pi}{Lk}\sinh(\lambda_k) \hat\rho^\pm(\pm\sigma k)\ee{\mp\sigma {\rm i} kx }\ee{-\varepsilon k} \: . \end{eqnarray} \areseteqn The nonzero commutators of these operators are \begin{eqnarray} \ccr{{K\! c}^{(+)}_\pm(x)}{{K\! c}^{(-)}_\pm(y)} &=& - \sum_{k>0}\frac{2\pi}{Lk}\cosh^2(\lambda_k) \ee{ \mp {\rm i} k(x-y) }\ee{-2\varepsilon k} \, , \nonumber \\ \nopagebreak \ccr{{K\! c}^{(+)}_\pm(x)}{{K\! s}^{(-)}_\pm(y)} &=& - \sum_{k>0}\frac{\pi}{Lk}\sinh(2\lambda_k) \ee{ \mp{\rm i} k(x-y) }\ee{-2\varepsilon k}\, , \\ \ccr{{K\! s}^{(+)}_\pm(x)}{{K\! s}^{(-)}_\pm(y)} &=& - \sum_{k>0}\frac{2\pi}{Lk}\sinh^2(\lambda_k) \ee{ \mp{\rm i} k(x-y) }\ee{-2\varepsilon k} \: . \nonumber \end{eqnarray} We find the following normal ordered expression for the interacting fermions \begin{equation} \Psi_\pm(x) = \frac{1}{\sqrt{L}} z S_\pm(x) \normal{\ee{\tilde K_\pm(x) }} \end{equation} where $z = \ee{- \sum_{k>0} \frac{2\pi}{Lk}\sinh^2(\lambda_k) }$. \appende \begin{center}{\bf Acknowledgments}\end{center} E.L. would like to thank the Erwin Schr\"odinger International Institute in Vienna for hospitality where part of this work was done, and the ``\"Osterreichische Forschungsgemeinschaft'' for partial financial support in May/June 1994 when this work was begun. He would also like to thank S.G. Rajeev and Mats Wallin for useful discussions. The authors thank the referee for valuable suggestions concerning the presentation of their results.
\section{Introduction} In 1999, in the seminal paper on bi-Lipschitz classification of 2D real singularities \cite{Birbrair}, Lev Birbrair proved the following result: \begin{theorem}[Theorem of Birbrair]\label{thm:birbrair} Given the germ of a semialgebraic set, $(X,a)$, with isolated singularity and connected link, there is a unique rational number $\beta\geq 1$ such that $(X,a)$ is bi-Lipschitz homeomorphic, with respect to its inner distance, to the germ at $0\in\mathbb{R}^3$ of the $\beta$-horn $$\{(x,y,z)\in\mathbb{R}^3 \ \colon \ x^2+y^2=z^{2\beta} \ \mbox{and} \ z\geq 0\}.$$ \end{theorem} A similar result was also obtained in \cite{Grieser:2003}. The goal of the present paper is to bring the Theorem of Birbrair and its ideas to a global perspective on the inner bi-Lipschitz geometry of the 2D real subsets in $\mathbb{R}^n$. Such a perspective, in the smooth case, is closely related to the Fu Conjecture (see \cite{Fu:1998}), which states that {\it a complete Riemannian surface in $\mathbb{R}^3$ with $\int K^+<2\pi$ and $\int K^-<+\infty$ and which is homeomorphic to $\mathbb{R}^2$ must be bi-Lipschitz homeomorphic to $\mathbb{R}^2$}. This conjecture was positively answered by Bonk and Lang in \cite{BonkL:2003} for surfaces endowed with the inner distance. Another related study was presented in \cite{BelenkiiB:2005} by Belen'ki\u{\i} and Burago, who presented a classification of complete Aleksandrov surfaces with finite total curvature under some restrictions on their singularities and such that their ends have non-zero growth speed. Coming back to our goal, let us start by recalling the topological classification of compact (without boundary) smooth surfaces. It is well-known (since the 1860s) that, given a compact smooth surface $S$ in $\mathbb{R}^n$, two symbols $\theta_S\in \{-1,1\}$ and $g_S\in\mathbb{N}\cup \{0\}$ completely determine $S$ up to diffeomorphism, where $\theta_S$ indicates whether $S$ is orientable or not, and $g_S$ is the genus of $S$. In the setting of (not necessarily compact) properly embedded smooth surfaces in $\mathbb{R}^n$, in order to have some control on the topology of such surfaces, let us assume they are semialgebraic. In some sense, since compact manifolds (without boundary) are diffeomorphic to semialgebraic ones (see \cite{Nash}), that assumption is not too restrictive. In this setting, there is a topological structure theorem which says: there exists a radius $R>0$ such that, for any $\rho \geq R$, the Euclidean sphere $\mathbb{S}_{\rho}$ intersects $S$ transversally and $S\setminus B_{\rho}$, the set of points of $S$ outside the Euclidean ball $B_{\rho}$, is diffeomorphic to the cylinder $[\rho,\infty)\times (S\cap\mathbb{S}_{\rho})$. The connected components of $S\setminus B_{\rho}$ are called the {\it ends} of $S$, and any two families of ends, corresponding to radii $\rho,\rho'>R$ respectively, are always diffeomorphic to each other (see \cite{Coste}). Then, in the case of properly embedded smooth surfaces $S$ in $\mathbb{R}^n$ which are semialgebraic, so-called {\it Nash surfaces in $\mathbb{R}^n$}, we have a list of three symbols to determine $S$ up to diffeomorphism, namely: $\theta_S\in \{-1,1\}$, $g_S\in\mathbb{N}\cup \{0\}$ and $e_S\in\mathbb{N}\cup \{0\}$, the number of ends of $S$.
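For instance, taking $\theta_S=1$ for orientable surfaces, the circular cylinder $\{(x,y,z)\in\mathbb{R}^3 \ \colon \ x^2+y^2=1\}$ is a Nash surface with $\theta_S=1$, $g_S=0$ and $e_S=2$, while the paraboloid $\{(x,y,z)\in\mathbb{R}^3 \ \colon \ z=x^2+y^2\}$ has $\theta_S=1$, $g_S=0$ and $e_S=1$.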
In this paper, we consider Nash surfaces in $\mathbb{R}^n$ equipped with the inner distance $$d_{inn}(x_1,x_2) = \inf\{length(\gamma) \ \colon \ \gamma \ \mbox{is a path on} \ S \ \mbox{connecting} \ x_1,x_2\in S \}$$ and we classify those surfaces up to bi-Lipschitz homeomorphisms with respect to the inner distance, the so-called {\it inner lipeomorphisms}. Actually, associated to each Nash surface $S$, we present a list of symbols, $\theta_S\in \{-1,1\}$, $g_S\in\mathbb{N}\cup \{0\}$, $e_S\in\mathbb{N}\cup \{0\}$ and $\beta_1$, ...,$\beta_{e_S}$, where the $\beta_{i}$'s ($\leq 1$) are rational numbers associated with the ends of $S$; this list determines $S$ up to inner lipeomorphisms. Finally, we address semialgebraic surfaces with isolated inner Lipschitz singularities. We classify all these surfaces up to inner lipeomorphisms by using a combinatorial invariant, the so-called inner Lipschitz code (see Definition \ref{def:inner_code}), and we also bring some applications of this classification to complex algebraic plane curves and minimal surfaces with finite total curvature. \section{Preliminaries} Given a path connected subset $X\subset\mathbb{R}^n$, the \emph{inner distance} on $X$ is defined as follows: given two points $x_1,x_2\in X$, $d_{X,inn}(x_1,x_2)$ is the infimum of the lengths of paths on $X$ connecting $x_1$ to $x_2$. \begin{definition}\label{def:lne} Let $X\subset\mathbb{R}^n$ be a subset. We say that $X$ is {\bf Lipschitz normally embedded (LNE)} if there exists a constant $C\geq 1$ such that $d_{X,inn}(x_1,x_2)\leq C\|x_1-x_2 \|$, for every pair of points $x_1,x_2\in X$. \end{definition} For instance, considering the real (resp. complex) cusp $x^2=y^3$, in $\mathbb{R}^2$ (resp. in $\mathbb{C}^2$), one can see that this set is not LNE. \begin{definition}\label{lipschitz function} Let $X\subset\mathbb{R}^n$ and $Y\subset\mathbb{R}^m$. A mapping $f\colon X\rightarrow Y$ is called {\bf outer} (resp. {\bf inner}) {\bf Lipschitz} if there exists $\lambda >0$ such that $$\|f(x_1)-f(x_2)\|\le \lambda \|x_1-x_2\| \quad (\mbox{resp. } d_{Y,inn}(f(x_1),f(x_2))\le \lambda d_{X,inn}(x_1,x_2))$$ for all $x_1,x_2\in X$. An outer Lipschitz (resp. inner Lipschitz) mapping $f\colon X\rightarrow Y$ is called an {\bf outer} (resp. {\bf inner}) {\bf lipeomorphism} if its inverse mapping exists and is outer Lipschitz (resp. inner Lipschitz) and, in this case, we say that $X$ and $Y$ are {\bf outer} (resp. {\bf inner}) {\bf lipeomorphic}. \end{definition} In the following two definitions, let $X\subset\mathbb{R}^n$ be a closed 2-dimensional semialgebraic set. \begin{definition} A point $p\in X$ is called {\bf topologically regular} if there exists a neighborhood $V\subset X$ of $p$ homeomorphic to an open disc in $\mathbb{R}^2$. When all the points $p\in X$ are topologically regular, $X$ is said to be a {\bf semialgebraic topological surface} in $\mathbb{R}^n$. \end{definition} \begin{definition} A point $p\in X$ is called {\bf inner Lipschitz regular} if there exists a neighborhood $V\subset X$ of $p$ inner lipeomorphic to an open disc in $\mathbb{R}^2$; otherwise it is called {\bf inner Lipschitz singular}. We denote by ${\rm Reg}_{inLip}(X)$ (resp. ${\rm Sing}_{inLip}(X)$) the set of all inner Lipschitz regular (resp. singular) points of $X$. \end{definition} \subsection{Semialgebraic ends, topological and Lipschitz singularities} Let $S$ be a closed semialgebraic subset of $\mathbb{R}^n$.
Let us assume that $S$ has at most isolated singularities, i.e., there exists a finite subset $\Sigma\subset S$ such that all points in $S\setminus\Sigma$ are inner Lipschitz regular points of $S$. Such a subset is what we call a {\bf semialgebraic surface in $\mathbb{R}^n$ with isolated inner Lipschitz singularities}. As a consequence of the Local Conic Structure Theorem (by using the inversion mapping $z\mapsto z/|z|^2$), we see that there exists a large radius $R>0$ such that \begin{enumerate} \item $S$ is transversal to the Euclidean sphere $\mathbb{S}(0,\rho)$ for any $\rho\geq R$. \item There exists a semialgebraic homeomorphism $$\phi\colon S\setminus B(0,R)\rightarrow \{ t\cdot u \ : \ t\geq 1 \ \mbox{and} \ u\in S\cap \mathbb{S}(0,R)\}$$ such that $|\phi (z)|= |z|$ for any $z\in S$ outside of the Euclidean ball $B(0,R)$. \end{enumerate} It follows that $S\setminus B(0,R)$ has finitely many semialgebraic connected components $S_1,\dots, S_{e_S}$, each $S_i$ is semialgebraically homeomorphic to the cylinder $\mathbb{S}^1\times[R,\infty)$. Moreover, each $S_i$ is semialgebraically homeomorphic to $S_i\setminus B(0,\rho)$ for any $\rho\geq R$. Those subsets $S_1,\dots, S_{e_S}$ are called the {\bf ends} of $S$. Notice that the ends of $S$ are well-defined up to semialgebraic homeomorphisms. When $S$ has only one end, we say that it is {\bf connected at infinity}. Next, we recall the notion of tangent cone at infinity which is important to the study of Lipschitz geometry of ends of semialgebraic sets. \begin{definition} Let $X\subset \mathbb{R}^m$ be an unbounded subset. We say that $v\in \mathbb{R}^m$ is {\bf tangent to $X$ at infinity} if there are a sequence of real positive numbers $\{ t_j \}_{j\in \mathbb{N}}$ such that $t_j\to +\infty$ and a sequence of points $\{x_j\}_{j\in \mathbb{N}}\subset X$ such that $\lim\limits _{j\to +\infty }\frac{1}{t_j}x_j=v$. Denote by $C(X, \infty )$ the set of $v\in\mathbb{R}^m$ which are tangent to $X$ at infinity and call it the {\bf tangent cone of $X$ at infinity}. \end{definition} \subsection{Contact of curves at infinity} \begin{definition} Let $\Gamma_1, \Gamma_2\subset \mathbb{R}^n$ be two unbounded semialgebraic curves, which are connected at infinity. Given $K>1$, we define $f_{\Gamma_1,\Gamma_2}^K\colon (0,+\infty)\to \mathbb{R}$ by $$ f_{\Gamma_1,\Gamma_2}^K(r)=dist(A^K_r(\Gamma_1),A^K_r(\Gamma_2)), $$ where $A^K_r(X)=\{y\in X;\frac{r}{K}\leq \|y\|\leq Kr\}$ and $dist(X,Y)=\inf\{\|x-y\|;x\in X$ and $y\in Y\}$. If $\Gamma_1\cap \Gamma_2$ is an unbounded set, we define $Cont^K(\Gamma_1, \Gamma_2)=-\infty$ and if $\Gamma_1\cap \Gamma_2$ is a bounded set, we define $$ Cont^K(\Gamma_1, \Gamma_2)=\lim\limits_{r\to +\infty}\frac{\log{f_{\Gamma_1,\Gamma_2}^K(r)}}{\log{r}}. $$ \end{definition} \begin{remark}\label{prop:contact_vs_tangency} Let $\Gamma_1, \Gamma_2\subset \mathbb{R}^n$ be unbounded semialgebraic curves, which are connected at infinity. Let $K>1$. Then $Cont^K(\Gamma_1, \Gamma_2)=1$ if and only if $C(\Gamma_1,\infty)\not=C(\Gamma_2,\infty)$. \end{remark} \begin{proposition}\label{prop:non-dependency_of_K} Let $\Gamma_1, \Gamma_2\subset \mathbb{R}^n$ be unbounded semialgebraic curves, which are connected at infinity. Let $K,\tilde K>1$. Then $Cont^K(\Gamma_1, \Gamma_2)=Cont^{\tilde K}(\Gamma_1, \Gamma_2)$. \end{proposition} \begin{proof} It follows from the definition that $Cont^K(\Gamma_1, \Gamma_2)=-\infty$ if and only if $Cont^{\tilde K}(\Gamma_1, \Gamma_2)=-\infty$.
So, we may assume that $Cont^K(\Gamma_1, \Gamma_2)$ and $Cont^{\tilde K}(\Gamma_1, \Gamma_2)$ are finite numbers. From Remark \ref{prop:contact_vs_tangency}, we may also assume that $C(\Gamma_1,\infty)=C(\Gamma_2,\infty)$. We assume that $K<\tilde K$. \begin{claim}\label{claim:Ksquare} If $\tilde K\leq K^2$ then $Cont^K(\Gamma_1, \Gamma_2)=Cont^{\tilde K}(\Gamma_1, \Gamma_2)$. \end{claim} \begin{proof}[Proof of Claim \ref{claim:Ksquare}] Since $K<\tilde K$, it is clear that $f_{\Gamma_1,\Gamma_2}^{\tilde K}(r)\leq f_{\Gamma_1,\Gamma_2}^K(r)$. Since $C(\Gamma_1,\infty)=C(\Gamma_2,\infty)$, we obtain $$ \min\{f_{\Gamma_1,\Gamma_2}^K(Kr),f_{\Gamma_1,\Gamma_2}^K(r/K)\}\lesssim f_{\Gamma_1,\Gamma_2}^{\tilde K}(r) \mbox{ as }r\to +\infty. $$ Moreover, $$ f_{\Gamma_1,\Gamma_2}^K(r)\approx f_{\Gamma_1,\Gamma_2}^K(Kr) \mbox{ and }f_{\Gamma_1,\Gamma_2}^K(r)\approx f_{\Gamma_1,\Gamma_2}^K(r/K) \mbox{ as }r\to +\infty, $$ and thus we obtain $f_{\Gamma_1,\Gamma_2}^K(r)\approx f_{\Gamma_1,\Gamma_2}^{\tilde K}(r)$ as $r\to +\infty$, which gives $$ Cont^K(\Gamma_1, \Gamma_2)=Cont^{\tilde K}(\Gamma_1, \Gamma_2). $$ \end{proof} It follows from Claim \ref{claim:Ksquare} that $Cont^K(\Gamma_1, \Gamma_2)=Cont^{K^m}(\Gamma_1, \Gamma_2)$ for all positive integers $m$. Let $m$ be a positive integer such that $K^m\leq \tilde K\leq K^{2m}$. By Claim \ref{claim:Ksquare} again, $Cont^{K^m}(\Gamma_1, \Gamma_2)=Cont^{\tilde K}(\Gamma_1, \Gamma_2)$, which finishes the proof. \end{proof} Thus, we define $Cont(\Gamma_1, \Gamma_2)=Cont^K(\Gamma_1, \Gamma_2)$ for some $K>1$. \begin{proposition}\label{prop:contact_invariance} Let $\Gamma_1, \Gamma_2\subset \mathbb{R}^n$ and $\tilde\Gamma_1, \tilde\Gamma_2\subset \mathbb{R}^m$ be unbounded semialgebraic curves, which are connected at infinity. Assume that there exists an outer lipeomorphism $F\colon \Gamma_1\cup \Gamma_2\to \tilde\Gamma_1\cup \tilde\Gamma_2$ such that $F(\Gamma_i)=\tilde \Gamma_i$, $i=1,2$. Then $Cont(\Gamma_1, \Gamma_2)=Cont(\tilde\Gamma_1, \tilde\Gamma_2)$. \end{proposition} \begin{proof} Since $F$ is an outer lipeomorphism, there is $M\geq 1$ such that $$ \frac{1}{M}\|x-y\|\leq \|F(x)-F(y)\|\leq M\|x-y\|, \quad \forall x,y\in \Gamma_1\cup \Gamma_2. $$ Let $x_0\in \Gamma_1\cup \Gamma_2$ be such that $\|x_0\|\geq 1$. Then, for any $x\in \Gamma_1\cup \Gamma_2$ such that $\|x\|\geq r_0=\max\{3\|x_0\|,3M\|F(x_0)\|\}$, we have \begin{eqnarray*} \|F(x)\|&\leq & \|F(x)-F(x_0)\|+\|F(x_0)\| \\ &\leq &M\|x-x_0\|+\|x\|\\ &\leq &3M\|x\| \end{eqnarray*} and \begin{eqnarray*} \|F(x)\|&\geq & \|F(x)-F(x_0)\|-\|F(x_0)\| \\ &\geq & \frac{1}{M}\|x-x_0\|-\frac{1}{3M}\|x\|\\ &\geq & \frac{1}{M}\|x\|-\frac{1}{M}\|x_0\|-\frac{1}{3M}\|x\|\\ &\geq & \frac{1}{3M}\|x\|. \end{eqnarray*} Therefore, for any $K>1$ and $\tilde K= 3MK$, we have that $F(A^K_r(\Gamma_1\cup \Gamma_2))\subset A^{\tilde K}_r(\tilde \Gamma_1\cup \tilde \Gamma_2)$ for all $r\geq Kr_0$. Thus, $$ dist(A^K_r(\Gamma_1),A^K_r(\Gamma_2))\geq \frac{1}{M}dist(F(A^K_r(\Gamma_1)),F(A^K_r(\Gamma_2))) \geq \frac{1}{M}dist(A^{\tilde K}_r(\tilde \Gamma_1),A^{\tilde K}_r(\tilde \Gamma_2)) $$ for all $r\geq Kr_0$; since the multiplicative constant $\frac{1}{M}$ does not change the limit of $\frac{\log f}{\log r}$, this implies $Cont(\Gamma_1, \Gamma_2)\geq Cont(\tilde\Gamma_1, \tilde\Gamma_2)$. Similarly, we also prove that $Cont(\Gamma_1, \Gamma_2)\leq Cont(\tilde\Gamma_1, \tilde\Gamma_2)$, which finishes the proof. \end{proof} \begin{example}\label{exam:contact_horn} Let $\beta\in \mathbb{Q}$ with $\beta\leq 1$. Let $\Gamma_1=\{(x,0)\in\mathbb{R}^2;x\geq 1\}$ and $\Gamma_2=\{(x,y)\in \mathbb{R}^2;x\geq 1$ and $y=x^{\beta}\}$.
Then $Cont(\Gamma_1, \Gamma_2)=\beta$. \end{example} \subsection{Lipeomorphisms between circles}\label{sec:circle_lipeomorphisms} The main goal of this subsection is to show that, if $f,g\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ are two lipeomorphisms with the same orientation, then there exists a lipeotopy $H_t\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ ($0\leq t\leq 1$) such that $H_0=f$ and $H_1=g$. By lipeotopy we mean a lipeomorphism $H\colon [0,1]\times\mathbb{S}^1\rightarrow[0,1]\times\mathbb{S}^1$ of the type $H(t,x)=(t,H_t(x))$, which is equivalent to the following: $H_t$ is a family of lipeomorphisms with a uniform constant. Possibly this result is already known, but we did not find an appropriate reference to quote; this is why we present a proof of it. Let $P\colon\mathbb{R}\rightarrow\mathbb{S}^1$ be the covering mapping $P(x)=e^{2\pi ix}$. \begin{lemma} If $\phi\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ is a positive homeomorphism such that $\phi(1)=1$, then there exists a unique positive homeomorphism $\widetilde{\phi}\colon\mathbb{R}\rightarrow\mathbb{R}$ such that $\widetilde{\phi}(0)=0$ and $P\circ\widetilde{\phi}=\phi\circ P$ (in particular, $\widetilde{\phi}(x+ n)=\widetilde{\phi}(x)+n$ $\forall n\in\mathbb{Z}$). Conversely, for each positive homeomorphism $\widetilde{\phi}\colon\mathbb{R}\rightarrow\mathbb{R}$ such that $\widetilde{\phi}(0)=0$ and $\widetilde{\phi}(x+ n)=\widetilde{\phi}(x)+n$ $\forall n\in\mathbb{Z}$, there is a unique positive homeomorphism $\phi\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ such that $\phi(1)=1$ and $P\circ\widetilde{\phi}=\phi\circ P$. Finally, $\phi$ is a lipeomorphism iff $\widetilde{\phi}$ is a lipeomorphism. \end{lemma} \begin{proof} Let $\phi\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ be a positive homeomorphism such that $\phi(1)=1$. Let $\widetilde{\phi}\colon\mathbb{R}\rightarrow\mathbb{R}$ be defined by: given $x\in\mathbb{R}$, let $\gamma\colon [0,x]\rightarrow\mathbb{S}^1$ be the path defined by $\gamma(t)=\phi(e^{2\pi i t})$ and let $\widetilde{\gamma}\colon [0,x]\rightarrow\mathbb{R}$ be the lifting of $\gamma$ by the covering mapping $P$ with $\widetilde{\gamma}(0)=0$; so, $\widetilde{\phi}(x):=\widetilde{\gamma}(x)$. By definition, we have $\widetilde{\phi}(0)=0$ and $P\circ\widetilde{\phi}=\phi\circ P$, and, since $\phi$ is positive and $\widetilde{\phi}$ is a local homeomorphism, $\widetilde{\phi}$ is an increasing homeomorphism from $\mathbb{R}$ to $\mathbb{R}$. Conversely, let $\widetilde{\phi}\colon\mathbb{R}\rightarrow\mathbb{R}$ be an increasing homeomorphism such that $\widetilde{\phi}(0)=0$ and $\widetilde{\phi}(x+ n)=\widetilde{\phi}(x)+n$ $\forall n\in\mathbb{Z}$. Then, $\phi\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ defined by $\phi(e^{2\pi ix})=e^{2\pi i\widetilde{\phi}(x)}$ is a positive homeomorphism such that $\phi(1)=1$. Finally, we are going to show that $\phi$ is a lipeomorphism iff $\widetilde{\phi}$ is a lipeomorphism with the same constants. Let us consider $\mathbb{R}$ and $\mathbb{S}^1$ equipped with the standard Riemannian metric. Thus, $P\colon\mathbb{R}\rightarrow\mathbb{S}^1$ is, up to the constant factor $2\pi$, a local isometry (it is injective on each interval of length smaller than $1$); since rescaling the metric by a constant does not change bi-Lipschitz constants, $\widetilde{\phi}$ being a lipeomorphism implies that $\phi$ is a lipeomorphism with the same constants. On the other hand, if $\phi$ is a lipeomorphism with constants $c\geq 1$ and $1/c$, we have that $\widetilde{\phi}$ is a lipeomorphism with these constants on each sufficiently small interval.
Now, given $a<b$ in $\mathbb{R}$, we partition the interval $[a,b]$ into subintervals of length smaller than $1$: $a=x_0<x_1<\cdots<x_{n-1}<x_n=b$, and: \medskip \begin{eqnarray*} |\widetilde{\phi}(b)-\widetilde{\phi}(a)| &=& \widetilde{\phi}(b)-\widetilde{\phi}(a) \\ &=& \sum_{j=1}^{n} \widetilde{\phi}(x_j)-\widetilde{\phi}(x_{j-1})\\ &\leq& \sum_{j=1}^{n} c(x_j-x_{j-1}) \\ &=& c|b-a| \end{eqnarray*} and \begin{eqnarray*} |\widetilde{\phi}(b)-\widetilde{\phi}(a)| &=& \widetilde{\phi}(b)-\widetilde{\phi}(a) \\ &=& \sum_{j=1}^{n} \widetilde{\phi}(x_j)-\widetilde{\phi}(x_{j-1})\\ &\geq& \sum_{j=1}^{n} \frac{1}{c}(x_j-x_{j-1}) \\ &=& \frac{1}{c}|b-a|. \end{eqnarray*} Hence $\widetilde{\phi}$ is a lipeomorphism with constants $c$ and $1/c$. \end{proof} Once we have the above lemma, given a positive lipeomorphism $\phi\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ such that $\phi(1)=1$, let us consider $\widetilde{H}_t\colon\mathbb{R}\rightarrow\mathbb{R}$ defined by $\widetilde{H}_t(x) = (1-t)\widetilde{\phi}(x)+tx.$ We see that $\widetilde{H}_t(0)=0$ and $\widetilde{H}_t$ is a family of positive lipeomorphisms (with uniform constant) such that $$\widetilde{H}_t(x+n)=\widetilde{H}_t(x)+n \ \forall n\in\mathbb{Z}.$$ Then, $H_t$ given by the above lemma is a family of lipeomorphisms from $\mathbb{S}^1$ to $\mathbb{S}^1$ (with uniform constant) such that $H_0=\phi$ and $H_1= id_{\mathbb{S}^1}$. \begin{proposition}\label{prop:lipeotopy} Let $f,g\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ be two lipeomorphisms with the same orientation. Then there exists a lipeotopy $H_t\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ such that $H_0=f$ and $H_1=g$. \end{proposition} \begin{proof} We do the proof in the case $f(1)=g(1)$, i.e. $f\circ g^{-1}(1)=1$; the general case reduces to this one, since the family of rotations provides a lipeotopy between $f\circ g^{-1}$ and a positive lipeomorphism fixing $1$. Then, by the previous discussion, there exists a lipeotopy $H_t\colon\mathbb{S}^1\rightarrow\mathbb{S}^1$ such that $H_0=f\circ g^{-1}$ and $H_1=id_{\mathbb{S}^1}$. Finally, $K_t:= H_t \circ g$ gives us a lipeotopy such that $K_0=f$ and $K_1=g$. \end{proof} \section{Ends of semialgebraic surfaces in $\mathbb{R}^n$} \subsection{Infinity strips}\label{sec:triangles} Let $a>0$ and $\beta\in\mathbb{Q}$; $\beta\leq 1$. Let us denote $$T_{\beta}=\{(x,y)\in\mathbb{R}^2 \ \colon \ a\leq x \ \mbox{and} \ 0\leq y\leq x^{\beta}\} .$$ Notice that, up to outer lipeomorphisms, the definition of $T_{\beta}$ does not depend on the constant $a>0$. \begin{lemma}\label{lemma: lne} $T_{\beta}$ is LNE. \end{lemma} \begin{proof} Since $T_{\beta}$ is a convex subset of $\mathbb{R}^2$ in the case $\beta \geq 0$ (and convex sets are clearly LNE), we are going to prove this lemma for $\beta < 0$. Let $P,Q\in T_{\beta}$. The length of the segment $\overline{PQ}$ is exactly $|Q-P|$. So, if the segment $\overline{PQ}$ is contained in $T_{\beta}$, we have $d_{inn}(P,Q)=|Q-P|$; otherwise, $\overline{PQ}$ intersects the boundary $\{(x,x^{\beta}) \ \colon \ 1\leq x \}$ of the set $T_{\beta}$ at two points $A=(a,a^{\beta})$ and $B=(b,b^{\beta})$ ($a<b$). \begin{claim} The length $l(\gamma)$ of the boundary path $\gamma\colon[a,b]\rightarrow T_{\beta}$; $\gamma(t)=(t,t^{\beta})$ is bounded by $2|B-A|$. \end{claim} In fact, once $l(\gamma)=\int_{a}^{b}|\gamma'(t)|dt$, we have \begin{eqnarray*} l(\gamma) &=& \int_{a}^{b}\sqrt{1+\beta^2t^{2\beta-2}}dt \\ &\leq & \int_{a}^{b} (1-\beta t^{\beta-1})dt \quad \mbox{(note that $\beta < 0$)} \\ &=& (b-a) - (b^{\beta}-a^{\beta}) \\ &=& (b-a) + (a^{\beta}-b^{\beta}) \\ &\leq& 2 |B-A|. \end{eqnarray*} The claim is proved.
\medskip Finally, once we have proved the claim, we get \begin{eqnarray*} d_{inn}(P,Q) &\leq& |A-P|+l(\gamma)+|Q-B| \\ &\leq& |A-P|+2 |B-A|+|Q-B| \\ &\leq& 2 |Q-P|. \end{eqnarray*} This finishes the proof that $T_{\beta}$ is LNE. \end{proof} \begin{definition}\label{def:triangle} Let $X\subset\mathbb{R}^n$ be a semialgebraic subset. We say that $X$ is a {\bf $\beta$-strip at infinity} if there exist a compact subset $K\subset\mathbb{R}^n$ and a germ of a semialgebraic inner lipeomorphism $F\colon X\setminus K\rightarrow T_{\beta}$. \end{definition} \begin{remark} As an immediate consequence of Example \ref{exam:contact_horn}, there is no ambiguity in the definition of a $\beta$-strip at infinity; in other words, if $T_{\beta}$ is inner lipeomorphic to $T_{\beta'}$ then $\beta=\beta'$. \end{remark} \begin{proposition}\label{prop: triangle-f} Let $f\colon[a,\infty)\rightarrow\mathbb{R}$ be a positive semialgebraic function such that $f(x)\approx x^{\beta}$ as $x\to\infty$ for some rational number $\beta\leq 1$. In this case $$X=\{(x,y)\in\mathbb{R}^2 \ \colon \ a\leq x \ \mbox{and} \ 0\leq y\leq f(x)\}$$ is an LNE $\beta$-strip at infinity. \end{proposition} \begin{proof} By assumption, and since the semialgebraic function $f$ admits a Puiseux expansion at infinity, we have a real number $c>0$ such that $f(x)=cx^{\beta}+o_{\infty}(x^{\beta})$, where $\displaystyle\frac{o_{\infty}(x^{\beta})}{x^{\beta}}\to 0$ as $x\to\infty$. \medskip Let $F\colon T_{\beta}\rightarrow X$ be defined by $\displaystyle F(x,y)=(x,\frac{yf(x)}{cx^{\beta}})$. It is clear that $F$ is a semialgebraic homeomorphism. The Jacobian matrix $DF(x,y)$ is bounded, as we see below: $$ DF(x,y)=\left(\begin{array}{ccc} 1& &0\\ & & \\ \frac{y}{cx^{2\beta}}[f'(x)x^{\beta}-\beta f(x)x^{\beta-1}]& & \frac{f(x)}{cx^{\beta}}\\ \end{array}\right) $$ and, also, its determinant is bounded and away from zero as $x\to \infty$. This proves that $F$ is an outer lipeomorphism, which gives us that $X$ is LNE and a $\beta$-strip at infinity. \end{proof} Consider the following semialgebraic arcs on the boundary of $T_{\beta}$: $$\gamma_1=\{(x,y)\in T_{\beta} \ \colon \ y=x^{\beta} \} \quad \mbox{and} \quad \gamma_2=\{(x,y)\in T_{\beta} \ \colon \ y=0 \}.$$ In the case of a $\beta$-strip at infinity $X$, its {\it boundary arcs at infinity} are $F^{-1}(\gamma_1)$ and $F^{-1}(\gamma_2)$, where $F \colon X\setminus K\rightarrow T_{\beta}$ is any semialgebraic inner lipeomorphism and $K$ is a compact subset of $X$. \begin{lemma}\label{lemma:glue_2_triangles} Let $X_i$ be a $\beta_i$-strip at infinity, $i=1,2$. If $X_1\cap X_2$ is a common boundary arc to $X_1$ and $X_2$, then $X_1\cup X_2$ is a $\beta$-strip at infinity with $\beta=\max\{\beta_1,\beta_2\}$. \end{lemma} \begin{proof} Let $F_1\colon X_1\rightarrow T_{\beta_1}$ and $F_2\colon X_2\rightarrow T_{\beta_2}^*$ be semialgebraic inner lipeomorphisms, where $T_{\beta}^*=\{(x,y)\in\mathbb{R}^2 \ \colon \ (x,-y)\in T_{\beta}\}$. For each $x\geq 1$, let us denote $r_i(x)=|F_i^{-1}(x,0)|$. We see that $r_i$ is a semialgebraic outer lipeomorphism onto its image, and $R_i(x,y)=(r_i(x),y)$ gives us a semialgebraic outer lipeomorphism ($i=1,2$). Then, we define $$ F(z)= \begin{cases} R_1(F_1(z)) ,& \mbox{if} \ z\in X_1 \\ R_2(F_2(z)) ,& \mbox{if} \ z\in X_2 \end{cases} $$ Since $R_1(F_1(z))=R_2(F_2(z))$ for all $z\in X_1\cap X_2$, we have that $F$ is a continuous semialgebraic mapping. Now, we are going to show that $F$ is an inner Lipschitz mapping.
In fact, we know that $F|_{X_1}$ and $F|_{X_2}$ are inner lipeomorphisms; then there exists a constant $c$ such that $d_{inn}(F(z_1),F(z_2))\leq c\, d_{inn}(z_1,z_2)$ if $z_1,z_2\in X_i$ ($i=1,2$). Thus, let us consider the case $z_1\in X_1$ and $z_2\in X_2$. Let $\gamma$ be a path on $X_1\cup X_2$ connecting $z_1$ to $z_2$ such that $d_{inn}(z_1,z_2)=l(\gamma)$. Then, we can write $\gamma=\gamma_1 * \cdots *\gamma_r$ in such a way that each $\gamma_j$ is a path on $X_1$ or $X_2$. Let us denote by $a_j$ the initial point and $b_j$ the final point of $\gamma_j$. Thus, \begin{eqnarray*} d_{inn}(F(z_1),F(z_2)) &\leq& \sum d_{inn}(F(b_j),F(a_j)) \\ &\leq& \sum c\,d_{inn}(b_j,a_j) \\ &\leq& c\sum l(\gamma_j) \\ &=& cl(\gamma) \\ &=& cd_{inn}(z_1,z_2). \end{eqnarray*} This proves that $F$ is an inner Lipschitz mapping. Similar arguments show that $F^{-1}$ is also an inner Lipschitz mapping. \medskip Finally, we are going to show that the image of $F$ is a $\beta$-strip at infinity. Since $x\mapsto r_i(x)$ is an outer lipeomorphism ($i=1,2$), we have that the image $F(X_1\cup X_2)=R_1(T_{\beta_1})\cup R_2(T_{\beta_2}^*)$ is the following subset of $\mathbb{R}^2$: $$I=\{(x,y)\in\mathbb{R}^2 \ \colon \ x\geq 1 \ \mbox{and} \ -f_2(x)\leq y\leq f_1(x)\}$$ where $f_1,f_2\colon[1,\infty)\rightarrow\mathbb{R} $ are semialgebraic positive functions such that $$ f_i(x) \approx x^{\beta_i} \quad \mbox{as} \quad x\to\infty, \quad i=1,2.$$ Then, we see that the mapping $(x,y)\mapsto (x,y+f_2(x))$ gives a semialgebraic outer lipeomorphism between the image $I$ and the set below: $$ J=\{(x,y)\in\mathbb{R}^2 \ \colon \ x\geq 1 \ \mbox{and} \ 0\leq y\leq f_1(x)+f_2(x)\}.$$ Thus, since $[f_1(x)+f_2(x)]\approx x^{\beta}$ as $x\to \infty$ ($\beta=\max\{\beta_1,\beta_2\}$), by Proposition \ref{prop: triangle-f}, it follows that $J$, and hence $X_1\cup X_2$, is a $\beta$-strip at infinity. \end{proof} \begin{proposition}[Gluing of Strips]\label{lemma:gluing_strips} Let $X_1,\dots,X_r$ be semialgebraic subsets of $\mathbb{R}^n$ such that: \begin{enumerate} \item[a)] $X_i$ is a $\beta_i$-strip at infinity, $i=1,\dots,r$. \item[b)] $X_i\cap X_{i+1}$ is a common boundary arc to $X_i$ and $X_{i+1}$, $i=1,\dots,r-1$. \item[c)] $X_i\cap X_j=\emptyset$ if $|i-j|>1$. \end{enumerate} In this case, $X_1\cup\cdots\cup X_r$ is a $\beta$-strip at infinity, where $\beta=\max\{\beta_1,\dots,\beta_r\}$. \end{proposition} \begin{proof} It is an immediate consequence of Lemma \ref{lemma:glue_2_triangles}, applied successively. \end{proof} As an immediate consequence of the proof of Lemma \ref{lemma:glue_2_triangles}, we can state the following lemma. \begin{lemma}[Parametrization Lemma]\label{lemma:parametrization} Let $X$ be a $\beta$-strip at infinity with boundary arcs $\gamma_1$ and $\gamma_2$. Then, there exist a compact subset $K\subset X$ and a semialgebraic inner lipeomorphism $F\colon X\setminus K\rightarrow T_{\beta}$ such that: \begin{enumerate} \item[-] for $z\in\gamma_1\setminus K$, $F(z)=(|z|,|z|^{\beta})$; \item[-] for $z\in\gamma_2\setminus K$, $F(z)=(|z|,0)$. \end{enumerate} \end{lemma} \subsection{Tubes}\label{sec:tubes} Given a rational number $\beta \leq 1$, let us denote $$P_{\beta}=\{(x,y,z)\in\mathbb{R}^3 \ \colon \ x^2+y^2=z^{2\beta} \ \mbox{and} \ z\geq a\}$$ where $a>0$. It is important to mention that, up to outer lipeomorphisms, the definition of $P_{\beta}$ does not depend on $a$. \begin{definition}\label{def:tube} Let $X\subset\mathbb{R}^n$ be a semialgebraic subset.
We say that $X$ is a {\bf $\beta$-tube} if there exist a compact subset $K\subset\mathbb{R}^n$ and a germ of a semialgebraic inner lipeomorphism $F\colon X\setminus K\rightarrow P_{\beta}$. \end{definition} \begin{remark} There is no ambiguity in the definition of a $\beta$-tube; in other words, if $P_{\beta}$ is inner lipeomorphic to $P_{\beta'}$ then $\beta=\beta'$. \end{remark} \begin{proposition}\label{prop:glue-tube} Let $X_1,\dots,X_r$ be semialgebraic subsets of $\mathbb{R}^n$ such that: \begin{enumerate} \item[a)] $X_i$ is a $\beta_i$-strip at infinity, $i=1,\dots,r$. \item[b)] if $r=2$, then $X_1\cap X_2$ is the union of the boundary arcs of $X_1$ and $X_2$; \item[c)] if $r>2$, then $X_i\cap X_{i+1}$ is a common boundary arc to $X_i$ and $X_{i+1}$, $i=1,\dots,r$ (here, $X_{r+1}:=X_1$) and $X_i\cap X_j=\emptyset$ if $1<|i-j|<r$. \end{enumerate} In this case, $X_1\cup\cdots\cup X_r$ is a $\beta$-tube, where $\beta=\max\{\beta_1,\dots,\beta_r\}$. \end{proposition} \begin{proof} Without loss of generality, one may assume $\beta_1 = \beta$. Let us write $X_1$ as a union of two other $\beta$-strips at infinity $X_{1,1}$ and $X_{1,2}$ such that $X_{1,1}\cap X_{1,2}$ is a common boundary arc to $X_{1,1}$ and $X_{1,2}$. One may suppose that $X_{1,2}$ shares a boundary arc with $X_2$ and $X_{1,1}$ shares a boundary arc with $X_r$. So, the family $X_{1,2},X_2,\dots,X_r$ satisfies the conditions of the Gluing of Strips in Proposition \ref{lemma:gluing_strips}, hence $Y_2=X_{1,2}\cup X_2\cup\cdots\cup X_r$ is a $\beta$-strip at infinity. Then, $X=X_1\cup\cdots\cup X_r$ is the union of the two $\beta$-strips at infinity $Y_1=X_{1,1}$ and $Y_2$ (defined above), such that $Y_1\cap Y_2$ is the union of the boundary arcs of $Y_1$ and $Y_2$. Let us consider the following decomposition of $P_{\beta}$: $$P_{\beta}^1=\{(x,y,z)\in P_{\beta} \ \colon \ x\geq 0\} \ \mbox{and} \ P_{\beta}^2=\{(x,y,z)\in P_{\beta} \ \colon \ x\leq 0\}.$$ We see that $P_{\beta}^1$ and $P_{\beta}^2$ are $\beta$-strips at infinity, $P_{\beta}=P_{\beta}^1\cup P_{\beta}^2$, and $P_{\beta}^1\cap P_{\beta}^2$ is the union of the boundary arcs of $P_{\beta}^1$ and $P_{\beta}^2$. Since $P_{\beta}^1$ and $P_{\beta}^2$ are $\beta$-strips at infinity, there are a compact subset $K$ of $\mathbb{R}^n$ and semialgebraic inner lipeomorphisms $$ F_1\colon Y_1\setminus K\rightarrow P_{\beta}^1 \quad \mbox{and} \quad F_2\colon Y_2\setminus K\rightarrow P_{\beta}^2 $$ such that $|F_i(z)|=|z|$ for any $z$ belonging to the boundary arcs of $Y_i$, $i=1,2$ (see the Parametrization Lemma \ref{lemma:parametrization}). Finally, the mapping $F\colon X\setminus K\rightarrow P_{\beta}$ defined by $$ F(z)= \begin{cases} F_1(z) ,& \mbox{if} \ z\in Y_1\setminus K \\ F_2(z) ,& \mbox{if} \ z\in Y_2\setminus K \end{cases} $$ is a semialgebraic inner lipeomorphism. \end{proof} \begin{theorem}\label{thm:beta_ends} Let $S\subset\mathbb{R}^n$ be a semialgebraic surface with isolated inner Lipschitz singularities. For each end of $S$, let us say $S_i$, there is a unique rational $\beta_i\leq 1$ such that $S_i$ is a $\beta_i$-tube. \end{theorem} In order to prove this theorem, we need to recall the notion of L-regular sets. Such subsets of $\mathbb{R}^n$ are defined by induction on $n$ (see \cite{KP}). Given $x\in\mathbb{R}^n$, let us write $x=(x',x_n)\in\mathbb{R}^{n-1}\times\mathbb{R}$.
A semialgebraic subset $X\subset\mathbb{R}^n$ is called a {\it standard L-regular cell} in $\mathbb{R}^n$, with constant $C>0$, if: $X=\{0\}$ for $n=0$, and for $n>0$ the set $X$ is of one of the following types: \medskip \noindent{(\it graph)} $$X=\{(x',x_n)\in\mathbb{R}^{n-1}\times\mathbb{R} \ \colon \ x_n=h(x'); \ x'\in X'\}$$ \medskip \noindent{(\it band)} $$X=\{(x',x_n)\in\mathbb{R}^{n-1}\times\mathbb{R} \ \colon \ f(x')<x_n<g(x'); \ x'\in X'\}$$ \noindent where $X'\subset\mathbb{R}^{n-1}$ is a standard L-regular cell in $\mathbb{R}^{n-1}$ with constant $C$, $f,g,h\colon X'\rightarrow\mathbb{R}$ are $C^1$ semialgebraic functions such that $$f(x')<g(x') \quad \forall \ x'\in X'$$ and $$ |df(x')|\leq C, \ |dg(x')|\leq C\ \mbox{and} \ \ |dh(x')|\leq C, \quad \forall \ x'\in X'.$$ In general, a semialgebraic subset $Z\subset\mathbb{R}^n$ is called an {\it L-regular cell} in $\mathbb{R}^n$, with constant $C>0$, if there exists an orthogonal change of variables $\Psi\colon\mathbb{R}^n\rightarrow\mathbb{R}^n$ such that $\Psi(Z)$ is a standard L-regular cell in $\mathbb{R}^n$ with constant $C$. \begin{proposition}\label{prop:regular-cell} Let $X\subset\mathbb{R}^n$ be a 2-dimensional L-regular cell in $\mathbb{R}^n$ (with constant $C>0$). If $X$ is unbounded and has only one end, then $\overline{X}$ is a $\beta$-strip at infinity for some rational number $\beta\leq 1$. \end{proposition} \begin{proof} It is enough to assume that $X$ is a standard L-regular cell in $\mathbb{R}^n$. This proof is by induction on $n$. Since $X$ is 2-dimensional, we have $n\geq 2$. \medskip \noindent{\it Case $n=2$}. In this case, necessarily $X$ is a band, let us say $$X=\{(x_1,x_2)\in\mathbb{R}\times\mathbb{R} \ \colon \ f(x_1)<x_2<g(x_1), \ x_1\in X'\}$$ where $X'$ is an open interval in $\mathbb{R}$. Since $X$ is unbounded and $|df(x_1)|\leq C$ and $|dg(x_1)|\leq C$ for all $x_1\in X'$, we get that $X'$ is also unbounded and has only one end. Let us suppose $X'=(a,\infty)$. Thus, the closure $\overline{X}$ is outer lipeomorphic to the set $$\{(x_1,x_2)\in\mathbb{R}\times\mathbb{R} \ \colon \ 0\leq x_2 \leq g(x_1)-f(x_1), \ x_1\geq a\}.$$ Since $g-f$ is a positive semialgebraic function with bounded derivative, we have $g(x_1)-f(x_1)\approx x_1^{\beta}$ as $x_1\to\infty$ for some rational number $\beta\leq 1$; hence this set is a $\beta$-strip at infinity, according to Proposition \ref{prop: triangle-f}. \medskip \noindent{\it Case $n>2$}. In this case, $X$ can be either a graph or a band. First, let $X$ be a graph. By the induction hypothesis, $\overline{X'}$ is a $\beta$-strip at infinity for some rational number $\beta\leq 1$. Since $X$ is the graph of an outer Lipschitz function on a $\beta$-strip at infinity, we get that $X$ itself is a $\beta$-strip at infinity ($\beta\leq 1$). Now, let us consider the case where $X$ is a band $$X=\{(x',x_n)\in\mathbb{R}^{n-1}\times\mathbb{R} \ \colon \ f(x')<x_n<g(x'); \ x'\in X'\}$$ \noindent where $X'\subset\mathbb{R}^{n-1}$ is a (1-dimensional) standard L-regular cell in $\mathbb{R}^{n-1}$ with constant $C$, $f,g\colon X'\rightarrow\mathbb{R}$ are $C^1$ semialgebraic functions such that $$f(x')<g(x') \quad \forall \ x'\in X'$$ and $$ |df(x')|\leq C, \ |dg(x')|\leq C \quad \forall \ x'\in X'.$$ Since $X'$ is 1-dimensional, unbounded and has only one end, there exists a $C^1$-semialgebraic parametrization (outer lipeomorphism) $\gamma\colon(a,\infty)\rightarrow X'$; hence the closure $\overline{X}$ is semialgebraically outer lipeomorphic to the set $$\{(t,s)\in\mathbb{R}\times\mathbb{R} \ \colon \ f\circ\gamma(t)\leq s \leq g\circ\gamma(t), \ t\geq a\}$$ which implies, as in the case $n=2$, that $\overline{X}$ is a $\beta$-strip at infinity for some rational number $\beta\leq 1$.
\end{proof} It is proved in \cite{KP} (see, more precisely, Proposition 1.4 therein) that any semialgebraic subset $X\subset\mathbb{R}^n$ can be stratified by L-regular cells in $\mathbb{R}^n$ with a constant $C=c(n)>0$. Now, we are ready to prove Theorem \ref{thm:beta_ends}. \begin{proof}[Proof of Theorem \ref{thm:beta_ends}] Let $R>0$ be a sufficiently large radius such that the connected components of $S\setminus B(0,R)$ are the ends of $S$. Let $X=S_i$ be one of those ends. As we mentioned above, we have a stratification $\displaystyle X=\bigcup_{i=1}^r C_i$ such that each stratum $C_i$ is an L-regular cell in $\mathbb{R}^n$ with constant $C=c(n)>0$. By taking $R>0$ large enough, one may suppose that all 2-dimensional strata of $X$ are unbounded (hence all of them have only one end). Then, let $C_{i_1},\dots,C_{i_k}$ be the 2-dimensional strata of $X$. It follows from Proposition \ref{prop:regular-cell} that the closure $\overline{C_{i_j}}$ of each cell $C_{i_j}$ is a $\beta_j$-strip at infinity for some rational number $\beta_j\leq 1$. Since $$X=\bigcup_{j=1}^k\overline{C_{i_j}}$$ and, by topological restrictions, the family $\overline{C_{i_1}},\dots,\overline{C_{i_k}}$ satisfies the assumptions of Proposition \ref{prop:glue-tube}, we get that $X$ is a $\beta$-tube, where $\beta=\max\{\beta_1,\dots,\beta_k\}$. \end{proof} \section{Classification of semialgebraic surfaces}\label{sec:classification} In this Section, we are going to present a classification of all semialgebraic surfaces with isolated singularities. \begin{remark}\label{rem:horn_exponent} Let $X\subset\mathbb{R}^n$ be a closed 2-dimensional semialgebraic set. According to the notion of topological regular points, we can read Theorem \ref{thm:birbrair} (the Theorem of Birbrair) in the following way: if $p\in X$ is a topological regular point, then there exist a neighborhood $V\subset X$ and a semialgebraic inner lipeomorphism $\phi\colon V\rightarrow H_{\beta}$; $\phi(p)=(0,0,0)$, where $\beta\geq 1$ is a rational number and $$H_{\beta}=\{(x,y,z)\in\mathbb{R}^3 \ \colon \ x^2+y^2=z^{2\beta} \ \mbox{and} \ z\geq 0\}.$$ The rational number $\beta$ is called the {\bf horn exponent} of $X$ at $p$. Notice that it also follows from the Theorem of Birbrair that a point $p\in X$ is inner Lipschitz regular if, and only if, the horn exponent of $X$ at $p$ is equal to $1$. \end{remark} \begin{definition}\label{def:symbols_code} Let $X\subset\mathbb{R}^n$ be a semialgebraic surface with isolated inner Lipschitz singularities, and let us write ${\rm Sing}_{inLip}(X)=\{x_1,\dots,x_s\}$. Let us consider the following symbols: \begin{enumerate} \item [i)] For $p\in {\rm Sing}_{inLip}(X) $, $\ell(X,p)$ denotes the number of connected components of the link of $X$ at $p$; \item [ii)] We can consider a sufficiently large radius $R>0$ and a sufficiently small radius $\rho >0$ such that $$ X'=(X\cap \overline{B(0,R)})\setminus \bigg\{ B(x_1,\rho)\cup\cdots\cup B(x_s,\rho) \bigg\} $$ is a topological surface with boundary and its topological type does not depend on $R$ and $\rho$. Thus, we define $$\theta(X)= \begin{cases} \ \ 1, \ \mbox{if} \ X' \ \mbox{is orientable} \\ -1, \ \mbox{if} \ X' \ \mbox{is not orientable}. \end{cases} $$ \item[iii)] $g(X)$ is the genus of $X'$; \item [iv)] For each $p\in X $, there is $r>0$ such that $$X\cap B(p,r)=\bigcup\limits_{i=1}^{\ell(X,p)}X_i$$ and each $X_i$ is a topological surface. Let $\beta_i$ be the horn exponent of $X_i$ at $p$ (see Remark \ref{rem:horn_exponent}).
By reordering the indices, if necessary, we assume that $\beta_1\leq \beta_2\leq \cdots \leq \beta_{\ell(X,p)}$. In this way, we define $\beta(X,p)=(\beta_1, \beta_2, \dots, \beta_{\ell(X,p)})$. \item[v)] $e(X)$ is the number of ends of $X$, and if $E_1,\dots,E_{e(X)}$ are the ends of $X$, then we denote by $\beta_i$ the tube exponent of $E_i$, that is, the unique rational number $\beta_i\leq 1$ such that $E_i$ is a $\beta_i$-tube. By reordering the indices, if necessary, we assume that $\beta_1\leq \beta_2\leq \cdots \leq \beta_{e(X)}$. In this way, we define $\beta(X,\infty)=(\beta_1, \beta_2,\dots,\beta_{e(X)})$. \end{enumerate} \end{definition} \begin{definition}[Inner Lipschitz code]\label{def:inner_code} Let $X\subset\mathbb{R}^n$ be a semialgebraic surface with isolated inner Lipschitz singularities. Let $S=\{p_1,...,p_k\}\subset X$ be a finite subset such that ${\rm Sing}_{inLip}(X)\subset S$ and let $\sigma\colon S\to \tilde S$ be a bijection for some subset $\tilde S$ in some Euclidean space. \begin{itemize} \item If ${\rm Reg}_{inLip}(X)$ is a connected set, then the collection of symbols $$\bigg\{ \theta(X),g(X),\beta(X,\infty),\{(\sigma(p);\beta(X,p))\}_{p\in S} \bigg\}$$ is called the {\bf inner Lipschitz code of $X$ w.r.t. $\sigma$} and we denote it by ${\rm Code}_{inLip}(X,\sigma)$. The collection of symbols $$\bigg\{ \theta(X),g(X),\beta(X,\infty),\{\beta(X,p)\}_{p\in S} \bigg\}$$ is called the {\bf inner Lipschitz code of $X$} and we denote it by ${\rm Code}_{inLip}(X)$; \item For the general case, let $C_1,...,C_r$ be the closures of the connected components of ${\rm Reg}_{inLip}(X)$. The collection of inner Lipschitz codes $$\bigg\{ {\rm Code}_{inLip}(C_1,\sigma|_{C_1\cap S}),\cdots, {\rm Code}_{inLip}(C_r,\sigma|_{C_r\cap S}) \bigg\}$$ is called the {\bf inner Lipschitz code of $X$ w.r.t. $\sigma$} and we also denote it by ${\rm Code}_{inLip}(X,\sigma)$. When $S={\rm Sing}_{inLip}(X)$ and $\sigma$ is the identity, we simply denote ${\rm Code}_{inLip}(X,\sigma)$ by ${\rm Code}_{inLip}(X)$ and we also call it the {\bf inner Lipschitz code of $X$}. \end{itemize} \end{definition} \begin{example} Let us see the inner Lipschitz code of some well-known semialgebraic topological surfaces. \begin{enumerate} \item[a)] Right cylinder: $\{1,0,(0,0),\emptyset\}$; \item[b)] Unbounded Moebius band $\{(x,y,u,v)\in \mathbb{R}^4: \ x^2+y^2=1, \ (u^2-v^2)y=2uv x \}$: $\{-1,0,1,\emptyset\}$; \item[c)] Global $\beta$-horn in $\mathbb{R}^3$; $\beta\geq 1$: $\{1,0,1,\{\beta\}\}$; \item[d)] $\{(z,w)\in\mathbb{C}^2 \ \colon \ z^2=w(w-a)(w-b)\}$; $a,b\neq 0$ and $a\neq b$: $\{1,1,(1,1,1),\emptyset \}$; \item[e)] Paraboloid in $\mathbb{R}^3$: $\{1,0,1/2,\emptyset\}$; \item[f)] Torus: $\{1,1,\emptyset,\emptyset\}$; \item[g)] Klein bottle: $\{-1,1,\emptyset,\emptyset\}$; \item[h)] Edge of two spheres $\{(x,y,z)\in\mathbb{R}^3;((x-1)^2+y^2+z^2-1)((x+1)^2+y^2+z^2-1)=0\}$: $\{\{1,0,\emptyset,\{((0,0,0);1)\}\}, \{1,0,\emptyset,\{((0,0,0);1)\}\}\}$; \item[i)] Cayley surface $\{(x,y,z)\in\mathbb{R}^3;x^2+y^2+z^2-2xyz=1\}$ (see Figure 1): $\{\{1,0,1,\{(p_1;1)\}\}$, $\{1,0,1,\{(p_2;1)\}\}$, $\{1,0,1,\{(p_3;1)\}\}$, $\{1,0,1,\{(p_4;1)\}\}$, $\{1,0,\emptyset,\{(p_1;1),(p_2;1),(p_3;1),(p_4;1)\}\}\}$. \end{enumerate} \end{example} \begin{figure}[H] \centering \includegraphics[scale=0.04]{cayley_surface.png} \caption{Decomposition of the Cayley surface $\{(x,y,z)\in\mathbb{R}^3;x^2+y^2+z^2-2xyz=1\}$.}\label{figure1} \end{figure} \begin{definition} Let $X$ and $Y$ be two semialgebraic sets.
We say that ${\rm Code}_{inLip}(X)$ and ${\rm Code}_{inLip}(Y)$ are equivalent if one of the following items holds true: \begin{enumerate} \item ${\rm Reg}_{inLip}(X)$ and ${\rm Reg}_{inLip}(Y)$ are connected sets and ${\rm Code}_{inLip}(X)={\rm Code}_{inLip}(Y)$; \item ${\rm Reg}_{inLip}(X)$ and ${\rm Reg}_{inLip}(Y)$ are disconnected sets and ${\rm Code}_{inLip}(X,\sigma)={\rm Code}_{inLip}(Y)$ for some bijection $\sigma\colon {\rm Sing}_{inLip}(X)\to {\rm Sing}_{inLip}(Y)$. \end{enumerate} \end{definition} \begin{theorem}\label{thm:class_surf} Let $X\subset\mathbb{R}^n$ and $Y\subset\mathbb{R}^m$ be semialgebraic surfaces with isolated inner Lipschitz singularities. Then, $X$ and $Y$ are inner lipeomorphic if, and only if, their inner Lipschitz codes are equivalent. \end{theorem} \begin{proof} Of course, the inner Lipschitz code is an inner Lipschitz invariant, in the following sense: if $X$ and $Y$ are inner lipeomorphic, then their codes are equivalent. On the other hand, let us suppose that the inner Lipschitz codes of $X$ and $Y$ are equivalent. Let us assume, initially, that ${\rm Reg}_{inLip}(X)$ (and, consequently, ${\rm Reg}_{inLip}(Y)$) is a connected set. Let us denote by $E_1^X,\dots,E_{e}^X$ the ends of $X$, with respective tube exponents $\beta_1(X)\leq \dots\leq\beta_{e}(X)$, and $E_1^Y,\dots,E_{e}^Y$ the ends of $Y$, with respective tube exponents $\beta_1(Y)\leq \dots\leq\beta_{e}(Y)$. Also, let us denote by $x_1,\dots,x_s$ the inner Lipschitz singularities of $X$, with respective horn exponents $\beta(X,x_1),\dots,\beta(X,x_s)$, and $y_1,\dots,y_s$ the inner Lipschitz singularities of $Y$, with respective horn exponents $\beta(Y,y_1),\dots,\beta(Y,y_s)$. So, we are assuming that $\theta(X)=\theta(Y)$, $g(X)=g(Y)$, $\beta(X,\infty)=\beta(Y,\infty)$ and $\beta(X,x_j)=\beta(Y,y_j)$, $j=1,\dots,s$. Then, we can consider a sufficiently large radius $R>0$ and a sufficiently small radius $\rho >0$ such that, for each $i\in \{1,\dots,e\}$ and $j\in \{1,\dots,s\}$, there exist semialgebraic inner lipeomorphisms $$ h_i \colon E_i^X\setminus B(0,R)\rightarrow E_i^Y\setminus B(0,R)\quad \mbox{and}\quad g_{j}\colon X\cap \overline{B(x_j,\rho)}\to Y\cap \overline{B(y_j,\rho)}. $$ In fact, the existence of the $h_i$'s follows from Theorem \ref{thm:beta_ends}, and by writing $X\cap \overline{B(x_j,\rho)}=\bigcup\limits_{\ell=1}^{\ell(X,x_j)}X_{j\ell}$ (resp. $Y\cap \overline{B(y_j,\rho)}=\bigcup\limits_{\ell=1}^{\ell(Y,y_j)}Y_{j\ell}$) and $X_{j\ell}\cap X_{j\ell'}=\{x_j\}$ (resp. $Y_{j\ell}\cap Y_{j\ell'}=\{y_j\}$) whenever $\ell\not =\ell'$, by the Theorem of Birbrair, there are inner lipeomorphisms $g_{j\ell}\colon X_{j\ell}\to Y_{j\ell}$. So, we define $g_{j}\colon X\cap \overline{B(x_j,\rho)}\to Y\cap \overline{B(y_j,\rho)}$ by $g_{j}(z)=g_{j\ell}(z)$ whenever $z\in X_{j\ell}$.
Now, we consider the following Lipschitz surfaces with boundary $$X'=(X\cap \overline{B(0,R)})\setminus \bigg\{ B(x_1,\rho)\cup\cdots\cup B(x_s,\rho) \bigg\}$$ and $$Y'=(Y\cap \overline{B(0,R)})\setminus \bigg\{ B(y_1,\rho)\cup\cdots\cup B(y_s,\rho) \bigg\},$$ and the following semialgebraic lipeomorphism $\kappa\colon\partial X'\rightarrow\partial Y'$ given by: $$\kappa(z)= \begin{cases} h_i(z), \ \mbox{if} \ z\in E_i^X; \ |z|=R \\ g_{j}(z), \ \mbox{if} \ z\in X; \ |z-x_j|=\rho \end{cases}.$$ Since $X'$ is orientable if, and only if, $Y'$ is orientable too, and $X'$ has the same genus and same number of boundary components as $Y'$, the following result follows from Proposition \ref{prop:lipeotopy} (possibly after changing the orientation of some $h_i$'s and $g_{j\ell}$'s). \begin{lemma}\label{lemma:extension} There exists a lipeomorphism $\Phi\colon X'\rightarrow Y'$ that extends $\kappa\colon\partial X'\rightarrow\partial Y'$. \end{lemma} Finally, the mapping $F\colon X\rightarrow Y$ defined below is an inner lipeomorphism: $$F(z)= \begin{cases} h_i(z), \ \mbox{if} \ z\in E_i^X; \ |z|\geq R \\ g_j(z), \ \mbox{if} \ z\in X; \ |z-x_j|\leq \rho \\ \Phi(z), \ \mbox{if} \ z\in X' \end{cases},$$ which finishes the proof in this case. Now, we have to consider the case that ${\rm Reg}_{inLip}(X)$ and ${\rm Reg}_{inLip}(Y)$ are disconnected sets. For this case, let $X_1,...,X_r$ (resp. $Y_1,...,Y_r$) be the closures of the connected components of ${\rm Reg}_{inLip}(X)$ (resp. ${\rm Reg}_{inLip}(Y)$). Since we have assumed that ${\rm Code}_{inLip}(X)$ and ${\rm Code}_{inLip}(Y)$ are equivalent, we have ${\rm Code}_{inLip}(X,\sigma)={\rm Code}_{inLip}(Y)$ for some bijection $\sigma\colon S={\rm Sing}_{inLip}(X)\to \tilde S={\rm Sing}_{inLip}(Y)$. By reordering the indices, if necessary, we may assume that ${\rm Code}_{inLip}(X_i,\sigma|_{X_i\cap S})={\rm Code}_{inLip}(Y_i, id_{\tilde S}|_{Y_i\cap \tilde S})$, $i=1,...,r$, where $id_{\tilde S}\colon \tilde S\to \tilde S$ is the identity mapping. For a closed semialgebraic set $A$ and $p\in A$, we have that $p\in {\rm Reg}_{inLip}(A)$ if and only if $\beta(A,p)=1$. Thus, for fixed $i\in \{1,...,r\}$, setting $S_i=(X_i\cap S)\setminus {\rm Sing}_{inLip}(X_i)$ and $\tilde S_i=(Y_i\cap \tilde S)\setminus {\rm Sing}_{inLip}(Y_i)$, we have $\sigma(S_i)=\tilde S_i$, and therefore ${\rm Code}_{inLip}(X_i)={\rm Code}_{inLip}(Y_i)$. By the first part of this proof, there is an inner lipeomorphism $F_i\colon X_i\to Y_i$. Moreover, we can take $F_i$ satisfying $F_i(p)=\sigma(p)$ for all $p\in X_i\cap S$. Thus, the mapping $F\colon X\rightarrow Y$, defined by $F(z)= F_i(z)$ whenever $z\in X_i$, is an inner lipeomorphism, which finishes the proof. \end{proof} From now on, we start to list some consequences of Theorem \ref{thm:class_surf} and its proof. The first consequence is a classification of the Nash surfaces, even for unbounded Nash surfaces as in Figure \ref{figure2}. \begin{figure}[H] \centering \includegraphics[scale=0.5]{nash_surface.png} \caption{An oriented Nash surface with 5 ends and genus 4.}\label{figure2} \end{figure} \begin{corollary}\label{cor:classf_nash_surfaces} Let $N_1,N_2\subset \mathbb{R}^n$ be two Nash surfaces. Then, the following statements are equivalent: \begin{itemize} \item [(1)] $N_1$ and $N_2$ are homeomorphic and $\beta(N_1,\infty)=\beta(N_2,\infty)$; \item [(2)] $N_1$ and $N_2$ are inner lipeomorphic; \item [(3)] $\theta(N_1)=\theta(N_2)$, $g(N_1)=g(N_2)$ and $\beta(N_1,\infty)=\beta(N_2,\infty)$.
\end{itemize} \end{corollary} \begin{remark} Since properly embedded smooth surfaces in $\mathbb{R}^3$ are orientable, we obtain that two Nash surfaces $N_1$ and $N_2$ in $\mathbb{R}^3$ are inner lipeomorphic if and only if $g(N_1)=g(N_2)$ and $\beta(N_1,\infty)=\beta(N_2,\infty)$. \end{remark} In fact, we obtain a stronger result than Corollary \ref{cor:classf_nash_surfaces}, since we can present normal forms for the classification given in Corollary \ref{cor:classf_nash_surfaces}. In order to do that, for $\theta \in \{-1,1\}$ and $g\in \mathbb{N}$, let $N(\theta, g)\subset \mathbb{R}^5$ be a compact Nash surface such that $\theta(N(\theta, g))=\theta$ and $g(N(\theta, g))=g$. For a positive integer number $e$ and $\beta=(\beta_1,...,\beta_e)\in \mathbb{Q}^e$ such that $\beta_1\leq \beta_2\leq \cdots\leq \beta_e\leq 1$, we remove $e$ distinct points of $N(\theta, g)$, let us say $x_1,...,x_e\in N(\theta, g)$, and we define $F\colon N(\theta, g)\setminus \{x_1,...,x_e\}\to \mathbb{R}^{6e}$ given by $$ F(x)=(\frac{x-x_1}{\|x-x_1\|^{1+\beta_1}},\|x-x_1\|^{-1},\frac{x-x_2}{\|x-x_2\|^{1+\beta_2}},\|x-x_2\|^{-1}, ..., \frac{x-x_e}{\|x-x_e\|^{1+\beta_e}},\|x-x_e\|^{-1}). $$ We denote the image of $F$, which is a Nash surface, by $N(\theta, g,\beta)$. We also define $N(\theta, g,\emptyset)=N(\theta, g)$. Note that $\theta(N(\theta, g,\beta))=\theta$, $g(N(\theta, g,\beta))=g$ and $\beta(N(\theta, g,\beta),\infty)=\beta$. Thus, $N(\theta, g,\beta)$ is well defined up to inner lipeomorphisms, and we obtain the following: \begin{corollary}\label{cor:normal_forms_classf_nash_surfaces} Let $N\subset \mathbb{R}^n$ be a Nash surface. Then, $N(\theta(N),g(N),\beta(N,\infty))$ and $N$ are inner lipeomorphic. \end{corollary} \begin{corollary} Let $M_1,M_2\subset \mathbb{R}^3$ be two connected properly embedded minimal surfaces with finite total curvature. Then, the following statements are equivalent: \begin{itemize} \item [(1)] $M_1$ and $M_2$ are homeomorphic; \item [(2)] $M_1$ and $M_2$ are inner lipeomorphic; \item [(3)] $g(M_1)=g(M_2)$ and $e(M_1)=e(M_2)$. \end{itemize} \end{corollary} \begin{proof} Obviously, (2) implies (1), and (1) implies (3). Thus, we only have to show that (3) implies (2). Let us assume $g(M_1)=g(M_2)$ and $e(M_1)=e(M_2)$. Since the tangent cone at infinity of each end of a properly embedded minimal surface with finite total curvature is a plane, it follows from, for example, Lemma 1 in \cite{BelenkiiB:2005} that such an end is inner lipeomorphic to an end of $\mathbb{R}^2$; in particular, each end is a $1$-tube. By the proof of Theorem \ref{thm:class_surf}, $M_1$ and $M_2$ are inner lipeomorphic. \end{proof} \begin{remark}\label{non-degen-tubes} Let $X\subset \mathbb{R}^n$ be a closed semialgebraic surface which is a $\beta$-tube. Then, $\beta=1$ if and only if $\dim C(X,\infty)=2$. \end{remark} \begin{corollary} Let $C_1,C_2\subset \mathbb{C}^2$ be two complex algebraic curves. Then, the following statements are equivalent: \begin{itemize} \item [(1)] $C_1$ and $C_2$ are homeomorphic; \item [(2)] $C_1$ and $C_2$ are inner lipeomorphic; \item [(3)] If $X_1,...,X_r$ and $Y_1,...,Y_s$ are the irreducible components of $C_1$ and $C_2$, respectively, then there exist bijections $\pi\colon \{1,...r\}\to \{1,...,s\}$ and $\sigma\colon {\rm Sing}_{inLip}(C_1)\to {\rm Sing}_{inLip}(C_2)$ such that $g(X_i)=g(Y_{\pi(i)})$, $e(X_i)=e(Y_{\pi(i)})$ and $\ell(X_i,p)=\ell(Y_{\pi(i)},\sigma(p))$ for all $p\in {\rm Sing}_{inLip}(C_1)$, $i=1,...,r$. \end{itemize} \end{corollary} \begin{proof} Obviously, (2) implies (1).
We are going to show that (1) implies (2). Assume that there is a homeomorphism $h\colon C_1\to C_2$. Therefore, $g(C_1)=g(C_2)$ and $e(C_1)=e(C_2)$. Since the tangent cone at infinity of each end of a complex algebraic curve is a complex line, by Remark \ref{non-degen-tubes}, we obtain that each such end is a $1$-tube. Thus $\beta(C_1,\infty)=\beta(C_2,\infty)$. It follows from the Birbrair Theorem that two irreducible germs of complex analytic curves are inner lipeomorphic. In particular, $p\in {\rm Sing}_{inLip}(C_1)$ if and only if $\ell(C_1,p)>1$. Thus, $h({\rm Sing}_{inLip}(C_1))={\rm Sing}_{inLip}(C_2)$ and $\sigma=h|_{{\rm Sing}_{inLip}(C_1)}$ is a bijection. Moreover, each irreducible germ of a complex analytic curve is the germ of a $1$-horn. Therefore, $\beta(C_1,p)=\beta(C_2,\sigma(p))$ for all $p\in {\rm Sing}_{inLip}(C_1)$. Then ${\rm Code}_{inLip}(C_1,\sigma)={\rm Code}_{inLip}(C_2)$. By Theorem \ref{thm:class_surf}, $C_1$ and $C_2$ are inner lipeomorphic. In order to finish the proof, due to the comments made in this proof, we note that item (3) is equivalent to saying that ${\rm Code}_{inLip}(C_1,\sigma)$ and ${\rm Code}_{inLip}(C_2)$ are equivalent, which finishes the proof. \end{proof}
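\begin{example} As a simple illustration of the above corollary, let $C_1=\{(z,w)\in\mathbb{C}^2 \ \colon \ w=0\}$ and $C_2=\{(z,w)\in\mathbb{C}^2 \ \colon \ z^2=w^3\}$. The cuspidal cubic $C_2$ is parametrized by $t\mapsto (t^3,t^2)$, so both curves are irreducible and homeomorphic to $\mathbb{C}$; in particular, $g(C_1)=g(C_2)=0$ and $e(C_1)=e(C_2)=1$. Moreover, by the facts recalled in the proof above, ${\rm Sing}_{inLip}(C_1)={\rm Sing}_{inLip}(C_2)=\emptyset$, since the germ of $C_2$ at the origin is irreducible and hence the germ of a $1$-horn. Therefore, $C_1$ and $C_2$ are inner lipeomorphic. \end{example}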
\section{Introduction} \subsection{Problem setup and background} Mean Field Games (abbreviated as MFGs in what follows) are differential games involving non-atomic players, where one aims to study the behaviours of a large population of symmetric agents as the number of agents goes to infinity. They provide quantitative modelling of the macroscopic behaviours of the agents who wish to minimise a certain cost. The theory of MFGs was pioneered by Caines-Huang-Malhame \cite{HCM06,HCM071,HCM072,HCM073} and Lasry-Lions \cite{LL06a, LL06b, LL07a, Lions} independently in 2006, and since then has received considerable attention and increasing studies in the literature. We refer to the books \cite{17} and the lecture notes \cite{CarPor} as well as the references cited therein for more related discussions. One of the main features of an MFG is the existence of the so-called monotone regime; in this regime, the Nash equilibrium exists and is unique. The Nash equilibria of the MFGs can be characterized by the so-called master equation, which is a system of PDEs if the state space of the players is continuous. Next, we introduce the coupled PDE system which forms the forward model problem for our subsequent inverse problem study. Let $\mathbb{R}^n$ be the Euclidean space with $n\in\mathbb{N}$, and $\Omega'\subset\mathbb{R}^n$ be a bounded Lipschitz domain, which signifies the state space. Let $x\in\Omega'$ be the state variable and $t\in [0, \infty)$ be the time variable. Let $\mathcal{P}$ stand for the set of Borel probability measures on $\mathbb{R}^n$, and $\mathcal{P}(\Omega')$ stand for the set of Borel probability measures on $\Omega'$. Let $m\in\mathcal{P}(\Omega')$ denote the population distribution of the agents and $u(x, t):\Omega'\times [0, T]\mapsto \mathbb{R}$ denote the value function of each player. Here, $T\in\mathbb{R}_+$ signifies the terminal time in what follows. The MFG system for our study is introduced as follows: \begin{equation}\label{main} \left\{ \begin{array}{ll} \displaystyle{-\partial_t u(x,t) -\Delta u(x,t)+\frac{1}{2}|\nabla u(x,t)|^2-F(x, m(x,t))=0} & {\rm{in}}\ Q',\medskip\\ \displaystyle{\partial_tm(x,t)-\Delta m(x,t)-{\rm div} \big(m(x,t) \nabla u(x,t)\big)=0} & {\rm{in}}\ Q',\medskip\\ \p_{\nu} u(x,t)=\p_{\nu} m(x,t)=0 & {\rm{on}}\ \Sigma',\medskip\\ u(x,T)=G(x,m(x,T)),\ m(x,0)=m_0(x) & {\rm{in}}\ \Omega',\medskip \end{array} \right. \end{equation} where $\Delta$ and ${\rm div}$ are the Laplacian and divergence operators with respect to the $x$-variable, respectively; $\Sigma':=\p\Omega'\times[0,T]$, $Q':=\overline{\Omega'}\times[0,T]$, and $\nu$ is the exterior unit normal to $\partial\Omega'$. In \eqref{main}, $F:\Omega'\times\mathcal{P}(\Omega')\mapsto\mathbb{R} $ is the running cost function which signifies the interaction between the agents and the population; $m_0$ is the initial population distribution and $G:\Omega'\times\mathcal{P}(\Omega')\mapsto\mathbb{R}$ signifies the terminal cost. The MFG system \eqref{main} is also known as the first-order master equation, which corresponds to the case that the volatility of the common noise among small players is vanishing \cite{CCP}. The master equation can be understood as an optimal nonlinear transport in the space of probability measures. The single player's value function $u$ satisfies the Hamilton-Jacobi-Bellman (HJB) equation, namely the first equation in \eqref{main}, where the Hamiltonian is the canonical quadratic form $|\nabla u|^2/2$.
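For the reader's convenience, we recall the standard elementary computation behind this terminology (a side remark which is not needed in the sequel): the quadratic Hamiltonian arises as the Legendre transform of the quadratic kinetic cost, and the maximising feedback control is the negative gradient of the value function, namely
$$
\frac{1}{2}|p|^2=\sup_{a\in\mathbb{R}^n}\Big(-a\cdot p-\frac{1}{2}|a|^2\Big),\qquad a^*=-p\Big|_{p=\nabla u}=-\nabla u(x,t),
$$
which is consistent with the drift term ${\rm div}\big(m\nabla u\big)$ appearing in the second equation of \eqref{main}.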
The distribution law of the population $m$ is described by the Kolmogorov-Fokker-Planck (KFP) equation, namely the second equation in \eqref{main}. When the players have to control a process in a bounded domain $\Omega'\subset\mathbb{R}^n$ with reflection on the boundary of $\Omega'$, the system is complemented with the Neumann boundary conditions. Many economic and financial models are described by this system; see, for instance, the models in \cite{model}. It is also interesting to note that the HJB equation is given backward in time, whereas the KFP equation is forward in time. In this paper, we focus on the case that $F$ and $G$ depend on $m$ locally. Due to this fact, we can define \begin{equation}\label{eq:distr1} \mathcal{O}:=\{ m:\Omega'\to [0,\infty) \ \ |\ \ \int_{\Omega'} m\, dx =1 \}. \end{equation} In other words, if $m\in \mathcal{O}$, then it is the density of a distribution in $\Omega'$. It can be directly verified from \eqref{main} that if the initial distribution $m_0\in\mathcal{O}$, then $m(\cdot, t)\in\mathcal{O}$ for any subsequent time $t$ (see the computation below). It is clear that by scaling, the total population $1$ in \eqref{eq:distr1} can be replaced by any positive number. The running cost $F$ is one of the key parameters (functions) in the MFG system. However, in practice the running cost is often unknown or only partially known to the agents. This motivates us to consider the inverse problem of determining the running cost function $F$ by indirect measurement/knowledge of the MFG system that results from optimal actions. To that end, we next introduce the measurement/observation dataset for our inverse problem study. Let $\Omega$ be a given closed proper subdomain of $\Omega'$ with a smooth boundary $\partial\Omega$ and $Q:=\Omega\times [0, T]$, $\Sigma:=\partial\Omega\times[0,T].$ We define \begin{equation}\label{eq:meop0} \mathcal{N}_F(m_0):=\Big( u(x, t), m(x, t)\Big)_{(x, t)\in \partial Q}, \end{equation} where $(u, m)$ is the (unique) pair of solutions to the MFG system \eqref{main} associated with the initial population distribution $m(x, 0)=m_0(x)$. Noting that $\partial Q=\Omega\times \{0, T\}\cup \Sigma$, we clearly have that \begin{equation}\label{eq:meop1} \mathcal{N}_F(m_0):=\Big(\big(u(x,0),m(x,T)\big)\Big|_{x\in\Omega}, \big(u(x,t), m(x,t)\big)\Big|_{(x,t)\in\Sigma}\Big). \end{equation} $\mathcal{N}_F(m_0)$ encodes the (state) space-time boundary data of $u$ and $m$ in the subdomain $\Omega\times[0, T]$ associated with a given initial population distribution $m_0$. The inverse problem that we aim to investigate can be formulated as follows: \begin{equation}\label{eq:ip1} \mathcal{N}_{F}(m_0)\rightarrow F\quad \mbox{for all}\ \ m_0\in\mathcal{H}\subset\mathcal{O}, \end{equation} where $\mathcal{H}$ is a proper subset which shall be described in more detail in what follows. Here, we emphasise that in our study, we impose no restriction on $\Omega$ other than those described above, which makes our inverse problem study more realistic from a practical point of view. To be more specific, one can think of measuring/observing the space-time boundary data of $u$ and $m$, and from these data determining the running cost function $F$ over the space-time domain $Q:={\Omega}\times [0, T]$. We shall prove in a general setup that one can indeed achieve this goal.
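Before proceeding, let us record the elementary computation behind the invariance of $\mathcal{O}$ noted above. Integrating the KFP equation in \eqref{main} over $\Omega'$ and using the divergence theorem together with the Neumann boundary conditions, one has
$$
\frac{d}{dt}\int_{\Omega'} m(x,t)\, dx=\int_{\Omega'}\Big(\Delta m+{\rm div}\big(m\nabla u\big)\Big)\, dx=\int_{\partial\Omega'}\big(\partial_\nu m+m\,\partial_\nu u\big)\, d\sigma=0,
$$
so the total mass $\int_{\Omega'} m\, dx=1$ is conserved; the nonnegativity of $m$ is preserved as well, by the parabolic maximum principle applied to the second equation in \eqref{main}.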
Returning to the inverse problem \eqref{eq:ip1}, we shall establish sufficient conditions to guarantee the unique identifiability for \eqref{eq:ip1}, which is of primary importance in the theory of inverse problems and can be stated as follows: \begin{equation}\label{eq:ui1} F_1=F_2\quad\mbox{if and only if}\quad \mathcal{N}_{F_1}(m_0)=\mathcal{N}_{F_2}(m_0)\ \ \mbox{for all}\ m_0\in\mathcal{H}, \end{equation} where $F_j\in\mathcal{A}$, $j=1,2$, with $\mathcal{A}$ signifying an a-priori class that shall be detailed in what follows. \subsection{Technical developments and discussion} As also discussed earlier, the forward problem of MFG systems has been extensively and intensively studied in the literature in recent years, and we refer to \cite{Car,high_order_der,Cira,FerGomTad} for more results on the well-posedness of the forward problems that are related to the MFG system \eqref{main} of the current study. Nevertheless, we would like to point out that for our inverse problem study, we need to establish new results for the forward MFG system \eqref{main}, especially on the regularity of the solutions and the high-order variation of the system around a fixed pair of solutions. On the other hand, the inverse problems for MFGs are much less studied in the literature. To our best knowledge, there are only several numerical results available in \cite{CFLNO,DOY}, while in \cite{LMZ} unique identifiability results were established for an MFG inverse problem which, though related, is of a formulation different from the one in the present article. Among several different aspects, a major technical difference between the studies in \cite{LMZ} and the current article is that the arguments in \cite{LMZ} critically depend on a high-order linearisation technique around a pair of trivial solutions $(u_0, m_0)\equiv (0, 0)$. This makes the study in \cite{LMZ}, though mathematically rigorous, unrealistic from a physical point of view. For the current study of \eqref{eq:ip1}--\eqref{eq:ui1}, one has to respect the probability measure requirement on $m$, namely the constraint in \eqref{eq:distr1}. This technical constraint makes the corresponding inverse problem radically more challenging. In fact, in \cite{LMZ}, the high-order linearisation process around the trivial solutions can decouple the two PDEs of the MFG system in a certain sense. However, in the current study, we have to develop a new high-order linearisation process around a nontrivial pair of solutions, and the derived linearised systems are still coupled PDE systems. It is emphasised that even the treatments of the intermediate inverse problems for those linearised systems are technically novel to the literature. Furthermore, the constraint \eqref{eq:distr1} as well as the coupling of the MFG system significantly restrict the range of ``probing inputs'', i.e. the initial distributions $m_0$, which in turn restricts the generation of ``many'' probing modes $u$ and $m$ for detecting the unknown $F$. For general PDE inverse problems, it is generically held that the denser the set of PDE solutions (which play the role of the probing modes) one can generate, the more easily one can recover the unknowns. To overcome those challenges, we devise the inverse problem of recovering $F$ over $Q$ by measuring $u$ and $m$ on $\partial Q$, and in doing so, the free region $Q'\backslash Q$ enables us to construct a new and delicate family of the so-called CGO (Complex-Geometric-Optics) solutions to fulfil our inverse problem purpose.
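To convey the flavour of CGO solutions in the present parabolic setting (a simple illustration only; the family constructed later in this paper is more delicate and adapted to the coupled system \eqref{main}), we note that for any $\zeta\in\mathbb{C}^n$ the complex exponentials
$$
u_\zeta(x,t)=e^{\zeta\cdot x+(\zeta\cdot\zeta)\,t}\quad\mbox{and}\quad v_\zeta(x,t)=e^{\zeta\cdot x-(\zeta\cdot\zeta)\,t}
$$
satisfy $(\partial_t-\Delta)u_\zeta=0$ and $(\partial_t+\Delta)v_\zeta=0$, respectively; choosing $\zeta$ with a large imaginary part produces highly oscillatory solutions, which is the basic mechanism exploited in CGO-based arguments.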
Moreover, our analysis in this aspect is accompanied by several novel and subtle estimates. We believe those technical developments shall be useful for tackling MFG inverse problems in different setups, and we choose to investigate such extensions in our future works. Finally, we would like to briefly mention that the mathematical study of inverse problems associated with nonlinear PDEs has received considerable interest in the literature recently; see e.g. \cite{KLU,LLLS,LLLS21,LLLZ} and the references cited therein. Even in such a context, there are several salient features of our study that are worth highlighting. First, the MFG system \eqref{main} couples the two nonlinear PDEs in a backward-forward manner with respect to time. Second, there is a probability measure constraint \eqref{eq:distr1} which restricts the generation of ``probing modes''. We construct a novel class of CGO solutions for the inverse problem study. Third, we develop a high-order linearisation method around a nontrivial pair of solutions. The resulting linearised systems are still coupled in nature. We believe the mathematical strategies developed in this article can find more applications in tackling other inverse problems associated with coupled nonlinear PDEs in different contexts. The rest of the paper is organized as follows. In Section 2, we fix some notations and introduce several auxiliary results, as well as state the main result of the inverse problem. Section 3 is devoted to the study of the forward problem, and Sections~4 and 5 are devoted to the proof of the main result. \section{Preliminaries and statement of main results} \subsection{Notations and Basic Setting}\label{notation} For $k\in\mathbb{N}$ and $0<\alpha<1$, the H\"older space $C^{k+\alpha}(\overline{\Omega'})$ is defined as the subspace of $C^{k}(\overline{\Omega'})$ such that $\phi\in C^{k+\alpha}(\overline{\Omega'})$ if and only if $D^l\phi$ exist and are H\"older continuous with exponent $\alpha$ for all $l=(l_1,l_2,\ldots,l_n)\in \mathbb{N}^n$ with $|l|\leq k$, where $D^l:=\partial_{x_1}^{l_1}\partial_{x_2}^{l_2}\cdots\partial_{x_n}^{l_n}$ for $x=(x_1, x_2,\ldots, x_n)$. The norm is defined as \begin{equation} \|\phi\|_{C^{k+\alpha}(\overline{\Omega'}) }:=\sum_{|l|\leq k}\|D^l\phi\|_{\infty}+\sum_{|l|=k}\sup_{x\neq y}\frac{|D^l\phi(x)-D^l\phi(y)|}{|x-y|^{\alpha}}. \end{equation} If the function $\phi$ depends on both the time and space variables, we say that $\phi\in C^{k+\alpha, \frac{k+\alpha}{2}}(Q')$ if $D^lD^{j}_t\phi$ exist for all $l\in \mathbb{N}^n$, $j\in\mathbb{N}$ with $|l|+2j\leq k$, and are H\"older continuous with exponent $\alpha$ in $x$ and $\frac{\alpha}{2}$ in $t$ whenever $|l|+2j= k$. The norm is defined as \begin{equation} \begin{aligned} \|\phi\|_{ C^{k+\alpha, \frac{k+\alpha}{2}}(Q')}:&=\sum_{|l|+2j\leq k}\|D^lD^j_t\phi\|_{\infty}+\sum_{|l|+2j= k}\sup_{t,\, x\neq y}\frac{|D^lD^j_t\phi(x,t)-D^lD^j_t\phi(y,t)|}{|x-y|^{\alpha}}\\ &+\sum_{|l|+2j= k}\sup_{t\neq t',\, x} \frac{|D^lD^j_t\phi(x,t)-D^lD^j_t\phi(x,t')|}{|t-t'|^{\alpha/2}}. \end{aligned} \end{equation} Moreover, we denote by $H^s(\Omega')$, $H^r(\Sigma')$, $H^s(0,T;H^r(\Omega'))$ the standard Sobolev spaces for $s,r\in\mathbb{R}.$ Since we need to study the linearized systems of $\eqref{main}$, we next define the variation of a function defined on $\mathcal{P}(\Omega') $ (cf. \cite{num_boundary}). Recall that $\mathcal{P}(\Omega')$ denotes the set of probability measures on $\Omega'$ and let $U$ be a real function defined on $\mathcal{P}(\Omega') $.
\begin{defi}\label{def_der_1} Let $U :\mathcal{P}(\Omega')\to\mathbb{R}$. We say that $U$ is of class $C^1$ if there exists a continuous map $K: \mathcal{P}(\Omega')\times \Omega'\to\mathbb{R}$ such that, for all $m_1,m_2\in\mathcal{P}(\Omega') $, we have \begin{equation}\label{derivation} \lim\limits_{s\to 0^+}\frac{U\big(m_1+s(m_2-m_1)\big)-U(m_1)}{s}=\int_{\Omega'} K(m_1,x)\, d(m_2-m_1)(x). \end{equation} \end{defi} Note that the definition of $K$ is up to additive constants. We define the derivative $\dfrac{\delta U}{\delta m}$ as the unique map $K$ satisfying $\eqref{derivation}$ and \begin{equation} \int_{\Omega'} K(m,x)\, dm(x)=0. \end{equation} Similarly, we can define higher order derivatives of $U$, and we refer to \cite{high_order_der} for more related discussion. Finally, we define the Wasserstein distance between $m_1$ and $m_2$ in $\mathcal{P}(\Omega')$, which shall be needed in studying the regularity of the derivative $\dfrac{\delta U}{\delta m}$. \begin{defi}\label{W_distance} Let $m_1,m_2$ be two Borel probability measures on $\Omega'$. Define \begin{equation} d_1(m_1,m_2):=\sup_{Lip(\psi)\leq 1}\int_{\Omega'}\psi(x)\, d(m_1-m_2)(x), \end{equation} where $Lip(\psi)$ denotes the Lipschitz constant of a Lipschitz function $\psi$, i.e., \begin{equation}\label{eq:Lip1} Lip(\psi)=\sup_{x, y\in\Omega', x\neq y}\frac{|\psi(x)-\psi(y)|}{|x-y|}. \end{equation} \end{defi} \begin{rmk} In Definitions~\ref{def_der_1} and \ref{W_distance}, $m$ (i.e., $m_1$ or $m_2$) is viewed as a distribution. However, in other parts of the paper, we use $m$ to denote the density of a distribution, such as in the MFG system \eqref{main}. \end{rmk} \subsection{Well-posedness conditions and admissible class}\label{assump} Throughout the rest of the paper and without loss of generality, we shall always assume that $|\Omega'|=1$ in order to simplify the exposition. Next, we introduce several assumptions which are generally needed in guaranteeing the well-posedness of the forward MFG system \eqref{main}; see \cite{num_boundary} and \cite{Cardaliaguet} for more related discussions on this aspect.
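Before stating the assumptions, let us illustrate Definition \ref{def_der_1} with a simple example (recorded here for the reader's convenience). For $\phi\in C(\overline{\Omega'})$, consider the linear functional $U(m)=\int_{\Omega'}\phi\, dm$. Since $(m_2-m_1)(\Omega')=0$, the limit in \eqref{derivation} equals $\int_{\Omega'}\phi\, d(m_2-m_1)(x)$ for any choice of the additive constant, and the normalisation $\int_{\Omega'}K(m,x)\, dm(x)=0$ then yields
$$
\frac{\delta U}{\delta m}(m,x)=\phi(x)-\int_{\Omega'}\phi\, dm.
$$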
\begin{assum}\label{hypo} (I) (monotonicity property) For any $m_1,m_2\in\mathcal{P}(\Omega')$, we have \begin{equation}\label{mono1} \int_{\Omega'} \big( F(x,m_1)-F(x,m_2)\big)\, d(m_1-m_2)(x)\geq 0 \end{equation} and \begin{equation} \int_{\Omega'} \big( G(x,m_1)-G(x,m_2)\big)\, d(m_1-m_2)(x)\geq 0. \end{equation} (II) (regularity conditions) Assume that \begin{equation} \sup_{m\in\mathcal{P}(\Omega')} \Big(\left\|F(\cdot,m)\right\|_{\alpha}+ \left\|\frac{\delta F}{\delta m}(\cdot,m,\cdot)\right\|_{\alpha,2+\alpha}\Big)+Lip \Big(\frac{\delta F}{\delta m}\Big)< \infty, \end{equation} \begin{equation} \sup_{m\in\mathcal{P}(\Omega')} \Big(\left\|G(\cdot,m)\right\|_{2+\alpha}+ \left\|\frac{\delta G}{\delta m}(\cdot,m,\cdot)\right\|_{2+\alpha,2+\alpha}\Big)+ Lip \Big(\frac{\delta G}{\delta m}\Big)< \infty, \end{equation} where \begin{equation}\label{eq:Lip2} \begin{split} Lip \Big(\frac{\delta F}{\delta m}\Big):=& \sup_{m_1\neq m_2}\Big (d_1(m_1,m_2)^{-1} \left\|\frac{\delta F}{\delta m}(\cdot,m_1,\cdot)- \frac{\delta F}{\delta m}(\cdot,m_2,\cdot)\right\|_{\alpha,1+\alpha}\Big),\\ Lip \Big(\frac{\delta G}{\delta m}\Big):=& \sup_{m_1\neq m_2}\Big (d_1(m_1,m_2)^{-1} \left\|\frac{\delta G}{\delta m}(\cdot,m_1,\cdot)- \frac{\delta G}{\delta m}(\cdot,m_2,\cdot)\right\|_{2+\alpha,2+\alpha}\Big), \end{split} \end{equation} the suprema being taken over $m_1,m_2\in \mathcal{P}(\Omega')$. Here, we note that the definitions of $Lip$ in \eqref{eq:Lip1} and \eqref{eq:Lip2} are slightly different, which should be clear from the context. (III) (compatibility conditions) \begin{equation} \begin{split} \left< D_y\frac{\delta G}{\delta m}(x,m,y),\nu(y)\right>&=0,\\ \left< D_y\frac{\delta F}{\delta m}(x,m,y),\nu(y)\right>&=0,\\ \left< D_x G(x,m),\nu(x)\right>&=0, \end{split} \end{equation} for all $m\in \mathcal{P}(\Omega').$ \end{assum} Assumption~\ref{hypo} is mainly needed to guarantee the well-posedness of the forward MFG system \eqref{main}, and it shall be imposed throughout the rest of the paper. Next, we introduce the admissibility conditions on $F$ and $G$, which shall be mainly needed for our subsequent inverse problem study of \eqref{eq:ip1} and \eqref{eq:ui1}. \begin{defi}\label{Admissible class2} We say $U(x,z):\mathbb{R}^n\times\mathbb{C}\to\mathbb{C}$ is admissible, denoted by $U\in\mathcal{A}$, if it satisfies the following conditions: \begin{enumerate} \item[(i)] The map $z\mapsto U(\cdot,z)$ is holomorphic with value in $C^{\alpha}(\mathbb{R}^n)$ for some $\alpha\in(0,1)$. \item[(ii)] $U(x,1)=0$ for all $x\in\mathbb{R}^n$. Here we recall that we assume $|\Omega'|=1.$ \item[(iii)] There exists $a_1\in\mathbb{R}$ such that $ U^{(1)}(x)>a_1>0$ for all $x\in\mathbb{R}^n$, where $U^{(1)}$ is given by \eqref{eq:G} below. \end{enumerate} Clearly, if (i) and (ii) are fulfilled, then $U$ can be expanded into a power series as follows: \begin{equation}\label{eq:G} U(x,z)=\sum_{k=1}^{\infty} U^{(k)}(x)\frac{(z-1)^k}{k!}, \end{equation} where $ U^{(k)}(x)=\frac{\p^kU}{\p z^k}(x,1)\in C^{\alpha}(\mathbb{R}^n).$ \end{defi} To make this definition clear, we have the following remark. \begin{rmk} For the MFG system \eqref{main}, if we assume $F\in\mathcal{A}$, it means that $F$ possesses the power series expansion \eqref{eq:G} with the complex variable $z$ restricted to the real line. For this reason, we always assume in the series expansion \eqref{eq:G} that the coefficient functions $U^{(k)}$ are real-valued, since $F$ is always real-valued in our study.
Moreover, we would like to emphasise that the admissibility conditions in Definition~\ref{Admissible class2} are compatible with the well-posedness conditions in Assumption~\ref{hypo}. In fact, there are overlaps among them, and we shall not explore this in detail here since it is not the focus of our study. As a simple illustration, one can take $F(x, z)=e^{z-1}-1$ or $F(x,z)=z-1$ and directly verify that all conditions can be fulfilled. \end{rmk} Next, we introduce the following function space, \begin{equation}\label{eq:fsh} H_{\pm}(Q):=\{u\in \mathcal{D}'(Q) \ \ |\ \ u\in L^2(Q) \text{ and } (\pm\p_t-\Delta)u\in L^2(Q) \}, \end{equation} endowed with the norm $$\|u\|^2_{H_{\pm}(Q)}:=\|u\|^2_{L^2(Q)}+\|(\pm\p_t-\Delta)u\|^2_{L^2(Q)}.$$ This function space shall be needed in our analysis in Section~4, as well as in the following admissibility condition for the terminal cost $G$ in \eqref{main}. \begin{defi}\label{improve_regular} We say $G$ is an action operator at $m=1$ if for any $\rho(x,t)\in H_{\pm}(Q)$, we have $$\dfrac{\delta G}{ \delta m}(x,1)(\rho(x,T)):=\left<\dfrac{\delta G}{ \delta m}(x,1,\cdot),\rho(\cdot,T)\right>_{L^2}\in L^2(\Omega),$$ and $$ \left\| \dfrac{\delta G}{ \delta m}(x,1)(\rho(x,T)) \right\|_{L^2(\Omega)}\leq C \|\rho\|_{ L^2(Q) }.$$ We say $G\in\mathcal{B}$ if it is an action operator at $m=1$ and $ G(x,1)=B$ for some constant $B\in\mathbb{R}.$ \end{defi} Further discussions about $H_{\pm}$ shall be provided in Section~\ref{sec_pre_est}. In particular, we shall show that $\rho(x,T)$ is well-defined. \begin{rmk}\label{rem:1} Similar to the admissibility condition on $F$, we would like to remark that the admissibility condition on $G$ in Definition~\ref{improve_regular} is also a generic one. In fact, in many practical setups, the terminal cost functions belong to the class $\mathcal{B}$; see \cite{high_order_der} for related examples. Here, we mention a typical one by letting $$ G(x,m)=\int_{\Omega'}\psi(z,\rho*m(z))\rho(x-z)\ dz, $$ where $*$ denotes the convolution operation, $\psi: \mathbb{R}^2\mapsto\mathbb{R}$ is a smooth map which is nondecreasing with respect to the second variable, and $\rho$ is a smooth, even function with a compact support in $\Omega'$. Clearly, $G$ is an action operator at any point as long as it is of the convolution form above. \end{rmk} \begin{rmk} Note that if $F(x,1)=0$ and $G(x,1)=B$ (as in Definition $\ref{improve_regular}$), then $(u,m)=(B,1)$ is a solution of the MFG system \eqref{main}. In this case, the initial distribution $m(x, 0)=1$. It is a common feature of MFG systems that the uniform distribution is a stationary state. \end{rmk} \subsection{Main unique identifiability result} We are in a position to state the main result for the inverse problem \eqref{eq:ip1}-\eqref{eq:ui1}, which shows in a generic scenario that one can recover the running cost $F$ from the measurement map $\mathcal{N}_F$. \begin{thm}\label{der F} Let Assumption $\ref{hypo}$ hold for $ 0<\alpha<1$. Assume that $F_j \in\mathcal{A}$ ($j=1,2$) and $G\in\mathcal{B}$.
Let $\mathcal{N}_{F_j}$ be the measurement map associated to the following system: \begin{equation}\label{eq:mfg1} \begin{cases} -\p_tu(x,t)-\Delta u(x,t)+\frac 1 2 {|\nabla u(x,t)|^2}= F_j(x,m(x,t)),& \text{ in } Q',\medskip\\ \p_t m(x,t)-\Delta m(x,t)-{\rm div} (m(x,t) \nabla u(x,t))=0,&\text{ in } Q',\medskip\\ \p_{\nu} u(x,t)=\p_{\nu}m(x,t)=0 &\text{ on } \Sigma',\medskip\\ u(x,T)=G(x,m(x,T)), & \text{ in } \Omega',\medskip\\ m(x,0)=m_0(x), & \text{ in } \Omega'.\\ \end{cases} \end{equation} If for any $m_0\in C^{2+\alpha}(\Omega') \cap \mathcal{O}$, where $\mathcal{O}$ is defined in \eqref{eq:distr1}, one has $$\mathcal{N}_{F_1}(m_0)=\mathcal{N}_{F_2}(m_0),$$ then it holds that $$F_1(x,z)=F_2(x,z)\ \text{ in }\ \Omega\times \mathbb{R}.$$ \end{thm} \section{Auxiliary results on the forward problem}\label{section wp} In this section, we derive several auxiliary results on the forward problem of the MFG system \eqref{main}. One of the key results is the infinite differentiability of the system with respect to small variations around (the density of) a uniform distribution ($m_0(x)$). First, we show the existence result of an auxiliary system. \begin{lem}\label{linear app unique} Assume that $F^{(1)}\in C^{\alpha}(\Omega')$. For any $g,\tilde g\in C^{2+\alpha}(\overline\Omega')$, and $h,\tilde{h}\in C^{\alpha,\alpha/2}(\overline Q')$ with the compatibility conditions: \begin{align}\label{c-systems} \p_{\nu}\tilde g(x)= \p_{\nu} g(x)=0, \end{align} the following system \begin{equation}\label{surjective} \begin{cases} -u_t-\Delta u-F^{(1)}(x)m=h & \text{ in } Q',\medskip\\ m_t-\Delta m-\Delta u=\tilde{h} & \text{ in } Q',\medskip\\ \p_{\nu}u(x,t)=\p_{\nu}m(x,t)=0 & \text{ on } \Sigma',\medskip\\ \displaystyle{u(x,T)=\frac{\delta G}{\delta m}(x,1)(m(x,T))+g} & \text{ in } \Omega',\medskip\\ m(x,0)=\tilde{g} & \text{ in } \Omega'.\\ \end{cases} \end{equation} admits a pair of solutions $(u,m)\in [ C^{2+\alpha,1+\alpha/2}(\overline Q')]^2$. \end{lem} \begin{proof} This is consequence of Proposition 5.8 in \cite{num_boundary}. In fact, by Proposition 5.8 in \cite{num_boundary}, we have that \eqref{surjective} admits a pair of solutions $(u,m)$ such that $u\in C^{2+\alpha,1+\alpha/2}(\overline Q')$. Since $m(x,0)=\tilde{g} \in C^{2+\alpha}(\overline\Omega')$ and $F\in C^{\alpha}(\Omega')$ in this case, we have $m\in C^{2+\alpha,1+\alpha/2}(\overline Q') $. \end{proof} Now we present the proof of the local well-posedness of the MFG system \eqref{main}, which shall be needed in our subsequent inverse problem study. \begin{thm}\label{local_wellpose} Let Assumption $\ref{hypo}$ hold for $ 0<\alpha<1$. Suppose that $F\in\mathcal{A}$ and $G\in\mathcal{B}$. The following results hold: \begin{enumerate} \item[(a)] There exist constants $\delta>0$ and $C>0$ such that for any \[ m_0\in B_{\delta}(C^{2+\alpha}(\Omega') :=\{m_0\in C^{2+\alpha}(\Omega'): \|m_0\|_{C^{2+\alpha}(\Omega')}\leq\delta \}, \] the MFG system $\eqref{main}$ has a solution $(u,m)\in [C^{2+\alpha,1+\frac{\alpha}{2}}(Q)]^2$ which satisfies \begin{equation}\label{eq:nn1} \|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q')}:= \|u\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q')}+ \|m\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q')}\leq C\|m_0\|_{ C^{2+\alpha}(\Omega')}. \end{equation} Furthermore, the solution $(u,m)$ is unique within the class \begin{equation}\label{eq:nn2} \{ (u,m)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q')\times C^{2+\alpha,1+\frac{\alpha}{2}}(Q'): \|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q')}\leq C\delta \}. 
\end{equation} \item[(b)] Define a function \[ S: B_{\delta}(C^{2+\alpha}(\Omega')\to C^{2+\alpha,1+\frac{\alpha}{2}}(Q')\times C^{2+\alpha,1+\frac{\alpha}{2}}(Q')\ \mbox{by $S(m_0):=(u,v)$}, \] where $(u,v)$ is the unique solution to the MFG system \eqref{main}. Then for any $m_0\in B_{\delta}(C^{2+\alpha}(\Omega'))$, $S$ is holomorphic. \end{enumerate} \end{thm} \begin{proof} Let \begin{align*} &X_1:= \{ m\in C^{2+\alpha}(\Omega' ): \p_{\nu}m=0 \} , \\ &X_2:=\{ (u,m)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q')\times C^{2+\alpha,1+\frac{\alpha}{2}}(Q') : \p_{\nu}m=\p_{\nu}u=0 \text{ on } \Sigma' \} \},\\ &X_3:=X_1\times X_1\times C^{\alpha,\frac{\alpha}{2}}(Q' )\times C^{\alpha,\frac{\alpha}{2}}(Q'), \end{align*} and we define a map $\mathscr{K}:X_1\times X_2 \to X_3$ by that for any $(m_0,\tilde u,\tilde m)\in X_1\times X_2$, \begin{align*} & \mathscr{K}( m_0,\tilde u,\tilde m)(x,t)\\ :=&\big( \tilde u(x,T)-G(x,\tilde m(x,T)), \tilde m(x,0)-m_0(x) , -\p_t\tilde u(x,t)-\Delta \tilde u(x,t)\\ &+\frac{|\nabla \tilde u(x,t)|^2}{2}- F(x,t,\tilde m(x,t)), \p_t \tilde m(x,t)-\Delta \tilde m(x,t)-{\rm div}(\tilde m(x,t)\nabla \tilde u(x,t)) \big) . \end{align*} First, we show that $\mathscr{K} $ is well-defined. Since the H\"older space is an algebra under the point-wise multiplication, we have $|\nabla u|^2, {\rm div}(m(x,t)\nabla u(x,t)) \in C^{\alpha,\frac{\alpha}{2}}(Q' ).$ By the Cauchy integral formula, \begin{equation}\label{eq:F1} F^{(k)}\leq \frac{k!}{R^k}\sup_{|z|=R}\|F(\cdot,\cdot,z)\|_{C^{\alpha,\frac{\alpha}{2}}(Q' ) },\ \ R>0. \end{equation} Then there is $L>0$ such that for all $k\in\mathbb{N}$, \begin{equation}\label{eq:F2} \left\|\frac{F^{(k)}}{k!}m^k\right\|_{C^{\alpha,\frac{\alpha}{2}}(Q' )}\leq \frac{L^k}{R^k}\|m\|^k_{C^{\alpha,\frac{\alpha}{2}}(Q' )}\sup_{|z|=R}\|F(\cdot,\cdot,z)\|_{C^{\alpha,\frac{\alpha}{2}}(Q' ) }. \end{equation} By choosing $R\in\mathbb{R}_+$ large enough and by virtue of \eqref{eq:F1} and \eqref{eq:F2}, it can be seen that the series converges in $C^{\alpha,\frac{\alpha}{2}}(Q' )$ and therefore $F(x,m(x,t))\in C^{\alpha,\frac{\alpha}{2}}(Q' ).$ Using the compatibility assumption and regularity conditions on $G$, we have that $\mathscr{K} $ is well-defined. Let us show that $\mathscr{K}$ is holomorphic. Since $\mathscr{K}$ is clearly locally bounded, it suffices to verify that it is weakly holomorphic. That is we aim to show that the map $$\lambda\in\mathbb C \mapsto \mathscr{K}((m_0,\tilde u,\tilde m)+\lambda (\bar m_0,\bar u,\bar m))\in X_3,\quad\text{for any $(\bar m_0,\bar u,\bar m)\in X_1\times X_2$}$$ is holomorphic. In fact, this follows from the condition that $F\in\mathcal{A}$ and $G\in\mathcal{B}$. Note that $ \mathscr{K}(\frac{1}{|\Omega'|},0,\frac{1}{|\Omega'|})=\mathscr{K}(1,0,1)= 0$. Let us compute $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (1,0,1)$: \begin{equation}\label{Fer diff} \begin{aligned} \nabla_{(\tilde u,\tilde m)} \mathscr{K}(1,0,1) (u,m)& =( u|_{t=T}-\frac{\delta G}{\delta m}(x,1)(m(x,T)), m|_{t=0}, \\ &-\p_tu(x,t)-\Delta u(x,t)-F^{(1)}m, \p_t m(x,t)-\Delta m(x,t)-\Delta u). \end{aligned} \end{equation} On the one hand, $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (1,0,1)$ is injective. 
In fact, if $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (1,0,1)(u,m)=0$, then we have \begin{equation} \begin{cases} -u_t-\Delta u= F^{(1)}(x)m & \text{ in } Q',\medskip\\ m_t-\Delta m-\Delta u=0 & \text{ in } Q',\medskip\\ \p_{\nu}u(x,t)=\p_{\nu}m(x,t)=0 & \text{ on } \Sigma',\medskip\\ \displaystyle{u(x,T)=\frac{\delta G}{\delta m}(x,1)(m(x,T)) } & \text{ in } \Omega',\medskip\\ m(x,0)=0 & \text{ in } \Omega'.\\ \end{cases} \end{equation} Notice that \begin{equation}\label{eq:dd1} \begin{split} &\int_{\Omega}\quad u_tm+um_t\ dx\\ =&\int_{\Omega} (-\Delta u- F^{(1)}m)m + (\Delta m+\Delta u)u\ dx\\ =&\int_{\Omega} -F^{(1)}m^2-|\nabla u|^2\ dx\\ \leq& \int_{\Omega} -F^{(1)}m^2\ dx. \end{split} \end{equation} Integrating both sides of \eqref{eq:dd1} from $0$ to $T$, we can obtain \begin{equation} \int_{\Omega}\frac{\delta G}{\delta m}(x,1)(m(x,T)) m(x,T) dx\leq -\int_{Q} F^{(1)}m^2 dx. \end{equation} Since $F^{(1)}(x)$ is positive and $G$ satisfies the monotonicity property, we readily see that $m=0$ and then $u=0$. Hence, $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (1,0,1)$ is injective. On the other hand, by Lemma $\ref{linear app unique}$, $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (1,0,1)$ is surjective. Therefore, $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (1,0,1)$ is a linear isomorphism between $X_2$ and $X_3$. Hence, by the Implicit Function Theorem, there exist $\delta>0$ and a unique holomorphic function $S: B_{\delta}(\Omega')\to X_2$ such that $\mathscr{K}(m_0,S(m_0))=0$ for all $m_0\in B_{\delta}(\Omega') $. By letting $(u,m)=S(m_0)$, we obtain the unique solution of the MFG system \eqref{main}. Let $ (u_0,v_0)=S(0)$. Since $S$ is Lipschitz, we know that there exist constants $C,C'>0$ such that \begin{equation*} \begin{aligned} &\|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q')^2}\\ \leq& C'\|m_0\|_{B_{\delta}(\Omega')} +\|u_0\|_ { C^{2+\alpha,1+\frac{\alpha}{2}}(Q')}+\|v_0\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q')}\\ \leq& C \|m_0\|_{B_{\delta}(\Omega')}. \end{aligned} \end{equation*} The proof is complete. \end{proof} \begin{rmk} In the proof of this theorem, we do not make use of the fact that the initial data $m_0$ is a density of a distribution. It is not necessary in the proof of the well-posedness of the forward problem. On the other hand, it is noted that if we choose $m_0$ to be a density of a distribution, the KFP equation forces the solution $m$ to be a density of a distribution for all $t\in (0,T).$ However, when we consider the inverse problem, we need to restrict our discussion to the case that $m_0$ is a density. \end{rmk} \begin{comment} \begin{rmk} Regarding the local well-posedness, several remarks are in order. \begin{enumerate} \item[(a)] The conditions on $F$ and $G$ (Definition \ref{Admissible class1}-(i) and $G$ satisfies Definition \ref{Admissible class2}-(i) ) are not essential and it is for convenience to apply implicit function theorem . Also, the analytic conditions on $F$ and $G$ can be replayed by weaker regularity conditions in the proof of the local well-posedness \cite{Lions} , but these conditions will be utilized in our inverse problem study. \item[(b)] In order to apply the higher order linearization method that shall be developed in Section 5 for the inverse problems, we need the infinite differentiability of the equation with respect to the given input $m_0(x)$, it is shown by the fact that the solution map $S$ is holomorphic. \item[(c)] In the proof of Theorem $\ref{local_wellpose}$, we show the solution map $S$ is holomorphic. 
As a corollary, the measurement map $\mathcal{M}=(\pi_1\circ S)\Big|_{t=0}$ is also holomorphic, where $\pi_1$ is the projection map with respect to the first variable. \end{enumerate} \end{rmk} \end{comment} \section{ A-priori estimates and analysis of the linearized systems }\label{analysis of lin} \subsection{Higher-order linearization}\label{HLM} We next develop a high-order linearization scheme of the MFG system \eqref{main} in the probability space around a uniform distribution. This method depends on the infinite differentiability of the system with respect to a given input $m_0(x)$, which was established in Theorem~$\ref{local_wellpose}$. First, we introduce the basic setting of this higher order linearization method. Consider the system $\eqref{main}$. Let $$m_0(x;\varepsilon)=\frac{1}{|\Omega'|}+\sum_{l=1}^{N}\varepsilon_lf_l=1+ \sum_{l=1}^{N}\varepsilon_lf_l,$$ where \[ f_l\in C^{2+\alpha}(\mathbb{R}^n)\quad\mbox{and}\quad\int_{\Omega'} f_l(x) dx =0, \] and $\varepsilon=(\varepsilon_1,\varepsilon_2,...,\varepsilon_N)\in\mathbb{R}^N$ with $|\varepsilon|=\sum_{l=1}^{N}|\varepsilon_l|$ small enough. Clearly, $m_0(x;\varepsilon)$ is a density of a distribution in $\mathcal{P}(\Omega').$ By Theorem $\ref{local_wellpose}$, there exists a unique solution $(u(x,t;\varepsilon),m(x,t;\varepsilon) )$ of $\eqref{main}$. If $\varepsilon=0,$ by our assumption, we have $(u(x,t;0),m(x,t;0) )= (B,1)$ for some $B\in\mathbb{R}.$ Let $$u^{(1)}:=\p_{\varepsilon_1}u|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{u(x,t;\varepsilon)-u(x,t;0) }{\varepsilon_1},$$ $$m^{(1)}:=\p_{\varepsilon_1}m|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{m(x,t;\varepsilon)-m(x,t;0) }{\varepsilon_1}.$$ The idea is that we consider a new system of $(u^{(1)},m^{(1)}).$ It is sufficient for us to only consider this system in $Q:=\Omega\times[0,T]$. Now, we have that $(u_{j}^{(1)},m_{j}^{(1)} )$ satisfies the following system: \begin{equation}\label{linear l=1,eg} \begin{cases} -\p_tu^{(1)}(x,t)-\Delta u^{(1)}(x,t)= F^{(1)}(x)m^{(1)}(x,t)& \text{ in } Q,\medskip\\ \p_t m^{(1)}(x,t)-\Delta m^{(1)}(x,t)-\Delta u^{(1)}(x,t)=0&\text{ in } Q,\medskip\\ \displaystyle{u^{(1)}_j(x,T)=\frac{\delta G}{\delta m}(x,1)(m^{(1)}(x,T))} & \text{ in } \Omega,\medskip\\ m^{(1)}_j(x,0)=f_1(x). & \text{ in } \Omega.\\ \end{cases} \end{equation} Since $\Omega$ is a closed proper subset of $\Omega'$, in this system, $f_1(x)$ can be arbitrary $C^{2+\alpha}$ function in $\Omega.$ Then we can define $$u^{(l)}:=\p_{\varepsilon_l}u|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{u(x,t;\varepsilon)-u(x,t;0) }{\varepsilon_l},$$ $$m^{(l)}:=\p_{\varepsilon_l}m|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{m(x,t;\varepsilon)-m(x,t;0) }{\varepsilon_l},$$ for all $l\in\mathbb{N}$ and obtain a sequence of similar systems. In the proof of Theorem $\ref{der F}$, we recover the first Taylor coefficient of $F$ by considering this new system $\eqref{linear l=1,eg}$. In order to recover the higher order Taylor coefficients, we consider \begin{equation}\label{eq:ht1} u^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}u|_{\varepsilon=0}, m^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}m|_{\varepsilon=0}. 
\end{equation} We have the second-order linearization as follows: \begin{equation}\label{linear l=1,2 eg} \begin{cases} -\p_tu^{(1,2)}-\Delta u^{(1,2)}(x,t)+\nabla u^{(1)}\cdot \nabla u^{(2)}\medskip\\ \hspace*{3cm}= F^{(1)}(x)m^{(1,2)}+F^{(2)}(x)m^{(1)}m^{(2)},& \text{ in } \Omega\times(0,T),\medskip\\ \p_t m^{(1,2)}-\Delta m^{(1,2)}-\Delta u^{(1,2)}= {\rm div} (m^{(1)}\nabla u^{(2)})+{\rm div}(m^{(2)}\nabla u^{(1)}) ,&\text{ in } \Omega\times (0,T),\medskip\\ \displaystyle{u^{(1,2)}(x,T)=\frac{\delta G}{\delta m}(x,1)m^{(1,2)}(x,T)+\frac{\delta^2G}{\delta m^2}(x,1)(m^{(1)}(x,T)m^{(2)}(x,T)),} & \text{ in } \Omega,\medskip\\ m^{(1,2)}(x,0)=0, & \text{ in } \Omega.\\ \end{cases} \end{equation} Notice that the non-linear terms of the system $\eqref{linear l=1,2 eg}$ depend on the first-order linearised system $\eqref{linear l=1,eg}$. Since we shall make use of the mathematical induction to recover the high-order Taylor coefficients of $F$. This shall be an important ingredient in our proof of Theorem~\ref{der F} in what follows. Similarly, for $N\in\mathbb{N}$, we consider \begin{equation*} u^{(1,2...,N)}=\p_{\varepsilon_1}\p_{\varepsilon_2}...\p_{\varepsilon_N}u|_{\varepsilon=0}, \end{equation*} \begin{equation*} m^{(1,2...,N)}=\p_{\varepsilon_1}\p_{\varepsilon_2}...\p_{\varepsilon_N}m|_{\varepsilon=0}. \end{equation*} we can obtain a sequence of parabolic systems, which shall be employed again in determining the higher order Taylor coefficients of the unknowns $F$ . \subsection{A-priori estimates}\label{sec_pre_est} Let us consider the linearised system: \begin{equation} \begin{cases} -u_t-\Delta u= F^{(1)}(x)m & \text{ in } Q,\medskip\\ m_t-\Delta m-\Delta u=0 & \text{ in } Q,\medskip\\ \displaystyle{u(x,T)=\frac{\delta G}{\delta m}(x,1)(m(x,T))} & \text{ in } \Omega,\medskip\\ m(x,0)=f(x) & \text{ in } \Omega.\\ \end{cases} \end{equation} We need derive several quantitative a-priori estimates of this linearised system. Since we also need to focus on the duality of this system, we claim similar results for the backward parabolic equation. We first present two auxiliary Carleman estimates. \begin{lem}\label{Carleman 1} Let $\lambda>0$ and $v:=e^{\lambda^2t}u(x,t)\in L^2(0,T;H^2(\Omega))\cap H^1(0,T; L^2(\Omega))$ ,$u(x,T)=0$ and $u(x,t)=0$ in $\Sigma$. Then \begin{equation}\label{est1} \int_Q e^{2\lambda^2 t}(-\p_t u-\Delta u)^2 dxdt \geq C \Big[\lambda^4\int_Q v^2 dxdt+ \int_Q (\Delta v)^2dxdt+\lambda^2\int_Q |\p_{\nu}v|^2 dxdt\Big]. \end{equation} \end{lem} \begin{proof} Notice that $$ e^{2\lambda^2 t}(-\p_t u-\Delta u)^2=( -\p_t v-\Delta v + \lambda^2 v )^2.$$ Therefore, \begin{align*} &\int_Q e^{2\lambda^2 t}(-\p_t u-\Delta u)^2 dxdt\\ = &\int_Q ( -\p_t v-\Delta v + \lambda^2 v )^2 dxdt\\ \geq &\lambda^4\int_Q v^2 + (\Delta v)^2 dxdt + 2\int_Q \p_tv \Delta v dxdt- 2\lambda^2\int_Q (\Delta v )v+(\p_tv )v dxdt.\\ \end{align*} Note that \begin{align*} &2\int_Q \p_tv \Delta v dxdt =-\int_Q \p_t |\nabla v|^2 dxdt=\int_{\Omega} |\nabla v(x,0)|^2 dx,\\ &-2\int_Q (\Delta v )v dxdt= 2\int_Q |\nabla v|^2 dxdt\geq 2C\int_Q |\p_{\nu}v|^2 dxdt,\\ &2\int_Q (\p_tv )v dxdt=\int_Q \p_t v^2 dxdt= -\int_{\Omega} v(x,0)^2 dx.\\ \end{align*} It follows that $\eqref{est1}$ is true. The proof is complete. \end{proof} Similarly, we have \begin{lem}\label{Carleman 2} Let $\lambda>0$ and $v:=e^{-\lambda^2t}u(x,t)\in L^2(0,T;H^2(\Omega))\cap H^1(0,T; L^2(\Omega))$. 
Then it holds that \begin{equation}\label{est2} \int_Q e^{-2\lambda^2 t}(\p_t u-\Delta u)^2 dxdt \geq C \Big[\lambda^4\int_Q v^2 dxdt+ \int_Q (\Delta v)^2dxdt+\lambda^2\int_Q |\p_{\nu}v|^2 dxdt\Big]. \end{equation} \end{lem} Next, we recall the function space $H_\pm(Q)$ defined in \eqref{eq:fsh}. We use the following facts about these spaces. First, $C^{\infty}(Q)$ is dense $H_{\pm}.$ Second, we can define the trace operator on $H_{\pm}$. In particular, we can define $u(x,0),u(x,T)\in H^{-1}(\Omega).$ Hence, we have Green's formula in $H_{\pm}$; see also \cite{cgo} for related study. \begin{lem}\label{prior estimate} Suppose there are $M>\varepsilon>0$ such that $\varepsilon<F(x)<M$ and $G\in \mathcal{B}$. Let $(u,m)\in \big(L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega)\big)\times H_{+}(Q)$ be a solution to the following system \begin{equation}\label{prior estimate 1 } \begin{cases} -u_t-\Delta u= F(x)m & \text{ in }\ Q,\medskip\\ m_t-\Delta m-\Delta u=0 & \text{ in }\ Q,\medskip\\ u(x,t)=m(x,t)=0 & \text{ on }\ \Sigma,\medskip\\ \displaystyle{u(x,T)= \frac{\delta G}{\delta m}(x,1)(m(x,T) )} & \text{ in }\ \Omega.\\ \end{cases} \end{equation} we have $$ \|m\|_{L^2(Q)}\leq C\|m(x,0)\|_{H^{-1}(\Omega)}.$$ \end{lem} \begin{proof} Notice that \begin{equation}\label{eq:ddd2} \begin{split} &\int_{\Omega}\quad u_tm+um_t\quad dx\\ =&\int_{\Omega} (-\Delta u- Fm)m + (\Delta m+\Delta u)u dx\\ =&\int_{\Omega} -Fm^2-|\nabla u|^2 dx\\ \leq& \int_{\Omega} -Fm^2 dx. \end{split} \end{equation} Integrating both sides of \eqref{eq:ddd2} from $0$ to $T$, we can obtain that \begin{equation} \int_{\Omega} \frac{\delta G}{\delta m}(x,1)(m(x,T))m(x,T)\ dx-\int_{\Omega} u(x,0)m(x,0)\ dx\leq -\int_{Q} Fm^2\ dx. \end{equation} Since $G\in\mathcal{B}$, the standard a-prior estimate for the parabolic equation (cf. \cite{evans}) yields that \begin{equation}\label{control norm of H_+} \|u(x,0)\|_{H_0^1(\Omega)}\leq C(\|m\|_{L^2(Q)}+ \|\frac{\delta G}{\delta m}(x,1)(m(x,T) )\|_{L^2(Q)}) . \end{equation} Then by the assumption $G\in\mathcal{B}$, we have \begin{align*} \varepsilon \|m\|^2_{L^2(Q)}&\leq \int_{\Omega} Fm^2 dx\\ &\leq \int_{\Omega} u(x,0)m(x,0) dx\\ &\leq C (\|m\|_{L^2(Q)}+ \|\frac{\delta G}{\delta m}(x,1)(m(x,T) )\|_{L^2(Q)}) \|m(x,0)\|_{H^{-1}(\Omega)}\\ &\leq C\|m\|_{L^2(Q)} \|m(x,0)\|_{H^{-1}(\Omega)}, \end{align*} which readily proves the statement of the lemma and competes its proof. \end{proof} By following a similar argument, one can show that \begin{lem}\label{prior estimate2} Suppose there is $M>\varepsilon>0$ such that $\varepsilon<F(x)<M$. Let $(u,m)\in \big(L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega)\big)\times H_{-}(Q)$ be a solution to the following system \begin{equation}\label{prior estimate 2} \begin{cases} u_t-\Delta u= F(x)m & \text{ in } Q,\medskip\\ -m_t-\Delta m-\Delta u=0 & \text{ in } Q,\medskip\\ u(x,t)=m(x,t)=0 & \text{ on } \Sigma,\medskip\\ u(x,0)= 0 & \text{ in } \Omega.\\ \end{cases} \end{equation} we have $$ \|m\|_{L^2(Q)}\leq C\|m(x,T)\|_{H^{-1}(\Omega)}.$$ \end{lem} \subsection{Construction of CGO solutions} In this subsection, we focus on constructing certain solutions of a special form for the linearized systems, which shall serve as the ``probing modes" for our inverse problem. Those special solutions are referred to as the CGO (complex geometric optic) solutions. 
Next, we consider the weighted Hilbert space $L^2(Q; e^{2\lambda^2t})$ and $L^2(Q; e^{-2\lambda^2t}) $ with the scalar products $$\left<u,v\right>_{\lambda^2}=\int_Q\quad u(x,t)v(x,t) e^{2\lambda^2t } \quad dxdt,$$ $$\left<u,v\right>_{-\lambda^2}=\int_Q\quad u(x,t)v(x,t) e^{-2\lambda^2t } \quad dxdt,$$ respectively. Similarly, we define $L^2(\Sigma; e^{2\lambda^2t})$ and $L^2(\Sigma; e^{-2\lambda^2t})$. \begin{thm}\label{construct CGO 1} Consider the system \begin{equation}\label{CGO} \begin{cases} -u_t-\Delta u= F(x)m & \text{ in } Q,\medskip\\ m_t-\Delta m-\Delta u=0 & \text{ in } Q,\medskip\\ m(x,t)=u(x,t)=0 & \text{ on } \Sigma,\medskip\\ \displaystyle{u(x,T)=\frac{\delta G}{\delta m}(x,1)(m(x,T)) } & \text{ in } \Omega. \\ \end{cases} \end{equation} Assume that $F$ is uniformly bounded by $M>0$ and $G\in\mathcal{B}$. Let $\theta_{+}=1-e^{-\lambda^{3/4}t}$ where $\lambda$ is large enough (depending only on $M$ and $\Omega$). Then we have that: \begin{enumerate} \item[(i)] the system $\eqref{CGO}$ has a solution in the form $$(u,m)=\Big(u, e^{-\lambda^2t-\lambda\mathrm{i}x\cdot\xi}(\theta_{+} e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)}+ w_{+}(x,t) )\Big),$$ such that $\xi,\eta\in\mathbb{S}^{n-1}$, $\xi\cdot\eta=0$, $\tau\in\mathbb{R}$ and \begin{equation} \begin{aligned} &\lim\limits_{\lambda\to\infty} \|w_{+}\|_{L^2(Q)}=0. \end{aligned} \end{equation} Here and also in what follows, $\mathrm{i}:=\sqrt{-1}$ is the imaginary unit. \item[(ii)] the solution $(u,m)$ in (i) satisfies $u\in L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega))$ and $m(x,t)\in H_{+}.$ \end{enumerate} \end{thm} \begin{proof} (i).~Define $\rho=e^{-\lambda^2t- \lambda\mathrm{i}x\cdot\xi} w_{+}:=\psi w_{+}.$ It can be directly verified that $(u,\rho)$ is a solution of the following system: \begin{equation}\label{CGO'} \begin{cases} -u_t-\Delta u-F\rho = F(x)\theta_{+}\psi e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} & \text{ in } Q,\medskip\\ \rho_t-\Delta\rho-\Delta u=-\psi e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)}[\lambda^{3/4}e^{-\lambda^{3/4}t}-\mathrm{i}\theta_{+}\tau-\theta_{+}] & \text{ in } Q,\medskip\\ u(x,t)=0 & \text{ on } \Sigma,\medskip\\ \rho(x,t)=e^{-\lambda^2t-\lambda\mathrm{i}x\cdot\xi}(\theta_{+} e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} ) & \text{ on } \Sigma,\medskip\\ \displaystyle{u(x,T)= \frac{\delta G}{\delta m}(x,1)(m(x,T))} & \text{ in } \Omega. \\ \end{cases} \end{equation} Next, we define a map $\mathcal{M}$ form $ J:= \{\rho : e^{\lambda^2t}\rho\in H_{+}(\Omega) \} $ to itself as follows. Let $\rho\in J\subset L^2(Q; e^{2\lambda^2t}) $ and $u$ be the solution of the system: \begin{equation}\label{define M pre} \begin{cases} -u_t-\Delta u-F\rho = F(x)\theta_{+}\psi e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} & \text{ in } Q,\medskip\\ u(x,t)=0 & \text{ on } \Sigma, \medskip\\ \displaystyle{u(x,T)=\frac{\delta G}{\delta m}(x,1)\Big(e^{-\lambda^2T-\lambda\mathrm{i}x\cdot\xi}\theta_{+}(T) e^{-\mathrm{i}(x,T)\cdot (\eta,\tau)}+ \rho(x,T)\Big)} & \text{ in } \Omega.\\ \end{cases} \end{equation} Then we consider the following system \begin{equation}\label{define M} \begin{cases} (\mathcal{M}(\rho))_t-\Delta(\mathcal{M}(\rho))-\Delta u=-\psi e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)}[\lambda^{3/4}e^{-\lambda^{3/4}t}-\mathrm{i}\theta_{+}\tau-\theta_{+}] & \text{ in } Q, \medskip\\ (\mathcal{M}(\rho))(x,t)= e^{-\lambda^2t-\lambda\mathrm{i}x\cdot\xi}(\theta_{+} e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} )& \text{ on } \Sigma. 
\\ \end{cases} \end{equation} Let \[ H(x,t)= -\psi e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)}[\lambda^{3/4}e^{-\lambda^{3/4}t}-\mathrm{i}\theta_{+}\tau-\theta_{+}], \] and \[ h(x,t)=e^{-\lambda^2t-\lambda\mathrm{i}x\cdot\xi}(\theta_{+} e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} ). \] Next, we define a map $\mathcal{L}:X \to \mathbb{R}$, where $$ X:=\{( -\p_t-\Delta)f : f(x,t)\in C_0^{2}(\overline{Q}) \},$$ by \[ \mathcal{L}(( -\p_t-\Delta)f)=\left< f,\Delta u+H(x,t) \right>_0-\left<\p_{\nu}f,h(x,t)\right>_0. \] By Lemma $\ref{Carleman 1}$, we have \begin{equation}\label{bouned map} \begin{aligned} &| \mathcal{L}(( -\p_t-\Delta)f)|\\ =&|\left< f,H(x,t) \right>-\left<\p_{\nu}f,h(x,t)\right>|\\ \leq& \|f\|_{L^2(Q;e^{-2\lambda^2t})}\|H\|_{L^2(Q;e^{2\lambda^2t}) }+ \|\Delta f\|_{L^2(Q;e^{-2\lambda^2t})}\| u\|_{L^2(Q;e^{2\lambda^2t}) }\\ &+\|\p_{\nu}f \|_{L^2(\Sigma;e^{-2\lambda^2t})} \|h\|_{L^2(\Sigma;e^{2\lambda^2t}) }\\ \leq & C \Big[\lambda^{-2}\|H\|_{L^2(Q;e^{2\lambda^2t}) }+\lambda^{-1}\|h\|_{L^2(\Sigma;e^{2\lambda^2t})}+\| u\|_{L^2(Q;e^{2\lambda^2t}) } \Big]\\ &\times \|( -\p_t-\Delta)f)\|_{L^2(Q;e^{-2\lambda^2t})}. \end{aligned} \end{equation} Notice that $\eqref{define M pre}$ can be rewritten as \begin{equation}\label{define M pre'} \begin{cases} -(e^{\lambda^2t}u)_t-\Delta(e^{\lambda^2t}u)+\lambda^2(e^{\lambda^2t}u)-F(\rho e^{\lambda^2t} )= F(x)\theta_{+}e^{- \lambda\mathrm{i}\xi\cdot x} e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} & \text{ in } Q,\medskip\\ (e^{\lambda^2t}u)(x,t)=0 & \text{ on } \Sigma,\medskip\\ \displaystyle{(e^{\lambda^2t}u)(x,T)=\frac{\delta G}{\delta m}(x,1)\Big( e^{- \lambda\mathrm{i}\xi\cdot x}e^{-\mathrm{i}(x,T)\cdot (\eta,\tau)}\theta_{+}(T) +e^{\lambda^2T}\rho(x,T) \Big)} & \text{ in } \Omega.\\ \end{cases} \end{equation} Considering the system \eqref{define M pre'} and letting $v= e^{\lambda^2t}u$, we have $ v\in L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega))$ and \begin{equation}\label{car corollary} \begin{aligned} \lambda^2 \int_{Q} v^2 dxdt &=\int_{Q} \quad \Big(F(\rho e^{\lambda^2t} )+F(x)\theta_{+}e^{- \lambda\mathrm{i}\xi\cdot x}e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} \Big)v + v_tv+\Delta v v \quad dxdt\\ &=\int_{Q} \Big(F(\rho e^{\lambda^2t} )+F(x)\theta_{+}e^{- \lambda\mathrm{i}\xi\cdot x}e^{-\mathrm{i}(x,t)\cdot (\eta,\tau)} \Big)v + \frac{1}{2}\frac{d}{dt}v^2-|\nabla v|^2 dxdt\\ &\leq \| F(\rho e^{\lambda^2t} )+F(x)\theta_{+}e^{- \lambda\mathrm{i}\xi\cdot x} \|_{L^2(Q)}\|v\|_{L^2(Q)}+\frac{1}{2}\int_{\Omega} v(x,T)^2 dx\\ &\leq C\|\rho\|_{L^2(Q;e^{2\lambda^2t })}. \end{aligned} \end{equation} Hence, \begin{equation}\label{app of u} \| u\|_{L^2(Q;e^{2\lambda^2t})}\leq C\lambda^{-2}\|\rho\|_{L^2(Q;e^{2\lambda^2t })}. \end{equation} It follows that $\mathcal{L}$ is bounded in $X$. By Hahn Banach's extension theorem, $\mathcal{L}$ can be extended to a bounded linear map on $L^2(Q; e^{-2\lambda^2t})$. Hence, there exisits $p\in L^2(Q; e^{2\lambda^2t})$ such that $$\left< f,\Delta u+H(x,t) \right>_0-\left<\p_{\nu}f,h(x,t)\right>_0= \left<( -\p_t-\Delta)f) ,p \right>_{0}, $$ and \begin{equation}\label{risze} \begin{aligned} \|p\|_{L^2(Q; e^{2\lambda^2t})} &\leq C \Big[\lambda^{-2}\|H\|_{L^2(Q;e^{2\lambda^2t}) }+\lambda^{-1}\|h\|_{L^2(\Sigma;e^{2\lambda^2t})}+\| u\|_{L^2(Q;e^{2\lambda^2t})} \Big]\\ &\leq C \Big[\lambda^{-1/2}+\lambda^{-1}+\| u\|_{L^2(Q;e^{2\lambda^2t})} \Big]. \end{aligned} \end{equation} Therefore, $p$ is a solution of $\eqref{define M}$ and we define $$\mathcal{M}(\rho)=p.$$ Now, $\mathcal{M}$ is a map from $J$ to itself. Let $\rho_1,\rho_2\in J$. 
By a subtraction, we have \begin{equation} \begin{cases} (\mathcal{M}(\rho_1)- \mathcal{M}(\rho_2))_t-\Delta(\mathcal{M}(\rho_1)- \mathcal{M}(\rho_2))-\Delta (u_1-u_2)=0 & \text{ in } Q,\medskip\\ (\mathcal{M}(\rho_1)- \mathcal{M}(\rho_2))(x,t)= 0& \text{ on } \Sigma,\\ \end{cases} \end{equation} where $u_1-u_2$ satisfies \begin{equation} \begin{cases} -(u_1-u_2)_t-\Delta (u_1-u_2)-F(\rho_1-\rho_2) =0 & \text{ in } Q,\medskip\\ (u_1-u_2)(x,t)=0 & \text{ on } \Sigma, \medskip\\ (u_1-u_2)(x,T)=(\rho_1-\rho_2) (x,T)) & \text{ in } \Omega.\\ \end{cases} \end{equation} Then by the definition of $\mathcal{M}(\rho_1)$, $\mathcal{M}(\rho_2)$ and simialr arguement above, we have for any $(-\p_t-\Delta)f\in X$, \begin{equation} \begin{aligned} &|\left< (-\p_t-\Delta)f, \mathcal{M}(\rho_1)- \mathcal{M}(\rho_2)\right>_0|\\ =&|\left<f,\Delta (u_1-u_2)\right>_0|\\ \leq& C \Big(\lambda^{-2}\|\rho_1-\rho_2\|_{L^2(Q; e^{2\lambda^2t}) } \Big)\|( -\p_t-\Delta)f)\|_{L^2(Q;e^{-2\lambda^2t})}. \end{aligned} \end{equation} It follows that \begin{equation}\label{final app} \|\mathcal{M}(\rho_1)- \mathcal{M}(\rho_2)\|_{L^2(Q; e^{2\lambda^2t})} \leq C \Big(\lambda^{-2}\|\rho_1-\rho_2\|_{L^2(Q; e^{2\lambda^2t}) } \Big). \end{equation} Therefore, $\mathcal{M}$ is a contraction mapping if $\lambda$ is large enough. By Banach's Fixed Point Theorem, there exists $\rho\in J$ such that $ \mathcal{M}(\rho)=\rho.$ Clearly, it is a solution of the system \eqref{CGO'}. Finally, by combining \eqref{risze} and \eqref{app of u}, we have that \begin{equation}\label{final app'} \|\rho\|_{L^2(Q; e^{2\lambda^2t})} \leq C \Big(\lambda^{-1/2}+\lambda^{-1} \Big)\frac{\lambda^2-C}{\lambda^2}. \end{equation} Therefore, it holds that \begin{equation*} \begin{aligned} &\lim\limits_{\lambda\to\infty} \|w_{+}\|_{L^2(Q)}=\lim\limits_{\lambda\to\infty}\|\rho\|_{L^2(Q; e^{2\lambda^2t})}= 0. \end{aligned} \end{equation*} \bigskip \noindent (ii).~Since $m\in H_{+}$ and $G\in \mathcal{B}$, we have $\frac{\delta G}{\delta m}(x,1)(m(x,T)) \in L^2(\Omega)$. Hence, $u\in L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega))$. The proof is complete. \end{proof} By following similar arguments in the previous theorem, we can derive a similar construction for the adjoint system of $\eqref{CGO}$. In fact, we have \begin{thm}\label{CGO2} Consider the system \begin{equation}\label{CGO-} \begin{cases} u_t-\Delta u= F(x)m & \text{ in } Q, \medskip\\ -m_t-\Delta m-\Delta u=0 & \text{ in } Q,\medskip\\ u(x,t)=m(x,t)=0 & \text{ on } \Sigma, \medskip\\ u(x,0)= 0& \text{ in } \Omega. \end{cases} \end{equation} Assume $F$ is uniformly bounded by $M>0$. Let $\theta_{-}=1-e^{-\lambda^{3/4}(T-t)}$. Then we have that \begin{enumerate} \item[(i)] the system $\eqref{CGO-}$ has a solution in the form $$(u,m)=\Big(u, e^{\lambda^2t+\lambda\mathrm{i}\xi\cdot x}(\theta_{-}e^{\mathrm{i}(x,t)(\eta,\tau)}+ w_{-}(x,t) )\Big),$$ such that $\xi,\eta\in\mathbb{S}^{n-1}$, $\xi\cdot\eta=0$, $\tau\in\mathbb{R}$ and \begin{equation} \begin{aligned} &\lim\limits_{\lambda\to\infty} \|w_{-}\|_{L^2(Q)}=0. \end{aligned} \end{equation} \item[(ii)] the solution $(u,m)$ in (i) satisfies $u\in L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega))$ and $m\in H_{-}(\Omega).$ \end{enumerate} \end{thm} Now we can construct a sequence of solutions $(u,m)\in \big(L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega)\big)\times H_{\pm}(Q)$ to the linearized systems. However, we need consider the forward problem in H\"older spaces. 
So, we need the following approximation property in the proof of Theorem $\ref{der F}$ in what follows. \begin{lem}\label{Runge approximation} Suppose there is $M>\varepsilon>0$ such that $\varepsilon<F(x)<M$. For any solution $ (u,m)\in \big(L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega)\big)\times H_{+}(Q)$ to \begin{equation}\label{Runge 1} \begin{cases} -u_t-\Delta u= F(x)m & \text{ in } Q,\medskip\\ m_t-\Delta m-\Delta u=0 & \text{ in } Q, \medskip\\ u(x,t)=m(x,t)=0 & \text{ on } \Sigma, \medskip\\ \displaystyle{u(x,T)=\frac{\delta G}{\delta m}(x,1)(m(x,T) ) } & \text{ in } \Omega,\\ \end{cases} \end{equation} and any $\eta>0$, there exists a solution $(\hat{u},\hat{m})\in\Big[ C^{1+\frac{\alpha}{2},2+\alpha}(\overline{Q})\Big]^2$ to $\eqref{Runge 1}$ such that $$\|m-\hat{m}\|_{L^2(Q)}\leq \eta. $$ \end{lem} \begin{proof} Let $ (u,m)\in \big(L^2(0,T; H_0^1(\Omega))\cap H^1(0,T;H^{-1}(\Omega)\big)\times H_{+}(Q)$ be a solution to the system $\eqref{Runge 1}.$ Let $\widetilde{m}(x)= m(x,0)$. Then by Theorem $\ref{construct CGO 1}$, we have $ \widetilde{m}(x)\in H^{-1}(\Omega).$ Since $C^{2+\alpha}(\Omega) $ is dense in $H^{-1}(\Omega)$, for any $\eta>0$, there exists $\widetilde{M}\in C^{2+\alpha}(\Omega)$ such that $ \|\widetilde{m}-\widetilde{M} \|_{H^{-1}(\Omega)}\leq\eta.$ Then by Lemma $\ref{linear app unique}$, there is a solution $(\hat{u},\hat{m})\in\Big[ C^{1+\frac{\alpha}{2},2+\alpha}(\overline{Q})\Big]^2$ to the PDE system: \begin{equation}\label{Runge 2} \begin{cases} -\hat{u}_t-\Delta \hat{u}= F(x)\hat{m} & \text{ in } Q,\medskip\\ \hat{m}_t-\Delta\hat{m}-\Delta\hat{u}=0 & \text{ in } Q, \medskip\\ \hat{u}(x,t)=\hat{m}(x,t)=0 & \text{ on } \Sigma, \medskip\\ \displaystyle{\hat{u}(x,T)=\frac{\delta G}{\delta m}(x,1) (\hat{m}(x,T) )} & \text{ in } \Omega,\medskip\\ \hat{m}(x,0)=\widetilde{M} & \text{ in } \Omega.\\ \end{cases} \end{equation} Then we have \begin{equation}\label{Runge 3} \begin{cases} -(u-\hat{u})_t-\Delta( u-\hat{u})= F(x)(m-\hat{m}) & \text{ in } Q, \medskip\\ (m-\hat{m})_t-\Delta(m-\hat{m})-\Delta( u-\hat{u})=0 & \text{ in } Q, \medskip\\ (u-\hat{u})(x,t)=(m-\hat{m})(x,t)=0 & \text{ on } \Sigma, \medskip\\ \displaystyle{(u-\hat{u})(x,T)=\frac{\delta G}{\delta m}(x,1) ((m-\hat{m})(x,T)) } & \text{ in } \Omega,\medskip\\ (m-\hat{m})(x,0)=\widetilde{m}-\widetilde{M} & \text{ in } \Omega.\\ \end{cases} \end{equation} Hence, by Lemma $\ref{prior estimate}$, $$ \|m-\hat{m}\|_{L^2(Q)}\leq C \|\widetilde{m}-\widetilde{M} \|_{H^{-1}(\Omega)}\leq C \eta,$$ which readily completes the proof. \end{proof} \section{Proof of Theorem ~\ref{der F}} Before the main proof, we present a key observation as a lemma first. \begin{lem}\label{key} Let $(v,\rho)$ be a solution of the following system: \begin{equation} \begin{cases} v_t-\Delta v= F(x)\rho & \text{ in } Q,\medskip\\ -\rho_t-\Delta \rho-\Delta v=0 & \text{ in } Q, \medskip\\ v(x,t)=\rho(x,t)=0 & \text{ on } \Sigma, \medskip\\ v(x,0)= 0& \text{ in } \Omega. 
\end{cases} \end{equation} Let $(\overline{u},\overline{m})$ satisfy \begin{equation} \begin{cases} -\overline{u}_t-\Delta \overline{u}- F_1(x)\overline{m}=(F_1-F_2)m_2 & \text{ in } Q, \medskip\\ \overline{m}_t-\Delta \overline{m}-\Delta \overline{u}=0 & \text{ in } Q, \medskip\\ \overline{u}(x,t)= \overline{m}(x,t)=0 & \text{ on } \Sigma, \medskip\\ \displaystyle{\overline{u}(x,T)=\frac{\delta G}{\delta m}(x,1)(\overline{m}(x,T))} &\text{ in } \Omega, \medskip\\ \overline{u}(x,0)=0 & \text{ in } \Omega, \medskip\\ \overline{m}(x,0)=\overline{m}(x,T)=0& \text{ in } \Omega\\ \end{cases} \end{equation} Then we have \begin{equation}\label{implies F_1=F_2} \int_Q \quad (F_1-F_2)m_2\rho \ dxdt=0. \end{equation} \end{lem} \begin{proof} Notice that \begin{equation}\label{IBP1} \begin{aligned} 0&=\int_Q (\overline{m}_t-\Delta \overline{m}-\Delta \overline{u} )\rho\ dxdt\\ &=\int_{\Omega}\overline{m}\rho\Big|_0^T dx-\int_Q\overline{m}\rho_t dxdt-\int_Q\rho(\Delta\overline{m}+\Delta\overline{u})\ dxdt\\ &=-\int_Q\overline{m}( -\Delta\rho-\Delta v)dxdt-\int_Q\rho( \Delta\overline{m}+\Delta\overline{u})\ dxdt\\ &=\int_Q (\overline{m}\Delta v-\rho\Delta\overline{u})\ dxdt. \end{aligned} \end{equation} Similarly, one can deduce that \begin{equation}\label{IBP2} \begin{aligned} 0&=\int_Q (\overline{m}_t-\Delta \overline{m}-\Delta \overline{u} )v\ dxdt\\ &=-\int_Q\overline{m}(\Delta v+F_1\rho)dxdt-\int_Q v(\Delta\overline{m}+\Delta\overline{u} )\ dxdt\\ &=-\int_Q(2\overline{m}\Delta v+F_1\rho\overline{m}+\overline{u} \Delta v)\ dxdt. \end{aligned} \end{equation} Then we have \begin{align*} \int_Q (F_1-F_2)m_2\rho dxdt&=\int_Q\quad \rho(-\overline{u}_t-\Delta\overline{u}-F_1\overline{m})\ dxdt\\ &=\int_{\Omega}\overline{u}\rho\Big|_0^T dx+\int_Q\quad (\rho_t\overline{u}-\rho\Delta\overline{u}-F_1\rho\overline{m})\ dxdt\\ &=\int_{\Omega} \overline{u}(x,T)\rho(x,T) dx+ \int_Q\quad (-\Delta\rho-\Delta v)-\rho\Delta\overline{u}-F_1\rho\overline{m}\ dxdt\\ &=\int_{\Omega} \frac{\delta G}{\delta m}(x,1)(\overline{m}(x,T))\rho(x,T) dx+ \int_Q\quad (-2\rho\Delta\overline{u}-\overline{u}\Delta v-F_1\rho\overline{m})\ dxdt\\ &=\int_Q\quad (-2\rho\Delta\overline{u}-\overline{u}\Delta v-F_1\rho\overline{m})\ dxdt, \end{align*} which in combination with $\eqref{IBP1}$ and $\eqref{IBP2}$ readily yields that $$ \int_Q \quad (F_1-F_2)m_2\rho \ dxdt=0.$$ The proof is complete. \end{proof} With all the preparations, we are in a position to present the proof of Theorem~\ref{der F}. \begin{proof}[ Proof of Theorem $\ref{der F}$ ] For $j=1,2$, let us consider \begin{equation}\label{MFG 1,2} \begin{cases} -u_t-\Delta u+\frac{1}{2}|\nabla u|^2= F_j(x,m) & \text{ in } Q',\medskip\\ m_t-\Delta m-\text{div} (m\nabla u)=0 & \text{ in } Q', \medskip\\ \p_{\nu}u(x,t)=\p_{\nu}m(x,t)=0 & \text{ on } \Sigma', \medskip\\ u(x,T)=G(x,m(x,T)) & \text{ in } \Omega',\medskip\\ m(x,0)=m_0(x) & \text{ in } \Omega'.\\ \end{cases} \end{equation} Next, we divide our proof into three steps. 
\bigskip \noindent {\bf Step I.}~First, we do the first order linearization to the MFG system \eqref{MFG 1,2} in $Q$ and can derive: \begin{equation}\label{linearization} \begin{cases} -\p_{t}u^{(1)}_j-\Delta u_j^{(1)}= F_j^{(1)}(x)m_j^{(1)} & \text{ in } Q, \medskip\\ \p_{t}m^{(1)} _j-\Delta m_j^{(1)} -\Delta u_j^{(1)}=0 & \text{ in } Q, \medskip\\ \displaystyle{u^{(1)}_j(x,T)=\frac{\delta G}{\delta m}(x,1)(m^{(1)}(x,T))} & \text{ in } \Omega,\medskip\\ m^{(1)} _j(x,0)=f_1(x) & \text{ in } \Omega.\\ \end{cases} \end{equation} Let $\overline{u}^{(1)}=u^{(1)}_1-u^{(1)}_2$ and $ \overline{m}^{(1)}=m^{(1)} _1-m^{(1)} _2. $ Let $(v,\rho)$ be a solution to the following system \begin{equation}\label{adjoint} \begin{cases} v_t-\Delta v= F^{(1)}_1(x)\rho & \text{ in } Q, \medskip\\ -\rho_t-\Delta \rho-\Delta v=0 & \text{ in } Q, \medskip\\ v(x,t)=\rho(x,t)=0 & \text{ on } \Sigma, \medskip\\ v(x,0)= 0& \text{ in } \Omega. \end{cases} \end{equation} Since $\mathcal{N}_{F_1}=\mathcal{N}_{F_2}$, by Lemma $\ref{key}$, we have \begin{equation}\label{implies to 0;1} \int_Q \quad( F^{(1)}_1(x)-F^{(1)}_2(x))m^{(1)} _2\rho \ dxdt =0, \end{equation} for all $ m^{(1)} _2\in C^{1+\frac{\alpha}{2},2+\alpha}(Q)$ with $m^{(1)} _2 $ being a solution to $\eqref{linearization}.$ By Theorems $\ref{construct CGO 1}$ and $\ref{CGO2}$, there exist $$ m^{(1)} _2(x,t;\lambda)\in H_{+}(Q), \rho(x,t;\lambda)\in H_{-}(Q)$$ in the form $$m^{(1)} _2= e^{-\lambda^2t-\lambda\mathrm{i}\xi_1\cdot x }(\theta_{+}e^{-\mathrm{i}(x,t)(\eta_1,\tau_1)}+ w_{+}(x,t;\lambda) ),$$ $$\rho=e^{\lambda^2t+\lambda\mathrm{i}\xi_2\cdot x}(\theta_{-}e^{ -\mathrm{i}(x,t)(\eta_2,\tau_2) }+ w_{-}(x,t;\lambda) ).$$ Furthermore, we have $$\lim\limits_{\lambda\to\infty}\|w_{\pm}(x,t;\lambda)\|=0.$$ Now, by Lemma $\ref{Runge approximation}$, there exist a sequence of solutions $\hat{m}_k\in C^{1+\frac{\alpha}{2},2+\alpha}(Q)$ to $\eqref{linearization}$ and $\hat{\rho}_k\in C^{1+\frac{\alpha}{2},2+\alpha}(Q)$ to $\eqref{adjoint}$ such that $$\lim\limits_{k\to\infty}\|\hat{m}_k-m^{(1)} _2(x,t;\lambda) \|_{L^2(Q)}=0,$$ $$\lim\limits_{k\to\infty}\|\hat{\rho}_k-\rho(x,t;\lambda) \|_{L^2(Q)}=0.$$ Therefore, $\eqref{implies to 0;1}$ implies that \begin{equation}\label{implies to 0;2} \int_Q \quad( F^{(1)}_1(x)-F^{(1)}_2(x)\hat{m}_k\rho \ dxdt =0, \end{equation} for all $k\in\mathbb{N}.$ Let $k,\lambda\to\infty$ in $\eqref{implies to 0;2}$, we have \begin{equation}\label{implies to 0;3} \int_Q \quad( F^{(1)}_1(x)-F^{(1)}_2(x))e^{-\mathrm{i}(\xi_1+\xi_2,\tau_1+\tau_2)\cdot (x,t)} \ dxdt =0, \end{equation} for all $\xi_1,\xi_2\in\mathbb{S}^{n-1}$ and $\tau_1,\tau_2\in\mathbb{R}.$ Hence, the Fourier transform of $ ( F^{(1)}_1(x)-F^{(1)}_2(x))$ vanishes in an open set of $\mathbb{R}^n$. 
Therefore, we have $$ F^{(1)}_1(x)=F^{(1)}_2(x):= F^{(1)}(x)$$ in $\Omega.$ \bigskip \noindent{\bf Step II.}~We proceed to consider the second linearization to the MFG system $\eqref{MFG 1,2}$ in $Q$ and can obtain for $j=1,2$: \begin{equation} \begin{cases} -\p_tu_j^{(1,2)}-\Delta u_j^{(1,2)}(x,t)+\nabla u_j^{(1)}\cdot \nabla u_j^{(2)}\medskip\\ \hspace*{3cm}= F^{(1)}(x,t)m_j^{(1,2)}+F^{(2)}(x,t)m_j^{(1)}m_j^{(2)} & \text{ in } \Omega\times(0,T),\medskip\\ \p_t m_j^{(1,2)}-\Delta m_j^{(1,2)}-\Delta u_j^{(1,2)}= {\rm div} (m_j^{(1)}\nabla u_j^{(2)})+{\rm div}(m_j^{(2)}\nabla u_j^{(1)}) ,&\text{ in } \Omega\times (0,T) \medskip\\ \displaystyle{u_j^{(1,2)}(x,T)=\frac{\delta G}{\delta m}(x,1)(m_j^{(1,2)}(x,T))+\frac{\delta^2 G}{\delta m^2}(x,1)(m_j^{(1)}m_j^{(2)}(x,T))} & \text{ in } \Omega,\medskip\\ m_j^{(1,2)}(x,0)=0 & \text{ in } \Omega.\\ \end{cases} \end{equation} By the proof in Step~I, we have $ (u_1^{(1)},m_1^{(1)})=( u_2^{(1)},m_2^{(1)})$. Define $\overline{u}^{(1,2)}=u_1^{(1,2)}-u_2^{(1,2)} $ and $\overline{m}^{(1,2)}=m_1^{(1,2)}-m_2^{(1,2)} $. Since $\mathcal{N}_{F_1}=\mathcal{N}_{F_2}$, we have \begin{equation} \begin{cases} -\overline{u}^{(1,2)}_t-\Delta \overline{u}^{(1,2)}- F_1(x)\overline{m}^{(1,2)}=(F^{(2)}_1-F^{(2)}_2)m_1^{(1)}m_2^{(1)} & \text{ in } Q\\ \overline{m}^{(1,2)}_t-\Delta \overline{m}^{(1,2)}-\Delta \overline{u}^{(1,2)}=0 & \text{ in } Q, \medskip\\ \overline{u}^{(1,2)}(x,t)= \overline{m}^{(1,2)}(x,t)=0 & \text{ on } \Sigma, \medskip\\ \displaystyle{\overline{u}^{(1,2)}(x,T)=\frac{\delta G}{\delta m}(x,1)(\overline{m}^{(1,2)}(x,T))} &\text{ in } \Omega, \medskip\\ \overline{u}^{(1,2)}(x,0)=0 & \text{ in } \Omega, \medskip\\ \overline{m}^{(1,2)}(x,0)=\overline{m}^{(1,2)}(x,T)=0& \text{ in } \Omega.\\ \end{cases} \end{equation} Let $(v,\rho)$ be a solution to the following system \begin{equation} \begin{cases} v_t-\Delta v= F^{(1)}_1(x)\rho & \text{ in } Q, \medskip\\ -\rho_t-\Delta \rho-\Delta v=0 & \text{ in } Q, \medskip\\ v(x,t)=\rho(x,t)=0 & \text{ on } \Sigma, \medskip\\ v(x,0)= 0& \text{ in } \Omega. \end{cases} \end{equation} By Lemma $\ref{key}$, we have \begin{equation} \int_Q \quad( F^{(2)}_1(x)-F^{(2)}_2(x))m^{(1)} _2m_2^{(1)} \rho \ dxdt =0. \end{equation} Next, by a similar argument in the proof of Step~I, we can derive that $( F^{(2)}_1(x)-F^{(2)}_2(x))m^{(1)} _2=0$ for all $m^{(1)} _2 $ as long as it is a solution to $\eqref{linearization}$. We choose $m^{(1)} _2(x,0)\in C^{2+\alpha}(\Omega)$ and it is positive in $\Omega $. Since $m$ satisfies \begin{equation} \begin{cases} \p_{t}m^{(1)} _2-\Delta m_2^{(1)} -\Delta u_2^{(1)}=0 & \text{ in } Q,\medskip\\ m^{(1)} _2(x,t)=0 & \text{ on } \Sigma, \medskip\\ m^{(1)} _2(x,0)>0& \text{ in } \Omega,\\ \end{cases} \end{equation} and $u\in C^{1+\frac{\alpha}{2},2+\alpha}(Q)$, we have $ m_2^{(1)}$ cannot be zero in any open set of $Q$. Therefore, we have $$ F^{(2)}_1(x)-F^{(2)}_2(x)=0. $$ \bigskip \noindent{Step~III.}~Finally, by mathematical induction and repeating similar arguments as those in the proofs of Steps I and II, one can show that $$F^{(k)}_1(x)-F^{(k)}_2(x)=0 ,$$ for all $k\in\mathbb{N}$. Hence, $F_1(x)=F_2(x).$ The proof is complete. \end{proof} \section*{Acknowledgements} The work of was supported by the Hong Kong RGC General Research Funds (projects 12302919, 12301420 and 11300821), the NSFC/RGC Joint Research Fund (project N\_CityU 101/21), and the France-Hong Kong ANR/RGC Joint Research Grant, A-CityU203/19.
1,116,691,501,411
arxiv
\section{Introduction} Weyl semimetals (WSMs) have created vast interest in recent years due to their novel electronic and transport properties \cite{hu2019transport,RevModPhys.90.015001}, such as very high electron mobilities \cite{shekhar2015extremely}, Fermi arcs on the surface \cite{wan2011topological,jia2016weyl}, extremely large magnetoresistance \cite{son2013chiral,shekhar2015extremely}, anomalous Hall effect \cite{burkov2014anomalous,Shekhar9140}, and the anomalous Nernst effect \cite{ikhlas2017large,sakai2018giant}. WSMs also exhibit unconventional optical properties, such as large and quantized photo-currents \cite{taguchi2016photovoltaic,chan2017photocurrents,de2017quantized,osterhoudt2017colossal}, second-harmonic generation \cite{morimoto2016topological,wu2017giant}, and Kerr rotation \cite{feng2015large,higo2018large}. These properties can lead to more efficient electronic and photonic applications \cite{hu2019transport}. WSMs are an especial class of the topological materials, characterized by the crossings of singly degenerate energy bands near the Fermi energy, leading to the formation of pairs of Weyl nodes \cite{Xu15,Xu2015,hasan17}. WSMs provide a platform for manipulating and understanding the physics of the chiral Weyl fermions \cite{huang2015observation,yan2017topological}. Inversion symmetry (IS) or time-reversal symmetry (TRS) must be broken to obtain Weyl nodes/WSMs \cite{RevModPhys.90.015001,yan2017topological,AJR2P}. WSMs with broken IS have been investigated extensively \cite{Xu15,Xu2015,shekhar2015extremely}, while WSMs with broken TRS, known as magnetic WSMs, were recently discovered in experiments\cite{ikhlas2017large,belopolski2019discovery}. Magnetic WSMs created much interest because, in this class of WSMs, the properties can be manipulated using a magnetic field as an external degree of freedom. Heusler alloys have emerged as an important class of materials to investigate the Weyl physics and its consequences \cite{liu2017nonmagnetic,belopolski2019discovery,manna2018colossal,ernst2019anomalous,dulal2019weak,nakajima2015topological,kim2018beyond,Shekhar9140}. In Heusler compounds, we either look at half-Heusler or inverse Heusler compounds (IS breaking) \cite{liu2017nonmagnetic}, at magnetic compounds (TRS breaking) \cite{belopolski2019discovery,manna2018colossal,ernst2019anomalous,dulal2019weak}, or compounds with both IS and TRS breaking \cite{nakajima2015topological,kim2018beyond,Shekhar9140}. In most magnetic Heuslers, the magnetization direction can be changed quite easily. Since the location of Weyl nodes in the momentum space depends on the direction of magnetization, Heusler compounds can prove to be useful to understand the physics of Weyl fermions. Combined with their extensive tunability, Heusler WSMs are a promising platform for practical topological applications \cite{hu2019transport,RevModPhys.90.015001}. Co\textsubscript{2}MnGa has been theoretically predicted and experimentally proven to be a WSM. The Weyl nodes lie close to the Fermi energy \cite{belopolski2019discovery,chang2017topological,belopolski2019discovery}, and transport measurements have shown large anomalous Hall and Nernst effects \cite{sakai2018giant,reichlova2018large,guin2019anomalous,takashi2019signs,markou2019thickness,park2020thickness}. Besides Co$_2$MnGa, other Heusler compounds have also been predicted to be WSM \cite{kubler2016weyl,chang2016room,wang2016time,chadov2017stability}. 
Although, for most of the proposed Heusler WSMs, the Weyl nodes lie away from the Fermi energy, which reduces the topological properties of these materials \cite{kubler2016weyl,chang2016room,wang2016time,ernst2019anomalous,chadov2017stability,manna2018colossal}. By tuning the Fermi energy, it can coincide with the energy of the nodes, which can significantly improve the properties \cite{wang2016time,kushwaha2018magnetic,yang2019magnetic}. Co\textsubscript{2}TiSn, a Heusler compound that has a high Curie temperature and shows half-metallic behavior \cite{barth2010itinerant,barth2011anomalous,ooka2016magnetization,bainsla2016spin,shigeta2018pressure}, has been proposed as a WSM candidate \cite{chang2016room,wang2016time,ernst2019anomalous}. Co\textsubscript{2}TiSn has 26 valence electrons and has Weyl nodes with chemical potential a few hundred meV above the Fermi energy \cite{chang2016room,wang2016time,ernst2019anomalous}. ~The number of valence electrons must be increased to make the nodes chemical potential coincide with the Fermi energy \cite{ernst2019anomalous}. To achieve this, Ti, which has 2 electrons in its valence 3\textit{d} orbital, can be substituted with V, which has 3 electrons in the 3\textit{d} orbital. Wang et al. suggested that the doping of 10\% V in place of Ti (i.e., Co$_2$Ti$_{0.9}$V$_{0.1}$Ga) to obtain the WSM phase \cite{wang2016time}. However, efforts to synthesize thin films Co$_2$Ti$_{0.9}$V$_{0.1}$Sn have not been successful, and the most stable composition was found to be Co$_2$Ti$_{0.6}$V$_{0.4}$Sn (i.e., 40\% V doped) \cite{hu2019unconventional}. Transport measurements for Co\textsubscript{2}TiSn and Co\textsubscript{2}Ti\textsubscript{0.6}V\textsubscript{0.4}Sn thin films show anomalous Hall and Nernst effects \cite{hu2018anomalous,hu2019unconventional}. Interestingly, the anomalous Nernst angle and coefficient for the doped compound (i.e., Co$_2$Ti$_{0.6}$V$_{0.4}$Sn) were significantly higher than the undoped one \cite{hu2018anomalous}. In this manuscript, we investigated the effect of V doping on the electronic structure of Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn (x = 0.0, 0.2, 0.4, 0.6, 0.8 and 1.0). The position of nodal lines, responsible for Weyl node formation in Co$_2$TiSn, changes as a function of V substitution in place of Ti. With increasing V in place of Ti, nodal lines and Fermi arcs get shifted towards the Fermi level close to the mid composition (i.e Co$_2$Ti$_{0.6}$V$_{0.4}$Sn). The intrinsic anomalous Hall conductivity obtained from the theory for the 50\% V doped composition (Co$_2$Ti$_{0.5}$V$_{0.5}$Sn) is nearly twice as compared to the undoped composition, due to the existence of the Fermi arcs very close ($\approx$ 7-23 meV) to the Fermi energy. \par \section{Methods} The calculations were performed using the full-potential Korringa$–$Kohn$–$Rostoker (KKR) Green's function method, as implemented in the SPRKKR package \cite{ebert2005munich} and pseudo-potential based density functional theory (DFT) as implemented in Quantum ESPRESSO (QE)\cite{giannozzi2009quantum} and Vienna ab-initio simulation package (VASP)\cite{kresse1993ab, kresse1996efficient}. The exchange-correlation potential is approximated through PBE-GGA functional \cite{perdew1996generalized}. A k-mesh consisting of $(22 \times 22 \times 22)$ k-points was used, and the angular momentum cut-off number was chosen to be $l_{max} = 3$. The Fermi energies were determined using the Lloyd formula \cite{ebert2011calculating}. 
For the calculations of the Bloch spectral functions spin-orbit coupling (SOC) has been taken into account. The band structures were calculated for both the spin-polarized, non spin-orbit coupling case as well as the case with spin-orbit coupling, taken as the a scalar relativistic correction to the original Hamiltonian. The ground state energies obtained when the magnetization quantization directions are kept in the [001], [110] and [111] directions are found to be the same within the limits of the methodology, indicating that the magnetization directions, and therefore the positions of the Weyl nodes, can be changed easily. This matches with the literature on Co$_2$TiSn, Co$_2$VSn and similar Heusler compounds \cite{chang2016room,wang2016time}. The disorder was taken into account through the coherent potential approximation (CPA) in the SPR-KKR calculations \cite{soven1967coherent,soven1969contribution}. Optimized pseudopotentials \cite{hamann2013optimized} are used in the calculations and the kinetic energy cutoff for the planewave is taken as 80 $Ry$. The electronic integration over the Brillouin zone (BZ) is approximated by the Gaussian smearing of 0.005 $Ry$ both for the self-consistent (sc) and non-self-consistent (nsc) calculations. The Monkhorst-Pack \textbf{k}-grid of $8\times8\times8$ are considered for the Brillouin zone integration for the DFT band structure calculations. The Wannier interpolated bands, the anomalous Hall conductivity (AHC), normalized Berry curvature and the Fermi arcs were calculated using Wannier90 \cite{marzari1997maximally, souza2001maximally, pizzi2020wannier90} and WannierTool \cite{wu2018wanniertools} starting from the the plane wave based pseudo potential DFT band structures. The transition metal $d$-orbitals are used in the energy selective downfolding projections for the wannier90 calculations. The AHC calculation is carried out with a dense \textbf{k}-grid of $75\times75\times75$. Further, through the adaptive refinement technique a fine mesh of $5\times5\times5$ is added around the points wherever the mod of the Berry curvature ($\abs{\Omega(\textbf{k})}$) exceeds 100 Bohr$^2$. All the calculated structures are optimized with tight convergence threshold both for the energy ($10^{-10} Ry$) and Feynman Hellman force ($10^{-10} Ry/Bohr$). The self consistent calculations are converged with the energy cut-off of $10^{-8} Ry$. \par \section{Results and Discussion} \subsection{Optimisation of lattice parameters} \begin{figure}[t] \includegraphics[width=\linewidth]{figure-new-1.png} \caption{Total energy vs. volume of the unit cell for Co$_2$Ti$_{1-x}$V$_{x}$Sn (x = 0.0, 0.2, 0.4, 0.6, 0.8, 1.0). The Birch-Murnaghan equation of state is used to determine the equilibrium values \cite{birch1947finite}.} \label{fig:figure-1} \end{figure} The lattice parameters for all the compositions were obtained by varying the parameters and calculating the respective ground state energies. The equilibrium values were found using the Birch-Murnaghan equation of state \cite{birch1947finite} fit for the total energy as a function of the unit cell volume. The plots of total energy vs. the unit cell volume are shown in Fig. \ref{fig:figure-1}. 
The calculated lattice parameters values, 6.104 \AA (for x=0.0), 6.087 \AA (for x=0.2), 6.083 \AA (for x=0.4), 6.065 \AA (for x=0.6), 6.050 \AA (for x=0.8)~and 6.04 \AA (for x=1.0) are in well agreement with the experimentally reported lattice parameters 6.076\AA, 6.051 \AA, 6.040 \AA, 6.034\AA, 6.014\AA, 5.98 \AA~for x = 0.0, 0.2, 0.4, 0.6, 0.8, and 1.0 respectively, which follow similar trend with change in the composition\cite{dunlap1982conduction,pendl1996investigation}. \par \begin{figure}[htbp] \includegraphics[width=1\linewidth]{figure-new-2.png} \caption{(a)Crystal structure of Co\textsubscript{2}TiSn. Co atoms are represented by green spheres, Ti atoms by blue spheres, and Sn atoms by yellow spheres. (b) Brillouin zone showing the high-symmetry points and the k-path followed in the band structures for the primitive unit cell. (c),(e) Calculated band structures of Co\textsubscript{2}TiSn and Co\textsubscript{2}VSn with spin-orbit coupling in the [110] quantization direction. (d),(f)Calculated band structures of Co\textsubscript{2}TiSn and Co\textsubscript{2}VSn with spin-orbit coupling in the [001] quantization direction. The red circles indicate the location of the crossings in the Co-Y (Y = Ti, V) hybridized 3d bands.} \label{fig:figure-2} \end{figure} \subsection{Band structure calculations for stoichiometric Co\textsubscript{2}TiSn and Co\textsubscript{2}VSn Heusler compounds} The band structures calculated using plane wave based pseudo-potential for the end (stoichiometric) compositions, with spin-orbit coupling (SOC), are shown in Fig. \ref{fig:figure-2}. The band structures for both the stoichiometric compositions, Co$_2$TiSn and Co$_2$VSn, have been calculated using the Heusler (L2$_1$) cubic structure with space group $Fm\Bar{3}m$. In the conventional unit cell of this structure, Co occupies the 8c (1/4, 1/4, 1/4) Wyckoff position, Ti (or V) occupies 4b (1/2, 1/2, 1/2), and Sn is at 4a (0, 0, 0), as shown in Fig. \ref{fig:figure-2}(a) for Co$_2$TiSn as an example. \begin{figure*}[t] \includegraphics[width=\linewidth]{figure-new-3.png} \caption{Bloch spectral function plots of Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn for the primitive unit cell. The majority and minority spin states are represented by the blue and red lines, respectively. The nodal line can be seen shifting downwards with respect to the Fermi level as the concentration of V increases, tuning with the Fermi level at x = 0.2 and x = 0.4.} \label{fig:figure-3} \end{figure*} Fig. \ref{fig:figure-2} shows the spin-orbit coupled band structures along the high symmetry lines in the BZ for Co$_2$TiSn and Co$_2$VSn with magnetization oriented along the [110] and [001] directions. Red circles mark the nodal lines of interest which may form the Weyl nodes. An analysis of the hybridisation and symmetry of these nodal lines is detailed in the appendix. In Co$_2$TiSn, the nodal line remains unaffected when the magnetization is oriented in the [001] direction (Fig. \ref{fig:figure-2}(d)), but very slight gaps occur when it is oriented in the [110] direction (Fig. \ref{fig:figure-2}(c)). In the band structure for the [110] magnetization direction (Fig. \ref{fig:figure-2}(c)), the nodal line crossing along $\Gamma-W$ has a very small gap, not visible in the figure, while the crossings along $\Gamma-X$ and $\Gamma-K$ remain unaffected. 
The crossings of the nodal line bands with the surrounding band along $\Gamma-W$ and $\Gamma-K$ also have gaps, according to the magnetization direction, as can be seen next to the red circles in the figures (Fig. \ref{fig:figure-2}(c) and \ref{fig:figure-2}(d)). In Co$_2$VSn, these nodal lines lie entirely below the Fermi energy, as seen in Fig. \ref{fig:figure-2}(e) and \ref{fig:figure-2}(f). \subsection{Bloch spectral functions for Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn} For disordered compounds, it is difficult to determine the $E$ versus $\textbf{k}$ dispersion relations using methods for periodic ordered systems, such as the plane wave pseudo potential based DFT calculations. One approach is to construct a supercell to add the substituted element in the required ratio and calculate the dispersion relations. However, this results in complex band structures with additional bands due to symmetry. These supercell bands can then be unfolded to get an effective band structure\cite{popescu2010effective,popescu2012extracting}. Using this approach to calculate the relations for a range of compositions is cumbersome and is feasible only for specific compositions. Here, we use Bloch spectral functions to represent the electronic structure. The Bloch spectral function $A_{B}(\mathbf{k},E)$, defined as the Fourier transform of the Green's function $G(\mathbf{r},\mathbf{r'},E)$, can be written as \begin{multline} A_{B}(\mathbf{k},E) = - \frac{1}{\pi N} \Im Tr \lbrace \sum_{n,n'}^{N} e^{i\mathbf{k}(\mathbf{R_{n}}-\mathbf{R_{n'}})} \\ \times \int_{\Omega} d^{3}r G(\mathbf{r}+\mathbf{R_{n}},\mathbf{r}+\mathbf{R_{n'}},E) \rbrace \end{multline} This function can be interpreted as the $\textbf{k}$-resolved density of states \cite{ebert2011calculating}. The Bloch spectral function (BSF) plots are shown in Fig. \ref{fig:figure-3}. The nodal line can be seen shifting downward with respect to the Fermi energy as the concentration of V increases. At x = 0.4, the point of highest energy of the nodal line tunes with the Fermi energy, along $\Gamma-K$. This has been also seen in the Fermi surface plots (Fig. \ref{fig:figure-4}) where for $x = 0.4$, the two bands touch at the extremities, along $\Gamma-K$. For higher V doped composition e.g. x= 0.6, the nodal lines lie entirely below the Fermi energy.This means that for all compositions having V concentration in the range of $0.6 \leq$x$ \leq1$, the nodal lines lie entirely below the Fermi level. A distinct feature of the BSF plots of the substituted compounds are the broadening of the majority bands. This broadening occurs in the energy range -0.5 eV to 2.0 eV. These are the same Co-X (X = Ti, V) hybridized 3\textit{d} bands which form the nodal lines of interest. In the substituted compounds, the bands in this energy range are primarily formed by the 3\textit{d} states on the 4b Wyckoff position, which contain the Ti and V atoms, and have a negligible contribution from Co atoms. The Co atoms in the substituted compounds have more contribution in the states below the nodal lines. The broadening in the bands occurs due to the randomly substituted additional 3\textit{d} electron of the V atom. Since the band corresponding to the additional electron has a different energy, random fluctuations are induced in the energy window of the 3\textit{d} states. The new electronic state manifests as an intermediate state, which can be seen becoming more well defined as the concentration of V increases. 
\subsection{Fermi surface} \begin{figure*}[htb] \includegraphics[width=1\linewidth]{figure-new-4.png} \caption{Fermi surface plots of Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn with the chemical potential set at the Fermi energy for the spin-polarized calculations without SOC. Majority spin states are presented in blue. Since all the compositions are half-metallic, there are no minority spin states at the Fermi level. On the right is a diagram of the Brillouin zone (BZ) in the conventional unit cell setting showing the high-symmetry lines and points, along with the Fermi surface cross-section (cut through the BZ at $k_z = 0$). The dotted shaded square region shows the first BZ.} \label{fig:figure-4} \end{figure*} The evolution of the Fermi surface with respect to the V concentration can be seen in Fig. \ref{fig:figure-4}. These calculations were done using the primitive unit cell; however, the data are represented in the conventional setting ($k_x$, $k_y$, $k_z$ axes) of the BZ for better presentation. The Fermi surface plots show the electronic states in the $xy$-plane of the BZ, at $k_z = 0$, lying at the Fermi energy. On the right is a schematic diagram showing the high-symmetry points in the Brillouin zone and a cross-section of the Brillouin zone at $k_z = 0$. The intermediate state formed by the addition of the 3\textit{d} V electron can be seen becoming more well defined along the $X-\Gamma$ and $W-K$ directions as the concentration of V increases. The bands which form the nodal lines can be seen clearly in the Fermi surface plots; one has a Fermi surface around the $\Gamma$ point and the other around the $K$ point. At $x = 0.2$, the nodal line tunes with the Fermi energy along $\Gamma-W$, as can be seen in both the BSF and Fermi surface plots. At $x = 0.4$, the Fermi surface plot shows the two bands touching at the extremities, along $\Gamma-K$. For $0.6 \leq x \leq 1$, the nodal lines lie entirely below the Fermi energy. \subsection{Density of states and magnetization} \begin{figure*}[t] \includegraphics[width=\linewidth]{figure-new-5.png} \caption{Density of states of Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn (x=0.0, 0.2, 0.4, 0.6, 0.8 and 1.0). For each compound, the positive y-axis represents the density of the majority spin states, and the negative y-axis represents the minority spin states.} \label{fig:figure-5} \end{figure*} The stoichiometric compounds Co$_2$TiSn and Co$_2$VSn have been reported as half-metallic ferromagnets \cite{doi:10.1063/1.1853899,hickey2006Fermi,kandpal2007calculated,barth2010itinerant,aguayo2011density}. To investigate the half-metallic character as a function of chemical disorder, we performed density of states (DOS) calculations as a function of x in Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn. The evolution of the density of states (DOS) is shown in Fig. \ref{fig:figure-5}. The Fermi energy lies in the minority-spin band gap for all compositions, indicating half-metallic behavior throughout. The magnitude of the gap increases slightly with increasing V concentration, and the gap for the stoichiometric compounds is in good agreement with the literature. The band gap ($\Delta$E) for the compositions x=0.0, 0.2, 0.4, 0.6, 0.8 and 1.0 is found to be 0.491 eV, 0.505 eV, 0.518 eV, 0.518 eV, 0.532 eV and 0.546 eV, respectively. This shows that $\Delta$E increases slightly with increasing V substitution in place of Ti.
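Since the Fermi level stays inside the minority-spin gap for every composition, the minority channel holds a fixed integer number of electrons and the total spin moment is set by electron counting alone. As a simple consistency check (our addition, not part of the calculations reported here), the generalized Slater--Pauling rule for half-metallic full Heusler compounds, $m_{tot} = (Z_t - 24)\,\mu_B$ with $Z_t$ the number of valence electrons per formula unit (Co: 9, Ti: 4, V: 5, Sn: 4), gives
\begin{equation*}
m_{tot}(x) = \left[\,2\times 9 + 4(1-x) + 5x + 4\,\right] - 24 = (2+x)\,\mu_B,
\end{equation*}
i.e. 2.0, 2.2, 2.4, 2.6, 2.8, and 3.0~$\mu_B$ for x = 0.0 to 1.0, which anticipates the increasing trend of the calculated total moments discussed next (Table \ref{table:table-1}).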
Having obtained the evolution of the nodal lines and the half-metallic character, we now turn to the magnetic behavior of the Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn alloys. The magnetic moments calculated for all the compounds are given in Table \ref{table:table-1}. Interestingly, the magnetic moment of Co initially increases and thereafter starts decreasing, in good agreement with the experimental findings (Table \ref{table:table-1}) \cite{dunlap1982conduction, pendl1996investigation}. The magnetic moment of Co is oriented antiparallel to the Ti moment and parallel to the V moment. Ti has a low magnetic moment of around 0.1 $\mu_{B}$ per atom, while V has a higher moment, ranging from $\approx$0.72 to 0.88 $\mu_{B}$ per atom. The total magnetic moment per formula unit shows an increasing trend with V substitution, reaching 3 $\mu_{B}$ for x = 1.0, in good agreement with previously reported literature \cite{shukla2020destruction}. Thus, all our results are in accordance with the behavior observed in experiments and explain the unusually large anomalous Nernst coefficient of Co$_2$Ti$_{0.6}$V$_{0.4}$Sn as compared to the stoichiometric compound Co$_2$TiSn \cite{hu2018anomalous}. \begin{table}[htbp] \centering \begin{tabular}{ c c c c c c c c c } \toprule \multirow{2}{*}{} & Experimental ($\mu_{B}$) & \multicolumn{7}{c}{Calculated ($\mu_{B}$)} \\ $x$ & $\mu_{Co}$ \cite{pendl1996investigation} & $\mu_{Co}$ && $\mu_{Ti}$ && $\mu_{V}$ && $\mu_{total}$ \\ \midrule $0.0$ & 0.98 & 1.078 && -0.117 && - && 2.03 \\ $0.2$ & 0.98 & 1.097 && -0.116 && 0.719 && 2.23 \\ $0.4$ & 1.07 & 1.097 && -0.126 && 0.830 && 2.43 \\ $0.6$ & 0.91 & 1.091 && -0.132 && 0.873 && 2.63 \\ $0.8$ & 0.88 & 1.089 && -0.145 && 0.878 && 2.82 \\ $1.0$ & 0.60 & 1.094 && - && 0.851 && 3.00 \\ \bottomrule \end{tabular} \caption{Magnetic moments for Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn (x=0.0, 0.2, 0.4, 0.6, 0.8 and 1.0). The moments for individual atoms are given in units of $\mu_{B}$ per atom, and the total moment is given in $\mu_{B}$ per formula unit.} \label{table:table-1} \end{table} \subsection{Weyl nodes, Fermi arcs and Berry curvatures} \begin{figure*}[t] \includegraphics[width=0.7\linewidth]{figure-new-6.png} \caption{Calculated Weyl nodes and Fermi arcs are shown for Co$_2$TiSn with the chemical potential 278 meV above the Fermi energy. (a) shows the Weyl nodes of two opposite Chern numbers (WP+ and WP-) connected by Fermi arcs in momentum space in the primitive Brillouin zone (shown in the inset). (b) Normalized Berry curvatures show the source- and sink-type flux, and the flow chart of the average position of the Wannier charge centers (WCC) obtained by the Wilson-loop method applied on a sphere that encloses the two nodes of opposite chirality.} \label{fig:fermi-arcTi} \end{figure*} \begin{figure*}[t] \includegraphics[width=0.7\linewidth]{figure-new-7.png} \caption{Calculated Weyl nodes and Fermi arcs are shown for Co$_2$VSn with the chemical potential 227 meV below the Fermi energy. (a) shows the Weyl nodes of two opposite Chern numbers (WP+ and WP-) connected by Fermi arcs in momentum space in the primitive BZ. (b) Normalized Berry curvatures show the source- and sink-type flux, and the flow chart of the average position of the Wannier charge centers (WCC) obtained by the Wilson-loop method applied on a sphere that encloses the two nodes of opposite chirality.}
\label{fig:fermi-arcV} \end{figure*} To extract the Fermi arcs and the normalized Berry curvature, we employed a Wannier-function basis set technique, starting from the pseudopotential plane-wave DFT band structure and using an energy-selective downfolding methodology. We used the primitive cell, containing one formula unit, for these calculations for the sake of simplicity. The calculated pairs of Weyl nodes with opposite chirality are listed in Table \ref{table:table-2}. To understand the texture of the Weyl points and nodal lines in detail, we have plotted them in Figs. \ref{fig:fermi-arcTi}-\ref{fig:fermi-arcTiV}. Fig. \ref{fig:fermi-arcTi}(a) shows the Fermi arcs of Co$_2$TiSn, calculated with the chemical potential 278 meV above the Fermi energy. From this figure, it is clearly evident that a pair of Weyl nodes of opposite chirality (-1 and +1), designated WP- and WP+ respectively, is located in momentum space at (-0.274, -0.157, -0.448) and (0.274, 0.157, 0.448), respectively. To clarify the nature of the above-mentioned Weyl nodes, we have plotted the normalized Berry curvatures (Fig. \ref{fig:fermi-arcTi}(b)). \begin{figure*}[t] \includegraphics[width=0.7\linewidth]{figure-new-8.png} \caption{Calculated Weyl nodes and Fermi arcs are shown for Co$_2$Ti$_{0.5}$V$_{0.5}$Sn in the doubled supercell in the primitive setting. (a) shows the two equivalent pairs of Weyl nodes with two opposite chiralities [WP1+ (WP2+) and WP1- (WP2-)] of +1 and -1, respectively. The insets show the WCC for the two pairs separately, running in opposite directions in k-space. (b) and (c) show the normalized Berry curvatures of opposite chirality (source- and sink-type flux) for WP1 and WP2, respectively.} \label{fig:fermi-arcTiV} \end{figure*} The normalized Berry curvatures indicate that the fluxes at these two Weyl points (WPs) are of opposite chiral nature: WP$-$ is sink-like, with the flux moving inward, whereas WP$+$ is source-like, with the flux emanating outward from the point. These analyses confirm the opposite chiral nature of the two Weyl nodes. Moreover, we also calculated the flow chart of the average position of the Wannier charge centers (WCC), obtained by the Wilson-loop method applied on a sphere that encloses these two nodes of opposite chirality, and found that the WCC moves in two opposite directions, from south to north for WP- and from north to south for WP+, confirming their opposite chiral nature. We performed a similar exercise for Co$_2$VSn, as shown in Fig. \ref{fig:fermi-arcV}. The Co$_2$VSn Weyl nodes are shown for the chemical potential 227 meV below the Fermi energy; the WPs of opposite chirality are located at (-0.273, -0.150, -0.441) and (0.273, 0.150, 0.441), with Chern numbers -1 and +1 respectively, as shown in the bottom panel of Fig. \ref{fig:fermi-arcV}(b). Note that the main features of the Fermi arcs and the normalized Berry curvatures of Co$_2$VSn and Co$_2$TiSn are very similar, which is consistent with the band structure calculations showing very similar energy dispersions. The main difference is that the chemical potential of the Fermi arcs lies above the Fermi energy for Co$_2$TiSn and below it for Co$_2$VSn.
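The chirality bookkeeping used above can be illustrated with a minimal numerical sketch (our addition; the actual calculations use Wannier-interpolated Hamiltonians rather than this toy model, and all names below are our own). For an isotropic Weyl Hamiltonian $H = \chi\,\mathbf{k}\cdot\boldsymbol{\sigma}$, the Berry flux of the lower band through a small sphere enclosing the node is quantized, with opposite values for opposite $\chi$; this is exactly what the Wilson-loop/WCC flow detects.
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

def lower_state(k, chi):
    # Lower-band eigenvector of the toy Weyl Hamiltonian H = chi * k.sigma
    _, v = np.linalg.eigh(chi * (k[0]*sx + k[1]*sy + k[2]*sz))
    return v[:, 0]

def chiral_charge(chi, r=0.05, n=80):
    # Fukui-Hatsugai link method on a (theta, phi) grid over a sphere
    # enclosing the node: the summed plaquette phases / 2*pi approximate
    # the quantized Berry flux, i.e. the chiral charge (overall sign is
    # convention dependent).
    th = np.linspace(1e-4, np.pi - 1e-4, n)
    ph = np.linspace(0.0, 2*np.pi, n, endpoint=False)
    u = np.empty((n, n, 2), dtype=complex)
    for i, t in enumerate(th):
        for j, p in enumerate(ph):
            k = r * np.array([np.sin(t)*np.cos(p),
                              np.sin(t)*np.sin(p),
                              np.cos(t)])
            u[i, j] = lower_state(k, chi)
    flux = 0.0
    for i in range(n - 1):
        for j in range(n):
            jp = (j + 1) % n
            loop = (np.vdot(u[i, j],    u[i, jp]) *
                    np.vdot(u[i, jp],   u[i+1, jp]) *
                    np.vdot(u[i+1, jp], u[i+1, j]) *
                    np.vdot(u[i+1, j],  u[i, j]))
            flux += np.angle(loop)
    return flux / (2.0*np.pi)

# Opposite chiralities give opposite quantized charges (+/-1):
print(chiral_charge(+1), chiral_charge(-1))
\end{verbatim}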
\begin{table}[htbp] \centering \resizebox{0.47\textwidth}{!}{% \begin{tabular}{ c | c c c | c | c } \toprule \multirow{3}{*}{} & \multicolumn{3}{c}{Coordinates} & Chemical & Chern \\ Compound & \multicolumn{3}{c}{in K-space} & Potential & number \\ & \multicolumn{3}{c}{} & (meV) & \\ \midrule Co$_2$TiSn & -0.270 & -0.167 & -0.448 & 278 & -1 (WP-) \\ & 0.274 & 0.157 & 0.448 & 278 & +1 (WP+)\\ \midrule Co$_2$Ti$_{0.5}$V$_{0.5}$Sn & -0.083 & -0.368 & 0.223 & 7 & -1 (WP1-) \\ & 0.142 & -0.160 & 0.345 & 18 & +1 (WP1+)\\ & 0.095 & 0.180 & -0.406 & 14 & -1 (WP2-)\\ & 0.035 & 0.371 & -0.217 & -23 & +1 (WP2+)\\ \midrule Co$_2$VSn & -0.273 & -0.150 & -0.441 & -227 & -1 (WP-) \\ & 0.273 & 0.150 & 0.441 & -227 & +1 (WP+)\\ \bottomrule \end{tabular}} \caption{Representative coordinates in momentum space, in the primitive unit cell setting, of the Weyl nodes, together with their chemical potentials with respect to the Fermi energy and the corresponding Chern numbers. The Co$_2$TiSn and Co$_2$VSn calculations were done with the primitive cell containing one formula unit, while the Co$_2$Ti$_{0.5}$V$_{0.5}$Sn calculations were done with a doubled primitive cell (supercell) to accommodate the $50\%$ doping level.} \label{table:table-2} \end{table} In the $50\%$ V-doped compound, owing to the reduction of some symmetry elements caused by the V substitution within the same crystal structure, the WP pairs do not appear symmetrically in momentum space, as highlighted in Table \ref{table:table-2}. It should be noted that, because a doubled primitive cell is used to accommodate the $50\%$ doping in the DFT band structure calculations followed by the Wannier interpolation, the number of bands and of Weyl-point band crossings near the Fermi energy is also twice that of the undoped compounds. However, if we consider any one pair of Weyl nodes, say WP1$\pm$, the main features of the Fermi arcs are very similar to those of the two stoichiometric compositions, which ensures the consistency of the calculations. In Fig. \ref{fig:fermi-arcTiV}(a), we show the two pairs of Weyl nodes, designated WP1 and WP2, and the nodal lines connecting the opposite-chirality WP1 (and WP2) points with Chern numbers +1 and -1, designated WP1+ (WP2+) and WP1- (WP2-) respectively, which appear very close to the Fermi energy. Figs. \ref{fig:fermi-arcTiV}(b)-(c) show the normalized Berry curvatures of the two opposite-chirality Weyl points for WP1 and WP2, respectively. It is important to note that, for the doped compound, the Weyl points appear very close to the Fermi energy, though not exactly at the same chemical potentials, so that the Fermi arcs connecting these Weyl points form nodal-line-like features (as evident from Fig. \ref{fig:fermi-arcTiV}(a)). The chemical potentials of the corresponding Weyl points are listed in Table \ref{table:table-2}. The nature of the two pairs of Weyl nodes is clear from the sink- and source-type normalized Berry curvatures and the oppositely running WCC shown in the insets of Fig. \ref{fig:fermi-arcTiV}(a). These analyses show that, via chemical doping, we can tune the position of the nodal lines in the energy spectrum as well as the nature of the Fermi arcs.
\subsection{Anomalous Hall conductivity} The anomalous Hall effect/conductivity is a direct consequence of the Berry curvature of the electronic band structure near the Fermi level, which acts as a pseudo-magnetic field in momentum space. The Berry curvature near the Fermi level introduces a transverse component into the electron motion and drives a large anomalous Hall conductivity (AHC). In our case, the middle composition is therefore of particular interest, because its Weyl nodes are situated near the Fermi level. To substantiate our composition-dependent analysis of the position of the Weyl points in the band structure, we calculated the AHC, with the expectation that a large AHC should appear around the middle composition. To calculate the intrinsic anomalous Hall conductivity (AHC) for the pure and doped systems, the conventional unit cells have been considered (unless otherwise specified). The Bloch wave functions are projected onto maximally localized Wannier functions in order to compute the intrinsic AHC. The intrinsic AHC is proportional to the Brillouin zone (BZ) summation of the Berry curvature over all occupied states \cite{pizzi2020wannier90, kubler2012berry,PhysRevB.74.195118} \begin{equation} \sigma^{xy}= -\frac{e^2}{\hbar} \sum_n \int_{BZ} \frac{d\textbf{k}}{(2\pi)^3} \Omega_{n,z}(\textbf{k}) f_n(\textbf{k}), \label{eq:ahc} \end{equation} where $f_n(\textbf{k})$ is the Fermi distribution function for band $n$ and $\Omega_{n,z}(\textbf{k})$ is the $z$ component of the Berry curvature at the wave vector $\textbf{k}$. The Berry curvature is related to the Berry connection $A_n(\textbf{k})$ as \begin{equation} \Omega_n(\textbf{k})= \nabla_\textbf{k} \times A_n(\textbf{k}), \label{eq:curvature} \end{equation} where $n$ is the band index and $A_n(\textbf{k})$, in terms of the cell-periodic Bloch states $\ket{u_{n\textbf{k}}} = e^{-i \textbf{k.r}}\ket{\psi_{n\textbf{k}}}$, is defined as $ A_n(\textbf{k}) = \expval{i\nabla_\textbf{k}}{u_{n\textbf{k}}} $ \cite{pizzi2020wannier90}. Co$_2$TiSn possesses the fcc L2$_1$ (space group $\#$ 225) structure, which has three mirror planes, $m_x$, $m_y$ and $m_z$, in the absence of any net magnetic moment. These mirror planes protect the gapless nodal lines in the band structure in the $k_x$ = 0, $k_y$ = 0, and $k_z$ = 0 planes \cite{wang2016time, chang2016room}. To compute the AHC, spin-orbit coupling is introduced and the direction of the magnetization is set along (001). Therefore, due to the symmetry breaking of the mirror planes, the nodal lines in the $k_x$ = 0 and $k_y$ = 0 planes exhibit a finite band gap, while the gapless nodal line survives only along the magnetization direction (in the $k_z$ = 0 plane) \cite{wang2016time, chang2016room}. However, due to the helical distribution of the Berry curvature around this gapless nodal line in the mirror plane, the total flux is zero and it therefore does not contribute to the intrinsic AHC \cite{ernst2019anomalous}. Concurrently, the Berry curvature around the gapped nodal lines is oriented along the direction of magnetization and contributes to the intrinsic AHC \cite{ernst2019anomalous}. Our calculated intrinsic AHC value at $E_F$ for the pure system (99.38 $S/cm$, and 92.40 $S/cm$ for the primitive cell) is in excellent agreement with the value (100 $S/cm$) reported in the literature \cite{ernst2019anomalous}. In addition, as shown in Fig.~\ref{fig:figure-6}, the intrinsic anomalous Hall conductivity is almost constant in the vicinity of $E_F$.
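A minimal sketch of how expressions of the type of Eq. (\ref{eq:ahc}) are evaluated in practice is given below (our addition; a two-band lattice toy model stands in for the Wannier-interpolated Hamiltonian, the result is in units of $e^2/h$ for a 2D layer rather than $S/cm$, and the model parameters are arbitrary).
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

def hk(kx, ky, m=1.0):
    # Two-band lattice toy model (an assumed stand-in for the
    # Wannier-interpolated Hamiltonian of the real material)
    return np.sin(kx)*sx + np.sin(ky)*sy + (m + np.cos(kx) + np.cos(ky))*sz

def omega_z(kx, ky):
    # Kubo formula for the Berry curvature of the occupied band n:
    # Omega_z = -2 Im <n|dH/dkx|m><m|dH/dky|n> / (E_m - E_n)^2
    dHx = np.cos(kx)*sx - np.sin(kx)*sz   # analytic dH/dkx
    dHy = np.cos(ky)*sy - np.sin(ky)*sz   # analytic dH/dky
    w, v = np.linalg.eigh(hk(kx, ky))
    n, m = v[:, 0], v[:, 1]               # occupied, unoccupied
    num = (n.conj() @ dHx @ m) * (m.conj() @ dHy @ n)
    return -2.0*np.imag(num) / (w[1] - w[0])**2

N = 100
ks = 2.0*np.pi*np.arange(N)/N
# sigma_xy in e^2/h: (1/2pi) * sum over the BZ of Omega_z * dk^2
sigma_xy = sum(omega_z(kx, ky) for kx in ks for ky in ks) * (2.0*np.pi/N**2)
print(sigma_xy)   # close to an integer (+/-1 here): a quantized AHC
\end{verbatim}
In the real metallic compound the Fermi level cuts through bands, so $\sigma^{xy}$ is not quantized; the same BZ summation, restricted by $f_n(\textbf{k})$, then yields values like those quoted above.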
\begin{figure} \includegraphics[width=0.8\linewidth]{figure-new-9.png} \caption{(a) Comparison of the Wannier-interpolated band structure (red) with the full electronic band structure (blue) of Co$_2$TiSn. The Fermi energy is set to 0 $eV$. (b) The calculated intrinsic anomalous Hall conductivity at different energies. The conductivity is found to be constant in the vicinity of $E_F$ ($E_F$ $\pm$ 0.2 $eV$).} \label{fig:figure-6} \end{figure} Earlier, we saw that the band crossing points of the pure compound (Co$_2$TiSn) shift downwards with respect to the Fermi level ($E_F$) as the concentration of V increases (Fig. \ref{fig:figure-2}); in particular, two such crossing points, which lie in the unoccupied region for the stoichiometric composition Co$_2$TiSn, come close to $E_F$ as the composition approaches 50\% V doping. Hence, it is expected that the intrinsic AHC will be enhanced for 50\% V-doped Co$_2$TiSn, by up to $\sim 2.5$ times the value obtained near $E_F$ of the pure compound \cite{ernst2019anomalous}. Considering that a calculation at the x = 0.4 doping concentration is computationally very expensive, we chose x = 0.5 to calculate the intrinsic AHC, which is expected to be close to the maximum value. When simulating the AHC as a function of energy, at around 0.25 $eV$ above $E_F$ we obtain an AHC $\sim$ 1.9 times higher than that at $E_F$. The same is further confirmed when the bands are filled along the $+ve$ energy direction (w.r.t. $E_F$), which is achieved in the form of V doping. The calculated AHC along the direction of magnetization at $E_F$ in 50\% V-doped Co$_2$TiSn is found to be 196.84 $S/cm$, nearly twice the AHC of the pure system (Co$_2$TiSn). Hence, the higher AHC value (in the 50\% doped system, as well as at $\sim$ 0.25 $eV$ above $E_F$ in the pure compound) is attributed to the presence of nodal lines very close to $E_F$. \par \section{Conclusion} To summarise, we performed $\textit{ab-initio}$ calculations on the Co-based Heusler compounds Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn with x = 0.0, 0.2, 0.4, 0.6, 0.8 and 1.0. We calculated the band structures, Bloch spectral functions and DOS using KKR-GF methods. We also calculated the Fermi arcs, normalized Berry curvatures, WCC and intrinsic AHC for the x = 0.0, 0.5 and 1.0 compositions using Wannier90 interpolation of the plane-wave pseudopotential band structures. We found that the nodal lines shift with V substitution and that the point of highest energy of the nodal line responsible for the Weyl nodes tunes with the Fermi energy for Co\textsubscript{2}Ti\textsubscript{0.6}V\textsubscript{0.4}Sn. For compositions between x = 0.6 and 1, the nodal lines lie entirely below the Fermi energy. We observed a half-metallic character over the entire range of compositions. The magnetic moment on each Co atom as a function of V concentration increases up to x = 0.4 and thereafter starts decreasing. A detailed investigation reveals that the signatures of the Weyl nodes and the Fermi arcs are more prominent near the Fermi energy for the 50$\%$ V-doped compound in comparison to the stoichiometric compounds, which emphasizes the importance of chemical doping in the present series of compounds. The intrinsic AHC was found to increase by nearly a factor of two for the 50\% doped system as compared to the undoped composition.
Our study suggests that the Co\textsubscript{2}Ti\textsubscript{1-x}V\textsubscript{x}Sn series of Heusler alloys in general, and the Co\textsubscript{2}Ti\textsubscript{0.6}V\textsubscript{0.4}Sn composition in particular, is important for investigating Weyl physics and various exotic transport phenomena. \par \section{Acknowledgments} SS thanks the Science and Engineering Research Board of India for financial support through the award of a Ramanujan Fellowship (grant no: SB/S2/RJN-015/2017) and an Early Career Research Award (grant no: ECR/2017/003186). SK thanks the Department of Science and Technology (DST), Govt. of India for providing INSPIRE research funding (Grant No. DST/INSPIRE/04/2016/000431; IFA16-MS91). S.W.D and J.M thank the CEDAMNF project financed by the Ministry of Education, Youth and Sports of the Czech Republic, Project No. CZ.02.1.01/0.0/0.0/15.003/0000358, and also acknowledge support by the GA\v{C}R via project 20-18725S. KKD and GKS acknowledge the DST-INSPIRE scheme for support through a fellowship. Payal Chaudhary and Surasree Sadhukhan contributed equally to this work. \par \medskip *[email protected]
\section{Introduction} \label{sec:intro} \label{sec1} \noindent Severe disturbances in the near-Earth space environment are caused by solar activity. Eruptive events like solar flares and coronal mass ejections (CMEs) are often associated with radio emissions which originate in the solar corona. Such radio emissions, or solar radio bursts (SRBs), trace various physical processes that happen in the solar corona and interplanetary medium (IPM). Based on their drift rates and morphology as seen in dynamic spectrograms, SRBs are primarily classified into five classes, viz. Type I to V \citep{Wild1950, Wild1963}. In this article, we study fast-drifting Type III bursts, slow-drifting Type II bursts and broadband Type IV bursts, as they provide clues to predict space weather \citep{Kundu1965, McLean1985, Pick2004}.\\\\ Type III bursts are the most intense, most frequently observed and fastest drifting bursts in the solar corona and IPM. Most of the time, they occur in association with X-ray and/or $H_{\alpha}$ flares \citep{Cane1988,White2007}. Type III bursts occur as isolated bursts that last 1 - 3 s, in groups that last up to 10 minutes, and as storms that last a few hours. Flare-accelerated electrons travelling along open magnetic field lines set up plasma oscillations (also known as Langmuir waves) during their passage through the corona and IPM, and the subsequent conversion of those oscillations into electromagnetic waves produces the Type III bursts \citep{Ginzburg1958,Zheleznyakov1970,Melrose1980, Mercier1975,Pick1986, Sasikumar2013, Dayal2019, Mah2020, Reid2014, Saint_Hilaire_2012}. In this article, we have carried out a statistical study of Type III bursts.\\\\ Type II bursts were discovered by \citet{Pay1947} and are generated in association with CMEs that move at super-Alfvenic speeds \citep{Nelson1985,Cliver1999,Nindos2008,Nindos2011,Vrsnak2008}. Type II bursts are signatures of particle acceleration caused by shock waves in the solar corona and IPM \citep{Gop2019}. Type IIs are slow-drifting ($\rm \approx 1~MHz~s^{-1}$) bursts and it is widely accepted that they originate from the plasma emission mechanism \citep{McLean1985,Nelson1985}. Type II bursts often show fundamental and harmonic components with a frequency ratio of $\approx 1:2$, and sometimes these components show band-splitting \citep{Vrs2001, Vas2014, Har2014, Har2015, Kis2016}. Type IV bursts are broadband quasi-continuum emissions that are mostly associated with flares/CMEs \citep{Boi1957, Ste1982, Gary1985, Ger1986, Gop1987, Sas2014, Car2017}. Type IV bursts are further divided into two sub-categories: (i) stationary Type IV bursts, which occur during the impulsive phase of flares and whose radio source location remains at the same height, and (ii) moving Type IV bursts, which are generally associated with CMEs. Imaging observations of moving Type IV bursts confirm that the location of the radio source moves outward in the solar corona with a speed of $\rm \approx 200 - 1500~ km~ s^{-1}$ \citep{Sas2014, Vas2016, Liu2018, Vas2019}. Their origin can be due to either the plasma emission or the gyrosynchrotron emission mechanism \citep{Sas2014,Car2017}. Both Type II and Type IV bursts show a significant correlation with space weather hazards \citep{White2007, Vor2020}.
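Since all of these burst types are tied, at the fundamental or its harmonic, to the local plasma frequency, the observing frequency maps onto the electron density of the emitting layer. A minimal sketch of this standard conversion is given below (our addition; it assumes emission at the fundamental).
\begin{verbatim}
# Standard plasma-frequency relation: f_p [Hz] ~ 8980 * sqrt(n_e [cm^-3]);
# harmonic emission at the same observing frequency would probe n_e/4.
def electron_density(f_obs_hz):
    return (f_obs_hz / 8980.0)**2   # cm^-3

for f_mhz in (45.0, 80.9, 450.0):
    print(f"{f_mhz:6.1f} MHz -> n_e ~ {electron_density(f_mhz*1e6):.2e} cm^-3")
# e.g. 45 MHz corresponds to n_e ~ 2.5e7 cm^-3, a typical coronal density
# at the heights probed by metric observations.
\end{verbatim}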
\\\\ Type II, Type III, and Type IV bursts provide clues to predict space weather hazards because: (i) Type III bursts are triggered by solar flares, (ii) Type II bursts originate from shocks observed in the solar corona and IPM, and (iii) moving Type IV bursts are associated with the core of CMEs \citep{Sas2014}. It is worth mentioning that the enhancement of X-ray and EUV radiation due to flares changes the conditions of the ionosphere and increases the total electron content (TEC). Note that the TEC of the ionosphere is a crucial parameter related to the frequency of radio waves which experience transmission through or reflection from the ionosphere \citep{Car2020, Sel2015}. CMEs and co-rotating interaction regions (CIRs) are responsible for geomagnetic storms and increased activity in the ionosphere. Note that they impact satellite communication, telecommunications, and Global Navigation Satellite Systems (GNSS). Also, solar energetic particles damage satellites, and the radiation is hazardous for astronauts and flight crews. \\\\ We note that the X-ray and EUV flux and energetic particles from solar flares influence the ionosphere within hours and impact HF communication, whereas CMEs take 1 - 5 days to reach the Earth, depending on their speed and direction of arrival. The observations shown in this article suggest that radio observations carried out using the ground-based e-CALLISTO network can be used to predict space weather hazards.\\\\ The Compound Astronomical Low frequency Low cost Instrument for Spectroscopy and Transportable Observatory (CALLISTO; \url{http://www.e-callisto.org/}) is a radio spectrometer designed to monitor radio transient emissions 24 hours a day \citep{Benz2009,Zuc2012, Sas2018}. It is designed to operate in the frequency range 10 MHz -- 870 MHz, and this frequency range probes the solar corona in the heliocentric distance range $\rm \approx 1 - 3~R_\odot$ \citep{Poh2007}. We note that there are $> 152$ stations operating around the globe, which form the e-CALLISTO network. Presently, about 52 of them regularly provide data to a server (1 frame every 15 minutes) located at the University of Applied Sciences (FHNW) in Brugg/Windisch, Switzerland. Therefore, the routine detection of Type II, Type III and Type IV bursts, along with other space-based observations (like the GOES X-ray flux, extreme ultraviolet images, and white-light coronagraph images), is useful for space weather forecasting agencies \citep{Prieto2020}. \\\\ This article is organized as follows. In Section 2, we describe the data used in this article. Section 3 describes a statistical study of Type III bursts (both isolated and groups of Type III bursts). This section also deals with the space weather implications of Type II, Type III and Type IV bursts. The summary and conclusions of the article are presented in Section 4. \section{Observation} \noindent In this study, we used the dynamic spectrograms observed using the e-CALLISTO network \citep{Benz2005,Benz2009}. Different stations operate over different frequency bands depending on the radio frequency interference (RFI) and instrumental limitations. To begin with, we prepared a list of Type III bursts using an online catalog ({\url{ftp://ftp.swpc.noaa.gov/pub/warehouse/}}) covering 2010 - 2017 and manually cross-checked the bursts with the help of the quick looks of the dynamic spectrograms that are available at the e-CALLISTO website ({\url{http://soleil.i4ds.ch/solarradio/callistoQuicklooks/}}).
To cover the observations of 24 hours, we used data from the different CALLISTO spectrometers that are listed in Table \ref{Table1}. The columns of Table \ref{Table1} indicate the station ID, host country, corresponding latitude and longitude of the station, and the range of operating frequencies, respectively. \\\\ \begin{table}[!htb] \centering \caption{Description of the CALLISTO spectrometers used in this work. Note that a burst can be observed by more than one spectrometer.} \label{Table1} \begin{tabular}{|c|l| l | c| c|c|} \hline \bf{SN} & \bf{File ID} & \bf{Country} & \bf{Lat ($^\circ$)} & \bf{Long ($^\circ$)} & \bf{Frequency range (MHz)} \\ \hline 1 & BLEN7M & Switzerland & 46.94 & 7.45 & 170--870\\ \hline 2 & BIR & Ireland & 53.09 & -7.90 & 10--400 \\ \hline 3 & GAURI & India & 16.61 & 77.51 & 45--410 \\ \hline 4 & GLASGOW & UK & 55.90 & -4.30 & 45--80.9 \\ \hline 5 & GREENLAND & Greenland & 67.00 & -50.72 & 10--110 \\ \hline 6 & BLENSW & Switzerland & 47.34 & 8.11 & 10--90 \\ \hline 7 & ROSWELL-NM & USA & 33.39 & -104.52& 15--80.9\\ \hline 8 & SSRT & Russia & 6.43 & 2.88 & 45--450\\ \hline 9 & KRIM & Ukraine & 44.95 & 34.10 & 250--350\\ \hline 10 & OOTY & India & 11.41 & 76.69 & 45--450 \\ \hline 11 & HUMAIN & Belgium & 50.20 & 5.25 & 45--410 \\ \hline 12 & ALMATY & Kazakhstan & 43.22 & 76.85 & 45--400\\ \hline 13 & MRT & Mauritius & -20.16 & 57.74 & 45--450\\ \hline 14 & RWANDA & Rwanda & -1.94 & 30.06 & 45--80.9 \\ \hline 15 & ESSEN & Germany & 51.39 & 6.97 & 20--80.9 \\ \hline 16 & OSRA & Czech Rep. & 49.90 & 14.78 & 150--870\\ \hline 17 & TRIEST & Italy & 45.64 & 13.77 & 220--445\\ \hline 18 & ALASKA & USA & 64.20 & -149.49& 210--450\\ \hline 19 & DENMARK & Denmark & 55.67 & 12.56 & 45--100 \\ \hline 20 & eC71 & Switzerland & 47.24 & 8.92 & 48--90 \\ \hline 21 & KASI & South Korea & 36.35 & 127.38 & 45--450 \\ \hline 22 & PERTH & Australia & -31.65 & 115.86 & 45--870\\ \hline 23 & DARO & Germany & 51.77 & 6.62 & 30--90 \\ \hline 24 & Heiterswil-CH& Switzerland & 47.30 & 9.13 & 45--82\\ \hline 25 & INDONESIA & Indonesia & -6.84 & 107.93 & 45--80.9 \\ \hline 26 & ROZTOKY & Slovakia & 49.40 & 21.48 & 45--400 \\ \hline \end{tabular} \end{table} In Figure \ref{Fig1a}, the left and right panels show the dynamic spectrograms of a group of Type III bursts (henceforth Type IIIg bursts) observed using the CALLISTO spectrometers located at Kigali, Rwanda (observed at 45 - 80 MHz) and at the Royal Observatory of Belgium (ROB; observed at 45 - 437 MHz), respectively. We note that, since the observations at Rwanda are carried out over a narrower bandwidth compared with ROB, the frequency resolution of the former instrument is better than that of the latter, and thus the left panel shows more intense and finer features. However, ROB observes the weak features of the burst beyond 80 MHz.\\\\ \begin{figure}[!ht] \centering \includegraphics[scale=.43]{Figure1a.eps} \includegraphics[scale=.43]{Figure1b.eps} \caption{The dynamic spectrograms show a group of Type III bursts observed on 14 January 2015. The left and right panels show the spectrograms observed using the CALLISTO spectrometers located at Kigali, Rwanda (45 - 80 MHz) and the Royal Observatory of Belgium (45 - 437 MHz), respectively. This event is associated with a flare of class C1.9.} \label{Fig1a} \end{figure} For all the bursts, we measured the start and stop times, and the lower ($F_{L}$) and upper ($F_{U}$) frequencies, manually.
Although different radio stations operate over different frequency ranges (see Table \ref{Table1}), we selected $F_{L}$ and $F_{U}$ to be the lowest and highest frequencies, respectively, among the simultaneous observations carried out by the different stations. Since Type III bursts are triggered by solar flares, and some of the flares are accompanied by CMEs, we have carried out a statistical analysis of them. The details of the solar flares and CMEs and the associated radio bursts are taken from the Heliophysics Event Catalogue ({\url{http://hec.helio-vo.eu/hec/hec\_gui.php}}). If a radio burst is present between the onset and end times of a flare, then we treat it as flare associated. Otherwise, the burst is treated as non-flare associated. \section{Results and Discussions} \subsection{Statistical study of Type III bursts} Many authors have attempted to study the correlation between the occurrence of Type III bursts and the Sunspot number (SSN) \citep{Lobzin2011, Huang2018, Mah2020}. Using L-band observations, \citet{Huang2018} studied 2384 SRBs observed during 1997 - 2016 and reported that the occurrence of SRBs closely tracks the solar cycle. In this article, we have identified 12971 Type III bursts (during 2010-2017) that are reported by the Space Weather Prediction Center (SWPC) of the National Oceanic and Atmospheric Administration (NOAA) and explored their relationship with the solar cycle. \begin{figure}[!ht] \centering \includegraphics[scale=.99]{Figure2.eps} \caption{Occurrence of Type III bursts and the solar activity. The occurrence rate of Type III bursts correlates well with the two peaks of the Sunspot number observed during solar cycle 24.} \label{Fig2} \end{figure} Figure \ref{Fig2} shows the monthly averaged number of Type III bursts (i.e., the number of Type III bursts observed in a year / 12) observed in different years. The green curve shows the revised version of the Sunspot number available at {\url{http://www.sidc.be/silso/datafiles}} \citep{Clette2016}. From Figure \ref{Fig2}, it is evident that the occurrence of Type III bursts correlates strongly with the solar cycle. It is noteworthy that the number of Type III bursts follows the two peaks of the Sunspot number that occurred during the maximum of solar cycle 24 (SC24; i.e., in the years 2012 and 2014). Also, the smaller number of bursts observed during solar minimum (i.e. in the years 2010 and 2017) reflects the lower solar activity.\\\\ Among the 12971 Type III bursts, we randomly selected 619 intense Type III bursts by visual inspection (including 221 isolated Type III and 398 Type IIIg bursts) and carried out a statistical study. We found that out of the 619 Type III bursts, $\approx$65\% are associated with soft X-ray and/or $H_\alpha$ flares, while $\approx$45\% of them are accompanied by CMEs (see Table \ref{tab:summary}). A detailed table of the Type III bursts used for the statistical study can be accessed at {\url{http://www.e-callisto.org/GeneralDocuments/Supplementary_material.pdf}}. The remaining non-flare associated bursts presumably originate from the weak energy releases that are present in the solar corona \citep{Ramesh2010,Ramesh2013,Saint_Hilaire_2012,Sasikumar2013,Mugundhan2017,James2017,James2018,Sharma2018, Mah2020}. Earlier, \citet{Dayal2019} reported that out of 238 Type III bursts observed during $\approx$ 02:30 UT -- 11:30 UT in 2014, 88 bursts are associated with GOES X-ray / $H_\alpha$ flares.
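The flare-association rule described above reduces to a simple time-interval overlap test; a hedged sketch follows (our addition; the record fields and function names are hypothetical placeholders, not the SWPC or Heliophysics catalog schema).
\begin{verbatim}
# Hypothetical field names; timestamps can be datetime objects.
def is_flare_associated(burst_start, burst_end, flare_onset, flare_end):
    # True if the radio burst is present between the onset and end
    # times of the flare (i.e. the two intervals overlap).
    return burst_start <= flare_end and burst_end >= flare_onset

def classify(burst, flares):
    # Flare associated if the burst overlaps any flare window;
    # otherwise it is treated as non-flare associated.
    return any(is_flare_associated(burst["start"], burst["end"],
                                   f["onset"], f["end"]) for f in flares)
\end{verbatim}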
Similarly, \citet{Mah2020} reported that out of 1531 Type III bursts, 426 bursts are associated with GOES X-ray / $H_\alpha$ flares. The non-flare associated Type III bursts may be triggered by $H_{\alpha}$ ejecta, X-ray bright points, soft X-ray transient brightenings, and soft X-ray and/or extreme-UV (EUV) jets, etc. \citep[more references therein]{Alissandrakis2015}. \begin{table}[h!] \caption{Summary of observations.} \label{tab:summary} \centering \begin{tabular}{lc lc lc} \hline\hline & Isolated & Group of & Isolated + Group of \\ Parameter & Type III bursts & Type III bursts & Type III bursts \\ \hline Studied Type III bursts & 221 & 398 & 619 \\ Flare associated & 135 & 267 & 402\\ CME accompanied & 90 & 192 & 288 \\ Flare associated (\%) & 61 & 67 & 65\\ CME accompanied (\%) & 40 & 48 & 45 \\ \hline \end{tabular} \end{table}\\ Further, we divided the bursts into two categories and studied their characteristics separately. The first category comprises a total of 221 isolated Type III bursts (with duration $<$ 60 seconds) and the second category comprises 398 groups of Type III bursts (with duration $\approx$ 1 min -- 10 min). Figure \ref{Fig3} shows a CALLISTO spectrogram, obtained at Bleien, Switzerland, of an intense Type IIIg burst at the early phase of a flare of class X6.9, followed by another Type IIIg burst that occurred during its decay phase. Note that the Type IIIg burst observed at 08:05 UT is associated with the ascending phase of the flare, while the burst at 08:17 UT is associated with the decay phase of the flare. The GOES X-ray light curves are shown in black and red in Figure \ref{Fig3}. \begin{figure}[h!] \centering \includegraphics[scale=.5]{Figure3.eps} \caption{Dynamic spectrogram showing two groups of Type III bursts. The one at 08:01 UT occurred during the ascending phase of the X6.9 solar flare. The Type IIIg that occurred at 08:17 UT was triggered by the flare during its decay phase. The red and black curves show the GOES X-ray flux.} \label{Fig3} \end{figure}\\ For all the isolated Type III bursts, we measured the start time ($t_i$), stop time ($t_f$), upper frequency ($F_U$) and lower frequency ($F_L$) of the bursts and computed their drift rates using \begin{equation} \frac{df}{dt}=\left|\frac{F_{U}-F_{L}}{t_f-t_i}\right| \label{ED} \end{equation} Figure \ref{Fig4} illustrates the distribution of drift rates for different lower and upper frequencies. From the plot, it is found that at lower frequencies the drift rates are smaller than at higher frequencies. One possible explanation is that, as the flare-accelerated electrons travel along open magnetic field lines, which diverge with radially increasing distance, the duration of the radio bursts increases, and thus lower drift rates are expected at lower frequencies (i.e., in the outer layers of the corona). \citet{Reid2014} interpret the faster drift rates in the deep solar corona as due to the rapid change of the background plasma frequency with increasing distance. In this study, we have found that the drift rates of the isolated bursts vary in the range $\rm 0.70 - 99~ MHz~s^{-1}$. Earlier, \citet{Zhang2018} studied 1389 simple isolated Type III bursts observed using the Nan\c{c}ay decameter array over the frequency range 10 MHz - 80 MHz. The authors reported that the drift rates of the Type III bursts range from $\rm 2 ~MHz~s^{-1}$ to $\rm 16~MHz~s^{-1}$, with a median value of $\rm 6.94~ MHz~s^{-1}$.
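For concreteness, Eq. (\ref{ED}) applied to two illustrative measurements (a minimal sketch, our addition; the first set of numbers is invented for illustration, while the second corresponds to the 6 September 2017 Type II burst discussed below):
\begin{verbatim}
def drift_rate(f_upper_mhz, f_lower_mhz, t_start_s, t_stop_s):
    # Eq. (1): |F_U - F_L| / (t_f - t_i), in MHz/s
    return abs(f_upper_mhz - f_lower_mhz) / (t_stop_s - t_start_s)

# An isolated Type III drifting from 80 MHz to 45 MHz in 3 s:
print(drift_rate(80.0, 45.0, 0.0, 3.0))     # ~11.7 MHz/s
# A Type II drifting from 100 MHz to 18 MHz in 414 s (12:02:37-12:09:31 UT):
print(drift_rate(100.0, 18.0, 0.0, 414.0))  # ~0.2 MHz/s
\end{verbatim}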
In Figure \ref{Fig4}, both the color and the size of the marker indicate the drift rate for a given pair of lower and upper frequencies. \begin{figure}[ht!] \centering \includegraphics[scale=.99]{Figure4.eps} \caption{Distribution of drift rates of isolated Type III bursts measured for given lower and upper frequencies. Note that a larger marker indicates a higher drift rate.} \label{Fig4} \end{figure} We have studied the duration (i.e. stop time minus start time) of the 398 Type IIIg bursts. The left panel of Figure \ref{Fig5} shows how the duration of the Type IIIg bursts varies. It is also evident that the duration of the Type IIIg bursts is independent of the lower and upper frequency cut-offs. Different colors and sizes of the markers indicate the duration of the Type IIIg bursts for given lower and upper frequencies. The histogram in the right panel shows that 388 Type IIIg bursts (i.e. 97.5\%) lasted less than 4 minutes. \begin{figure}[h!] \centering \includegraphics[scale=.55]{Figure5a.eps} \includegraphics[scale=.55]{Figure5b.eps} \caption{The left panel shows the time duration over which the Type IIIg bursts lasted. The color and size of the marker indicate the time interval over which they lasted; note that a larger marker means a longer duration. The right panel shows the number of Type III bursts vs the time interval over which they were observed.} \label{Fig5} \end{figure} \subsection{Space weather implication} \noindent In this article, we investigated two events that caused radio blackouts in different parts of the Earth. Using these events, we attempt to explain how radio emissions can help to predict space weather hazards. \subsubsection{The 6th September 2017 event} On 6 September 2017, an intense and large group of Type III bursts was observed at the Greenland station, and its emission coincided with the early phase of a large flare of class X9.3 that peaked at 12:02 UT, as shown in Figure \ref{Fig6}. This flare was the most intense of SC24. The eruption was caused by the complex Active Region (AR) 12673, located at S09W34. The green and red curves in Figure \ref{Fig6} are the GOES X-ray light curves. \\\\ The group of Type III bursts started at 11:57 UT and disappeared at 12:12 UT in the frequency range of 18 MHz -- 100 MHz. Type IIIg bursts are observed during both the ascending and decay phases of the flare. Following the Type IIIg, two Type II bursts and a stationary Type IV burst are observed. The first Type II burst is observed from 12:02:37 to 12:09:31 UT in the frequency range 18 MHz - 100 MHz. The measured drift rate of the burst is $\approx 0.2~ MHz~s^{-1}$. The second Type II burst is observed between 12:13:02 - 12:21:57 UT in the frequency range 18 MHz - 60 MHz. The measured drift rate is $\approx 0.1~ MHz~s^{-1}$. Another interesting observation is that both Type II bursts show fundamental and harmonic emission. Also, a Type IV burst is observed at 12:14 UT. \\\\ Here, we would like to emphasize the following points: (i) the Type IIIg bursts are triggered by flare-accelerated electrons, (ii) both Type II bursts are triggered by CME shocks, and (iii) the Type IV burst is associated with the CME. In other words, the radio bursts shown in Figure \ref{Fig6} are essentially signatures of flares and/or CMEs. Therefore, such observations can be used to forecast space weather even in the absence of white-light coronagraph observations.
Also, since there are $> 152$ CALLISTO stations operating around the globe to monitor transient emissions from the solar corona, the e-CALLISTO network is a powerful tool to forecast space weather hazards. Note that the CME associated with the radio emission shown in Figure \ref{Fig6} is Earth-directed and appeared as a nearly symmetrical halo with an estimated sky-plane velocity of 1571 km s$^{-1}$, as observed by the SOHO coronagraph at 12:24 UT. Also, the CME caused a significant compression of the dayside magnetosphere of the Earth and prompted a severe (G4) geomagnetic storm ($\rm K_{pmax}= 8.3$ and $\rm Dst_{min}= -124$ nT) that reached the Earth on 7 -- 8 September 2017 (see Figure \ref{Fig6}). \begin{figure}[h!] \centering \includegraphics[scale=.5]{Figure6a.eps} \includegraphics[width=0.7\textwidth]{Figure6b.eps} \caption{The top panel shows the radio emissions observed on 6 September 2017. The red and green curves show the GOES X-ray flux during the radio observations. It shows Type IIIg bursts during the rising and decay phases of the X-ray flare, followed by two Type II bursts and a stationary Type IV burst. The lower panel shows the radio blackout that occurred over Europe, Africa and the Atlantic Ocean on 6 September 2017 due to the enhancement of the X-ray flux and UV radiation at the Earth. Credit: SWPC.} \label{Fig6} \end{figure} Furthermore, the intense radio emissions shown in Figure \ref{Fig6} are associated with major impulsive increases in X-ray and EUV emission. \citet{Sato2019} reported the impact of the 6 September 2017 event on GNSS signals and inferred that the bursts were strong enough to give rise to sudden disturbances in the space weather environment. The EUV emission caused a prompt ionization enhancement in the Earth's upper atmosphere. Also, the energetic particles from the Sun arrived at the Earth within a few hours after the flare, resulting in a large enhancement of high-energy proton levels, and caused a shortwave radio blackout over Europe, Africa and the Atlantic Ocean, as shown in the lower panel of Figure \ref{Fig6}. Moreover, a rapid and comprehensive ionization of the equatorial upper atmosphere disrupted HF communications while emergency managers were struggling to provide critical recovery services (e.g., \citealp{NCEI2017}). Some of these issues were reported by the Hurricane Weather Net (HWN) and the French Civil Aviation Authority (DGAC) \citep{Redmon2018}. \subsubsection{The 7th March 2012 event} A Type IIIg burst and a stationary Type IV burst, shown in Figure \ref{Fig7}, were observed by the Badary observatory (SSRT site), Siberia, Russian Federation, on 7 March 2012. These radio emissions were triggered by a GOES X5.4 flare with onset and peak times of 00:02 UT and 00:24 UT, respectively. This flare is associated with the active region AR11429, located at N18E31. The second flare is a GOES X1.3 class flare with onset and peak times of 01:05 UT and 01:14 UT, respectively. This flare is also associated with the active region AR11429, located at N15E26. The green and red curves in Figure \ref{Fig7} show the X-ray light curves observed by the GOES satellite.\\\\ The Type IIIg burst observed at 00:56 UT in the frequency range of 90 MHz -- 320 MHz is followed by an intense Type IV burst. This Type IIIg was triggered by the X5.4 class flare during its decay phase. Another Type IIIg burst that occurred at 01:58 UT was triggered by the X1.3 flare during its decay phase. These flares are associated with two CMEs launched from the AR11429 region.
The estimated speeds of the first and second CMEs are $\approx 2200$ and $\approx 1800~ km~s^{-1}$, respectively \citep{Patsourakos2016}. This event caused radio blackouts over Asia and Australia and the Indian and Pacific Oceans on 7 March 2012 at 04:21 UT (Figure \ref{Fig7}) and was followed by aurorae. The main phase of the magnetic storm started at $\approx$ 02:00 UT on 7 March 2012 and reached a maximum at $\approx$ 05:15 UT, with Dst $=$ -74 nT and $\rm K_{pmax}= 6.0$ \citep{Tsurutani2014}. The ionospheric disturbances caused by this storm are described by \citet{Krypiak2019}. \begin{figure}[h!] \centering \includegraphics[scale=.5]{Figure7a.eps} \includegraphics[width=0.7\textwidth]{Figure7b.eps} \caption{The top panel shows the radio emissions observed on 7 March 2012 by the Badary station in Russia. The red and green curves show the GOES X-ray flux during the radio observations. It shows Type IIIg bursts during the rising and decay phases of the X-ray flares and a stationary Type IV burst. The lower panel shows the radio blackout that occurred over Asia, Australia, and the Indian and Pacific Oceans on 7 March 2012 due to the enhancement of the X-ray flux and UV radiation at the Earth. Credit: SWPC.} \label{Fig7} \end{figure} \section{Summary and conclusion} \noindent In this study, we report a statistical analysis of Type III radio bursts and their correlation with the Sunspot number. During solar cycle 24, we identified 12971 Type III bursts (during 2010-2017) by making use of the reports of the Space Weather Prediction Center (SWPC) of the National Oceanic and Atmospheric Administration (NOAA). We found that the occurrence of Type III bursts correlates well with the Sunspot number. Further, we randomly selected 619 Type III bursts, comprising 221 isolated bursts and 398 groups of bursts, and carried out a statistical study using the dynamic spectrograms observed by the e-CALLISTO network. We found that $65\%$ of them are flare associated and $45\%$ of the bursts are accompanied by a CME. The remaining non-flare associated Type III bursts are believed to originate from weak energy release events in the solar atmosphere. We also found that the drift rates of the isolated bursts vary in the range $\rm 0.70 - 99~ MHz~s^{-1}$. This study confirms that drift rates at higher frequencies are larger than at lower frequencies. This can be due to the following facts: (i) the electrons travel along open magnetic field lines which diverge with radially increasing distance, so the duration of the radio bursts increases, resulting in lower drift rates, and (ii) the faster drift rates in the deep solar corona are due to the rapid change of the background plasma frequency with increasing distance \citep{Reid2014}. We studied the duration of the Type IIIg bursts and found that $97.5\%$ of them lasted less than $4$ minutes, independent of the lower and upper frequency cut-offs. \\ The meter-wave solar radio bursts are launched in the same layers of the solar atmosphere where geo-effective disturbances initiate and, hence, they can be potential signatures for forecasting space weather hazards. In this study, we show two such events that caused radio blackouts near the Earth. As previously mentioned, Type III bursts are signatures of solar flares, while Type II and Type IV bursts are signatures of CMEs. The enhanced radiation at X-ray and EUV wavelengths, along with the energetic particles, reaches the Earth within a few hours, influences the ionosphere, and impacts HF communications.
Note that it takes 1 - 5 days for a CME to impact the Earth, depending on its speed and direction. The ground-based e-CALLISTO network, with more than 152 stations located at various longitudes, is capable of observing solar radio transient emissions 24 hours a day. Furthermore, different types of radio bursts are associated with various solar transients and, therefore, they can play a crucial role in predicting space weather. \acknowledgments This work was supported by the International Science Programme (ISP) through the Rwanda Astrophysics, Space and Climate Science Research Group (RASCSRG). We thank FHNW, Institute for Data Science in Brugg/Windisch, Switzerland, for hosting the e-CALLISTO network; SOHO/LASCO; NOAA; GOES; SWPC; and WDC-SILSO, Royal Observatory of Belgium, Brussels, for making their data available online. The author (C. Monstein) thanks the ISSI - Bern International Team of ‘Why Ionospheric Dynamics and Structure Behave Differently in The African Sector’ (team leaders E. Yizengaw \& K. Groves) for valuable discussions about part of the results included in this paper. We thank both referees for providing useful and encouraging comments and suggestions which helped improve the manuscript. \bibliographystyle{aasjournal}
\section{Introduction} Recently, the dependence of the clustering properties of galaxies on luminosity was investigated by Benoist {et al.~} (1996) and Willmer, da Costa \& Pellegrini (1998) using volume-limited subsamples drawn from the SSRS2 (da Costa {et al.~} 1994, 1998). Analyses based on the two-point correlation function, in redshift and real space, and on counts-in-cells showed that luminous galaxies ($L > L^*$) are more clustered than sub--L$^*$ galaxies. The effect is particularly strong for the very luminous galaxies with \MB $\leq -21$ (hereafter VLGs), for which the correlation length approaches that of $R \ge 0$ galaxy clusters. This result might simply imply that the bright galaxies are preferentially located in clusters. This would be the case, for instance, if these galaxies were primarily cD galaxies, as suggested by Hamilton (1988) in his analysis of the CfA1 catalog. Another possibility is that very bright galaxies are located in loose groups, which are expected, on theoretical grounds (e.g. Hamilton \& Gott 1988), to have clustering properties intermediate between galaxies and clusters, although no strong observational evidence currently exists in support of this conjecture. Previous attempts to measure the correlation function of loose groups in the CfA and SSRS surveys with a standard algorithm (Huchra \& Geller 1982) have led to the conclusion that the amplitude is comparable to --or even lower than-- that of galaxies (Jing \& Zhang 1988, Maia \& da Costa 1990, Ramella et al. 1990). Progress has recently been made by Girardi et al. (1998), who analyzed a much larger group catalog derived from the CfA2 and SSRS2. These authors show compelling evidence that groups are indeed more clustered than galaxies, with a correlation length of the order of 10~h$^{-1}$ Mpc, yielding a relative bias of $\approx$ 3. They also find evidence that the correlation amplitude increases with the luminosity (mass) of the group. Therefore, the clustering properties of VLGs might also be explained if VLGs are in very rich groups. Alternatively, if VLGs are not preferentially in rich groups or poor clusters, an intriguing possibility is that they are associated with massive dark halos and that their large correlation length reflects the relative bias of these halos with respect to the underlying mass distribution. Semi--analytical models of galaxy evolution, such as those of Kauffmann, White \& Guiderdoni (1993), are now able to make some predictions about the luminosity function of galaxies in halos of different mass, despite the uncertainties related to the galaxy--dark halo connection. An apparently generic prediction is that each halo contains a dominant galaxy surrounded by fainter galaxies (Kauffmann, Nusser \& Steinmetz 1997). If this is the case, it would imply that at least some dark matter concentrations can be detected and used to investigate the halo--galaxy connection. Here we investigate in more detail the characteristics of VLGs and compare their clustering properties to those of galaxy clusters. In section 2, we describe the properties of this galaxy population from available data in the literature and investigate the environment in which these galaxies reside. In section 3, we examine their distribution relative to the large--scale structures, their auto--correlation, and the VLG--cluster cross--correlation properties. Our main conclusions are summarized in section 4.
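As background to the clustering statistics referred to above, a minimal pair-counting sketch of the two-point correlation function is given below (our illustrative addition; the analyses cited use the full survey selection functions and optimized estimators, not this brute-force toy).
\begin{verbatim}
import numpy as np

def xi_landy_szalay(data, randoms, r_edges):
    # Toy Landy-Szalay estimator, xi = (DD - 2 DR + RR) / RR, with
    # normalized pair counts; data/randoms are (N, 3) arrays of
    # comoving positions in h^-1 Mpc.
    def counts(a, b, autocorr):
        d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)
        d = d[np.triu_indices(len(a), k=1)] if autocorr else d.ravel()
        return np.histogram(d, bins=r_edges)[0].astype(float)

    nd, nr = len(data), len(randoms)
    dd = counts(data, data, True) / (nd*(nd - 1)/2.0)
    rr = counts(randoms, randoms, True) / (nr*(nr - 1)/2.0)
    dr = counts(data, randoms, False) / (nd*nr)
    return (dd - 2.0*dr + rr) / rr

# For power-law clustering xi(r) = (r/r0)^(-gamma), the relative bias of
# two samples at a fixed separation is b = (r0_1/r0_2)^(gamma/2).
\end{verbatim}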
\section{Characteristics of Very Luminous Galaxies} \subsection{VLG Sample} The VLG sample was drawn from the complete magnitude--limited SSRS2 south catalog, which contains about 3400 galaxies with $m_B(0) \leq 15.5$ (da Costa et al. 1994, 1998), covering the region $ b \leq -40^o$ and $ -40^o \leq \delta \leq -2.5^o$. Absolute magnitudes were estimated from luminosity distances, assuming $H_0 = 100$~km~s$^{-1}$~Mpc$^{-1}$ and $q_0 = 0.5$. Velocities were corrected for the motion of the Local Group, and apparent magnitudes were corrected for K--dimming according to the different morphological types. Our VLG sample consists of 113 galaxies with $ M_B~\leq -21$ ($L > 4 L^*$). The redshift distribution of these galaxies is shown in figure 1. We should point out that we excluded from our sample a few cases of strongly interacting systems, or of galaxies near bright stars, which had been preliminarily selected in the catalog because their magnitudes had been significantly overestimated (a few VLGs we have included might still be affected by this problem, e.g. VLG~108). Together with the SSRS2 magnitude errors ($\sim 0.3$), this introduces some uncertainty in our VLG catalog limited at $M \le -21$, but it has no impact on our conclusions. \subsection{Individual Properties} In order to look for possible explanations for the strong clustering exhibited by the VLGs (see Benoist {et al.~} 1996 and the discussion below), we first examine the characteristics of the individual galaxies that comprise this population. For this purpose, we have used the morphological classification available in the SSRS2 catalog (da Costa et al. 1998) and have searched the NASA Extragalactic Database (NED) for additional information on these galaxies. Our first finding is that the morphological composition of our VLG sample does not differ from that of the whole SSRS2 sample. In the bright sample there are 39 (35\%) early--type galaxies ($T < 0$) and 74 (65\%) late-types. These fractions are consistent, for example, with the 35\% of early types found in the \MB $ \leq -19.0$ volume--limited subsample, which does not show the enhanced correlation exhibited by the VLGs. It is worth stressing here that the morphological classification of these bright galaxies --at least the separation between early and late types-- is quite reliable (da Costa et al. 1998). From the NED we also find that only four galaxies are cDs, which, as mentioned, would be the most natural explanation for the observed strong clustering, as in that case one would be measuring the amplitude of the cluster--cluster correlation. In order to illustrate the properties of the VLG galaxies, we summarize in table 1 the information gathered from the NED for all the VLG galaxies. The table includes: in column (1) the identification number; in column (2) the catalog name; in column (3) the ESO name (Lauberts 1980), whenever available; in columns (4) and (5) the B1950 right ascension and declination; in column (6) the apparent magnitude as listed in the SSRS2 catalog; in column (7) the B-R color (Lauberts \& Valentijn 1989; de Vaucouleurs et al. 1991), whenever available; in column (8) the infrared luminosity at $60 \mu$m in solar units (with $H_0 = 100$~km~s$^{-1}$~Mpc$^{-1}$); in column (9) the morphological type, either from the SSRS2 catalog or from the literature if a more detailed classification is available; in column (10) the heliocentric radial velocity; in column (11) an indication of other properties such as infrared emission (ir), AGN (an), or radio source (rs).
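For reference, the $M_B \le -21$ selection of section 2.1 can be sketched as follows (our illustrative addition; the SSRS2 K-corrections are type dependent and enter only as a placeholder here).
\begin{verbatim}
import numpy as np

C = 2.99792458e5    # speed of light, km/s
H0 = 100.0          # km/s/Mpc, as adopted in the text

def abs_mag(m_B, v_kms, K=0.0):
    # M_B from apparent magnitude and radial velocity, using the
    # q0 = 0.5 Mattig luminosity distance
    # d_L = (2c/H0) (1 + z - sqrt(1 + z)); K is a placeholder for
    # the type-dependent K-correction.
    z = v_kms / C
    d_L = (2.0*C/H0) * (1.0 + z - np.sqrt(1.0 + z))   # Mpc
    return m_B - 25.0 - 5.0*np.log10(d_L) - K

# At the survey limit m_B(0) = 15.5, M_B = -21 is reached near
# v ~ 20,000 km/s, roughly the effective depth of the VLG sample:
print(abs_mag(15.5, 20000.0))   # ~ -21.0
\end{verbatim}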
We call attention to the fact that, for the 13 VLGs with radial velocities less than 10,000~\kms, for which the information should be the most complete, Table 1 shows that late--type spirals are the dominant morphological type, often with bars and rings. Moreover, 6 out of the 13 nearby VLGs are also detected by {\em IRAS}, typically with $L_{IR} \sim 10^{10}$ in solar luminosity units. For the population as a whole we find that a large number show other interesting characteristics: 1) there are 30 peculiar galaxies (Arp and Madore 1987); 2) 42 galaxies are also in the IRAS Point Source Catalog and/or in the Faint Source Catalog (Moshir et al. 1990); 3) we find an apparent overabundance of barred and ring galaxies: out of 26 galaxies with detailed morphological information, 13 contain bar/ring structure, while 7 are intermediate cases; 4) 14 galaxies are radio--sources; 5) only 5 galaxies are AGNs (3 Seyferts and 2 Liners; Maia {et al.~} 1996, NED), showing that nuclear activity is rare among the VLGs. In the VLG sample, we find that 54 galaxies are included in the ESO catalog, for which $B$ and $R$ magnitudes are available. In figure 2 we compare the color distribution of this subsample with that of the $M \le -19$ sample, renormalized to the total number of VLGs. While VLGs have a slightly larger spread in color, the two distributions are similar. The above results indicate that the sample of SSRS2 galaxies drawn on the basis of their large blue luminosity is ``special'' relative to galaxies of lower luminosity, both in terms of its internal characteristics and its clustering properties.

\subsection{Relation VLGs--Clusters}
As suggested earlier, an obvious explanation for the large clustering strength of bright galaxies would be that we are simply picking up cluster members, thus measuring the amplitude of the cluster--cluster correlation function (\eg Hamilton 1988). Although, as previously mentioned, the VLGs are not cDs, they could still reside preferentially in rich clusters. Even though the volume covered by the SSRS2 does not include very rich clusters, we have examined the ACO catalog (Abell, Corwin \& Olowin 1989), which can be considered complete out to a distance of 200~h$^{-1}$ Mpc, searching for VLGs with a projected separation less than 1.5~h$^{-1}$ Mpc~ from cataloged $R \ge 0$ cluster centers and a radial velocity difference less than 1200 \kms~ with respect to the cluster mean velocity. We find only 12 galaxies which can be considered candidate cluster members. We have also extended our search to the poor clusters in the volume with a measured redshift, using the ACO supplementary list and the Edinburgh--Durham cluster catalog (Nichol et al. 1992). In this search we include clusters beyond the redshift limit of the bright galaxy sample. Using the same criteria as for the rich clusters, we find 7 additional galaxies which could be associated with known poor clusters. Therefore, at best only 19 VLGs may reside in previously known rich and poor clusters, out of which 16 are early--type galaxies. This result confirms that very luminous galaxies are not preferentially located in either $R \ge 0$ or poor clusters. In figure \ref{fig:3} we show the spatial distribution of the VLGs and the ACO clusters in the region, in the declination range $-40^o \le \delta \le -2.5^o$. In order to have a preliminary understanding of the VLG locations relative to the large--scale structures, we have resorted to a simple percolation analysis.
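For reference (our own brief summary of the standard friends--of--friends construction of Huchra \& Geller 1982), for a sample of mean density $n$ a dimensionless percolation parameter $s$ corresponds to a linking radius and a minimum space density enhancement
\begin{equation*}
r_s = s\, n^{-1/3}, \qquad \frac{n_{\rm min}}{\bar{n}} \simeq \frac{3}{4 \pi s^3};
\end{equation*}
with the VLG mean density $n \sim 6 \times 10^{-5}$~h$^3$~Mpc$^{-3}$ quoted in section 3.3, $s = 0.5$ yields the values adopted below.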
Adopting a percolation parameter $s = 0.5$, which corresponds to a search radius $r_s = 12.6$~h$^{-1}$ Mpc~ and to a space density enhancement of $1.9$, we have detected 7 structures with more than 3 members, containing about 42\% of the bright galaxies. Among them there are two main structures. One is at a mean redshift $z \sim 0.055$; it consists of 20 members, out of which only 5 are possible members of rich clusters, all belonging to the Pisces--Cetus region. The second association, with 9 members, is at a mean redshift $z \sim 0.054$, and none of its galaxies are found in rich clusters. Comparing their coordinates with maps of the large--scale structures based on the ACO cluster distribution (Tully 1986, Tully {et al.~} 1992), we find that both these associations correspond to dense regions of the Pisces--Cetus Supercluster. As a final note, we point out that eliminating the VLGs which may be in clusters does not significantly change the correlation function of the sample (see section 3).

\subsection{Environment of VLGs}
As we have seen, there is no compelling evidence that VLGs reside preferentially in clusters of galaxies. However, this does not fully answer the more general question about the type of environment in which these galaxies reside. Unfortunately, answering it is not easy, because the majority of the VLGs are at large distances, where fainter neighbors would not be included in the SSRS2 sample because of its relatively bright magnitude limit. On the other hand, the use of radial velocity databases is dangerous because of their unknown incompleteness. Therefore, we can only attempt a preliminary investigation of the question. First of all, we have used the catalog of groups recently identified within the SSRS2 sample by Girardi et al. (1998). This catalog contains groups with mean positions out to 12,000 \kms, but includes members out to 15,000 \kms. With this procedure we should in principle be able to detect groups including nearby VLGs: at 12,000 \kms~ we can detect a group with a VLG and 2 companions brighter than $M_B \sim -19.9$, or even fainter ones at smaller distances (there are 27 VLGs within $V \le 12,000$~\kms, and 45 VLGs within $V \le 15,000$~\kms). We find 14 VLGs in 13 loose groups; of these, 3 are in known clusters (and have already been included in the 19 mentioned above), 3 are in known groups --2 in Hickson compact groups 12 and 91 (see Hickson 1982), one in an Arp--Madore group--, and 8 are in new groups, hereafter referred to as SSRS2 groups. The richness of the 8 loose groups (i.e. those which are not associated with rich clusters or compact groups) is quite low: 4 groups have 3 members, 2 groups have 5 members, and 1 has 7 members, including 2 VLGs. The identification of groups in the total VLG sample is more difficult, and is necessarily incomplete. Using the whole SSRS2 catalog, we have selected galaxies with a projected separation of less than 1.5~h$^{-1}$ Mpc~ and within 1200~\kms~ of a very luminous galaxy. This is not a real group--finding algorithm, but it suffices for our purposes. In this way we have found 22 galaxies in group candidates with at least 3 members (including the VLG); most of them are known systems we have already taken into account: only 4 are new candidate systems, which had not been identified in the catalogs of clusters or groups. In addition, we have also searched for VLGs in the compact group catalog of Barton et al. (1996).
We find two systems which include nearby VLGs: their no.~81 (which corresponds to HCG91) and no.~88, which corresponds to the core of A4038. Both these cases are also in the SSRS2 group catalog (Girardi et al. 1998). However, as we have stated, the number of systems we can identify is biased by the relatively bright magnitude limit of the SSRS2, which could lead us to miss fainter galaxies. Therefore, we have also searched the literature (\eg NED) to identify known systems of galaxies, from binaries to groups. Of course, one should be cautious in interpreting the results because of the incompleteness of these data sets. From this search we find, for example, 2 more VLGs in southern compact groups from the automated catalog of Prandoni, Iovino \& MacGillivray (1994), as well as 13 pairs and 4 triplets (2 pairs belonging to systems previously found). Combining all the above results, we may conclude that, out of the 113 VLGs, we have identified 12 in rich clusters, 7 in poor clusters, 24 in generic groups with at least 3 members, and 11 in interacting pairs or possible binaries, for a total of 54 VLGs in some type of galaxy association.

In table 2, we list all VLGs that have been identified with some system, ranging from binaries to rich clusters. In column (1) we give the identification number; in column (2) the catalog name; in column (3) the system type (binary, triplet, group, poor cluster, rich cluster), with a note on the VLG (cD, Seyfert, interacting) where applicable; in column (4) the system name (ACO or EDCC name for clusters, Hickson no. for compact groups, Arp--Madore, ESO or Vorontsov--Velyaminov no. for the others, indicating simply SSRS2 for groups listed only in the Girardi et al. (1998) catalog or for candidate groups found by percolation in this work); in column (5) the velocity dispersion of the system as measured from the SSRS2; in column (6) the number of galaxies used to estimate the velocity dispersion. As is apparent from table 2, there is generally only one VLG per system. Two exceptions are A4008, which contains 4 VLGs, and the SSRS2 loose group which includes VLG~37 and VLG~38. Among the systems identified above, extended X--ray emission consistent with the VLG position was detected by ROSAT for galaxies in A4038 and S0141. The object VLG 86, which is a Seyfert 1, was detected as an X--ray point source by ROSAT (B\"ohringer, private communication). At the position of VLG 65, ROSAT also detected extended X--ray emission typical of a group or cluster. This is an interesting object because, even though it is relatively nearby ($z \sim 0.03$), it is not found in any cluster or group catalog. We identified it in the SSRS2 through percolation as a group of 3 galaxies (including the VLG); in a note in the MCG (Vorontsov--Velyaminov \& Arhipova 1968) referring to this galaxy we read: ``Here is the main member of a cluster, located at its end. The other members are of type E and S0 and are much fainter, but visibly spiral and [16-17 mag].''

The remaining 59 VLGs have not been associated with any particular known system. However, after visual inspection of these galaxies using the Digitized Sky Survey (DSS), we estimate that only about 16 galaxies might be isolated. Among the other galaxies, about 20 have at least one companion and show clearly distorted morphologies and/or evidence of interaction; 5 appear to be surrounded by faint satellites; and 18 might be in groups (often with members much fainter than the VLG, as in the case of VLG~65), although usually no redshift information is available.
This is illustrated in figure 4 for four such cases: VLG~24 is surely not a typical case, being an elliptical galaxy presumably at the center of a group; VLG~41 is probably a member of a triplet/small group of galaxies; VLG~44 is clearly interacting with a large, irregular galaxy; VLG~77 is a Liner, probably interacting with a small satellite. Further evidence that VLGs are unlikely to be isolated systems comes from the examination of the small subsample of nearby VLGs ($V < 10,000$~\kms, a distance out to which a typical $M^* \sim -19.5$ galaxy is brighter than $m=15.5$ and is therefore included in the SSRS2). Images for these galaxies are shown in figure 5. Most of them are not isolated. For example, VLG~20 does not belong to any known cluster or group, but we have identified around it (through the percolation technique previously described) 7 other fainter SSRS2 galaxies, giving a velocity dispersion of $\sim 900$~\kms~ (see table 2). Close inspection of the images of the only three nearby VLGs, VLG~27, VLG~29, and VLG~61, which, from our analysis, would be classified as ``isolated'', shows instead that they have companions: VLG~27, a peculiar barred late--type spiral, seems to be surrounded by satellites very close to it, as well as brighter galaxies at larger distances; VLG~29 (listed in the Arp--Madore catalog of Southern Peculiar galaxies) has a disk galaxy as a probable companion; VLG~61 has many nearby satellites.

From the above evidence we can conclude that VLGs are generally found in environments of high local galaxy density, but not in rich clusters. Moreover, both visual inspection of the images of nearby VLGs and their surroundings (within 1~h$^{-1}$ Mpc~ at the VLG redshift) and the number of members in SSRS2 loose groups found to include a VLG show no evidence that VLGs are preferentially in rich groups. \void{This is an important point, as the clustering properties we will examine in the next section suggest that VLGs are indeed associated with massive dark halos.}

\section{Large--scale Distribution}
\subsection{Correlation function}
The original reason for focusing our attention on the sample of VLGs was the strong bias shown by this population relative to fainter galaxies (Benoist {et al.~} 1996). This is illustrated in figure \ref{fig:test}, where we show the correlation function for galaxies brighter than $M = -21$ within 168~h$^{-1}$ Mpc. For comparison, we also show the fit to the cluster correlation function (Cappi \& Maurogordato 1992) determined for $R \ge 0$ ACO clusters (Abell et al. 1989), $\xi_{cc} = (s/19.5)^{-1.8}$. We recall that the correlation function of clusters depends on their richness (see Mann et al. 1993, Croft et al. 1997). For $R \ge 1$ clusters, Peacock \& West (1992) find $r_0 = 21 \pm 1.3$~h$^{-1}$ Mpc, but some authors claim that the correlation amplitude of Abell clusters is amplified by projection effects and give an estimate of $\sim 14$~h$^{-1}$ Mpc~ for APM clusters (Dalton et al. 1994), which would then be comparable to the value found for VLGs. From the figure we see that very bright galaxies have a correlation length comparable to that measured for poor clusters. The correlation length of \MB $\leq -21$ galaxies is $r_0 = 16 \pm 2$~h$^{-1}$ Mpc, while the zero--crossing of the correlation function is beyond $\sim 40$~h$^{-1}$ Mpc. In Benoist {et al.~} (1996), it was possible to show that the dependence of the correlation amplitude on luminosity was real by considering different luminosity-limited samples within the same volume.
Unfortunately, the same direct test is not possible for the $M \le -21$ sample because of the relatively small number of galaxies. Therefore, one might argue that the large correlation amplitude is due to sampling effects, caused either by virialized systems in the volume considered or by fluctuations of the mean background density on scales comparable to the sample size. In fact, examining figure \ref{fig:3}, where we show the spatial distribution of the bright galaxies in the declination range $-40^o \le \delta \le -2.5^o$, at least two concentrations can be seen at large distances, at $\alpha \sim 23^h 30^m$ and at $\alpha \sim 1^h$, corresponding to large--scale structures seen in redshift space, as discussed in section 2.3. There are, however, two lines of argument showing that our results are not a consequence of sampling effects. The first, based on circumstantial evidence, is that: 1) as we have seen, the bright galaxies seem to have peculiar characteristics; 2) their clustering properties extend a trend already visible for fainter galaxies (see figure 5 of Benoist et al. 1996); 3) similar results were found by Hamilton (1988) in his analysis of the CfA1 catalog, i.e. probing a different region of the sky, and by Park et al. (1994) from the analysis of the CfA2 power spectrum. A more direct argument is that the correlation amplitude does not vary significantly if one considers sub--samples defined 1) by removing the main concentrations, 2) by using a smaller volume, or 3) by splitting the sample into two ranges of right ascension, thus probing different structures. Therefore, the large correlation amplitude of very bright galaxies is probably a genuine property. We also point out that the amplitude of the correlation is not affected by removing the galaxies identified in the previous section as possibly belonging to rich clusters. It is also important to emphasize that, although a large fraction of the VLGs may be in systems of galaxies, there is currently no evidence that the amplitude of the SSRS2 group--group correlation function (Girardi et al. 1998) is as large as that obtained for the VLGs. On the basis of the evidence presented in the previous section, VLGs are not preferentially in clusters, and probably not in rich groups (the latter claim is, of course, based only on our analysis of nearby VLGs). We therefore suggest that VLGs are a highly biased population, possibly associated with dark halos with masses comparable to those of clusters.

\subsection{VLG--Cluster cross--correlation function}
Additional evidence in support of the above conjecture comes from the VLG--cluster cross--correlation function shown in figure \ref{fig:cross}a. Despite the small number of clusters (we have found only 28 $R \ge 0$ ACO clusters within the volume considered), we find $\xi_{gc} = \left[ r / (16 \pm 2) \right]^{-1.7 \pm 0.4}$. For comparison we also show the galaxy--cluster cross--correlation determined by Lilje \& Efstathiou (1988), $\xi_{gc} = \left( r / 8.8 \right)^{-2.2}$, based on the Lick counts of galaxies. While the slopes differ only at the $1 \sigma$ level, $r_0$ for bright galaxies is significantly larger, by a factor of about 2, even though the uncertainties are large (for example, Seldner \& Peebles 1977 find a significantly larger amplitude for the galaxy--cluster cross--correlation than Lilje \& Efstathiou 1988).
An estimate of the relative bias between the VLGs and clusters can be computed from the ratio $\alpha = J_3 (gc) / J_3 (cc)$, where $J_3(s) = \int_0^s x^2 \xi(x)\, dx$, $J_3 (gc)$ is the integral of the galaxy--cluster cross--correlation, and $J_3 (cc)$ is the integral of the cluster correlation function (see Alimi {et al.~} 1988). The dependence of $\alpha$ on separation $s$ is shown in figure \ref{fig:cross}b, from which we determine that galaxies with \MB $\leq -21$ are less clustered than clusters by a factor of $b_{21}/b_{cc} \sim 0.8$, a ratio which appears to be constant in the range 5--50~h$^{-1}$ Mpc. This high bias is comparable to that measured for radio--galaxies (Peacock \& Nicholson 1991, Mo, Peacock \& Xia 1993, Loan, Wall \& Lahav 1997), which has been explained as being due to the fact that radio--galaxies reside preferentially in clusters. However, as we have seen, the VLGs are generally neither radio sources nor located in rich clusters. Furthermore, while radio--galaxies are representative of early--type galaxies, our results show that late--type galaxies also contribute to the effect.

\subsection{Dimensionless Correlation Function}
In order to further compare the clustering properties of the bright galaxies with those of clusters and other systems, we use the scaling relation proposed by Szalay \& Schramm (1985), who noted that the cluster correlation amplitude increases with the mean spatial separation of the clusters. As discussed by Bahcall \& West (1992), the ``universal dimensionless correlation function''
\begin{equation}
A_i = r_0 ^\gamma \sim (\alpha d_i)^{1.8}
\label{eq:unicor}
\end{equation}
\noindent where $d_i = n^{-1/3}$ is the mean inter-particle distance and $\alpha \sim 0.5$, seems to hold for a number of systems up to superclusters, but not for normal galaxies. However, our results suggest that in this respect $M \le -21$ galaxies should differ from typical $M^*$ galaxies, as their mean density is much lower ($n \sim 6 \times 10^{-5}$~h$^3$~Mpc$^{-3}$ in the SSRS2) and they are more strongly correlated. In figure \ref{fig:universal} we show the correlation length $r_0$ for different volume-limited subsamples of the SSRS2, as a function of their mean inter-particle distance $d_i = n^{-1/3}$, where $n$ is the mean density of the subsample. Filled hexagons refer to SSRS2 volume--limited subsamples: each subsample includes galaxies in a one magnitude range, from $-17 \le M_B \le -16$ (lowest point) to $-22 \le M_B \le -21$, with a step of 0.5 magnitudes (for details, see Benoist {et al.~} 1996). Recalling that for the SSRS2 \MB$^* \sim -19.5$, it is clear from figure \ref{fig:universal} that faint galaxies ($L < L^*$) show a significant departure from the relation, as one could expect {\em a priori} (see Peebles 1993), whereas brighter galaxies ($L > L^*$) approach it asymptotically, which is a much less trivial result. This is in fact the first time that it has been possible to cover a sufficiently large range of luminosities to allow a comparison of the scaling relation of galaxies and clusters. Galaxies brighter than \MB $=-20$ approximately follow a scaling relation with $\alpha \sim 0.6$, slightly higher than, but, given the uncertainties, consistent with the value of $\alpha \sim 0.4$ determined by Bahcall \& West (1992) for a variety of systems.
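As an illustrative check (our own arithmetic, based on the numbers quoted above), the VLG point indeed lies close to the cluster scaling: with $n \sim 6 \times 10^{-5}$~h$^3$~Mpc$^{-3}$ and $\alpha \sim 0.6$, equation \eqref{eq:unicor} gives
\begin{equation*}
d_i = n^{-1/3} \simeq 25.5~h^{-1}~{\rm Mpc}, \qquad r_0 \simeq \alpha\, d_i \simeq 15~h^{-1}~{\rm Mpc},
\end{equation*}
consistent, within the errors, with the measured $r_0 = 16 \pm 2$~h$^{-1}$ Mpc.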
Szalay \& Schramm (1985; see also Luo \& Schramm 1992) argued that the universal dimensionless correlation function followed by clusters could be explained if structures formed by a truly scale--invariant process, leading to a fractal--like distribution, while the observed deviation of galaxies from this relation could be explained by non--linear gravitational clustering at small scales. We take a different point of view and argue that the observed behavior is clear evidence for biasing. First, the results of Benoist {et al.~} (1996) show that the correlation length does not depend on the depth of the sample, contrary to what would be expected for fractals (see also Cappi et al. 1998). Second, the clear trend for very bright galaxies to lie close to the relation obeyed by clusters suggests a continuity between the clustering properties of bright galaxies and those of clusters. This would be a natural consequence of different biasing amplitudes for systems with different mass scales (e.g. Peebles 1993). Our results show that the universal dimensionless correlation function can be explained as a consequence of the increase of the correlation amplitude with the mass (and therefore rarity) of the system, as expected if the distribution of luminous objects is biased with respect to the underlying matter distribution (Kaiser 1984; Bardeen {et al.~} 1986). The $r_0 - d$ relation should thus be regarded as an empirical tool to compare the biasing of different classes of objects (see for example the more rigorous approach of Bernardeau \& Schaeffer 1992).

\section{Discussion and Conclusions}
From our analysis of the SSRS2, we have shown that very luminous galaxies (\MB $\leq -21$ or $L \gtrsim 4L^*$) are strongly correlated ($r_0 = 16 \pm 2$~h$^{-1}$ Mpc), with the zero--crossing of the correlation function beyond 40~h$^{-1}$ Mpc. Furthermore, we find that luminous galaxies with $L > L^*$ appear to follow a universal dimensionless correlation function similar to that of galaxy clusters, which suggests a common biasing mechanism. Independently of their clustering properties, we have found that $L \gtrsim 4L^*$ galaxies seem to form a special population. They are observed in regions of high local galaxy density, but are predominantly neither in clusters nor, apparently, in rich groups. Instead, they tend to be found in interacting pairs; some exhibit tidal distortions, while others have faint satellite galaxies in their surroundings. VLGs resemble another class of galaxies, the ultra--luminous infrared galaxies, in which the high luminosity appears to be triggered by interaction with other galaxies (e.g. Melnick \& Mirabel 1990; Clements et al. 1996; Duc et al. 1997). Is there an evolutionary link between these systems? Is it possible that a massive galaxy undergoes a period of intense star formation, shrouded by dust, and finally becomes visible as a VLG in the optical? A simple test of this hypothesis will be possible with a large and complete sample of infrared ultra--luminous galaxies (which should then have a higher correlation amplitude than normal IRAS galaxies). The fact that the brightest galaxies in the sample are not found in clusters but have clustering properties similar to those of $R=0$ clusters suggests that they may reside within massive dark halos. We cannot completely exclude that VLGs are preferentially in rich groups, which should be more strongly clustered than loose groups as a whole, because most VLGs are at large distances, where fainter members are not included in the SSRS2.
However, as discussed above, at the present time we find no evidence for this, as most groups containing a nearby VLG are of low richness. While many VLGs have companions, the total luminosity of these systems appears to be far less than that typical of clusters. If this is the case, it implies that there are large variations in the M/L ratio and/or in the luminosity function of galaxies forming in dark halos. Further observational work is clearly needed to fully characterize this galaxy population. Clarifying the nature of these objects may contribute to our understanding of the halo--galaxy connection and the mechanisms responsible for galaxy evolution.

\acknowledgements This research has made use of the NASA / IPAC Extragalactic Database (NED), which is operated by the Jet Propulsion Laboratory, California Institute of Technology, under contract with the National Aeronautics and Space Administration. The Digitized Sky Surveys were produced at the Space Telescope Science Institute under U.S. Government grant NAG W-2166. The images of these surveys are based on photographic data obtained using the Oschin Schmidt Telescope on Palomar Mountain and the UK Schmidt Telescope. The plates were processed into the present compressed digital form with the permission of these institutions. LNdC thanks his SSRS2 collaborators for allowing him to use the data prior to publication, and acknowledges the hospitality of the Institut d'Astrophysique and the Observatoire de Paris--Meudon. AC acknowledges the hospitality of the Observatoire de Nice. We also wish to thank H. B\"ohringer for his list of X--ray detections in the fields of very luminous galaxies, and the referee, M. Ramella, for his useful comments.
\section{Practitioner's guide to our approach}\label{apx:guide}
\subsection{General implementation}
Given a dataset of tuples $S = \{(x_i,y_i,m_i) \}_{i=1}^n$, where $x_i$ represents the covariates, $y_i$ is the target, and $m_i$ is the expert's label, we want to construct a classifier $h: \mathcal{X} \to \mathcal{Y}$ and a rejector function $r: \mathcal{X} \to \{-1,1 \}$. Our method for predicting on a new example $x \in \mathcal{X}$, given expert context $z \in \mathcal{Z}$ that only the expert can observe, a function class $\mathcal{H}$ where $h \in \mathcal{H}:\mathcal{X} \to \mathbb{R}^{|\mathcal{Y}|+1}$ (an example would be the set of deep networks with $|\mathcal{Y}|+1$ output units), and an expert $M: \mathcal{Z} \to \mathcal{Y}$, is summarized below in Algorithm \ref{alg:our_method}.
\begin{algorithm}[H]
\DontPrintSemicolon
\SetAlgoLined
\textbf{Input}: training data $S = \{(x_i,y_i,m_i) \}_{i=1}^n$, function class $\mathcal{H}$, example $x$, Expert $M$ and expert input $z$ \\
$g_1, \cdots, g_{|\mathcal{Y}|},g_\bot \gets \arg \min_{\mathbf{g} \in \mathcal{H}} \sum_{i \in S} L_{CE}^\alpha(\mathbf{g},x_i,y_i,m_i)$ \\
prediction $\gets 0$ \\
$r(x) \gets \textrm{sign}(-\max_{y \in \mathcal{Y}}g_y(x) + g_\bot(x))$ \\
\eIf{$r(x)=-1$}{
$h(x) \gets \arg\max_{y \in \mathcal{Y}} g_y(x)$ \\
prediction $\gets h(x)$
}{
$m \gets M(z)$ (expert query)\\
prediction $\gets m$
}
\textbf{Return}: prediction
\caption{Our proposed method for prediction on a new example $x \in \mathcal{X}$ with expert input $z \in \mathcal{Z}$}
\label{alg:our_method}
\end{algorithm}
The loss $L_{CE}^\alpha$ used in the algorithm is the following:
\begin{align*}
L_{CE}^\alpha(\mathbf{g},x,y,m)=& -( \alpha \cdot \mathbb{I}_{m = y} + \mathbb{I}_{m \neq y} )\log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \\&- \mathbb{I}_{m = y} \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right)
\end{align*}
Practically, integrating an expert decision maker into a machine learning model amounts to two modifications in training: increasing the output size of the function class under consideration by one additional output unit representing deferral, and training with the loss $L_{CE}^\alpha$ instead of the cross-entropy loss. We show how to implement $L_{CE}^\alpha$ in PyTorch below:
\begin{lstlisting}[language=Python]
import torch

def deferral_loss_L_CE(outputs, target, expert, k_classes, alpha):
    '''
    outputs: model outputs (logits with k_classes + 1 units,
             the deferral unit being the last one)
    target: target labels
    expert: expert agreement labels for the batch (1 if m = y, 0 otherwise)
    k_classes: cardinality of the target space Y
    alpha: weight on the classifier term when the expert is correct
    '''
    batch_size = outputs.size()[0]
    defer_position = k_classes  # index of the deferral output unit
    outputs = torch.nn.functional.softmax(outputs, dim=1)
    loss = -expert * torch.log2(outputs[range(batch_size), defer_position]) \
           - (alpha * expert + (1 - expert)) * torch.log2(outputs[range(batch_size), target])
    return torch.sum(loss) / batch_size
\end{lstlisting}
\subsection{Choice of $\alpha$}
The choice of the hyperparameter $\alpha$ has a sizable influence on system performance. Naive validation over $\alpha$ requires re-training on the entire training set from scratch for each value in the search space. We find that a simple validation strategy often works as well as re-training from scratch, especially in scenarios where there is little gain in adapting to the expert but there are major gains in being able to defer correctly. The strategy first requires splitting the training set into two sets $S_{T1}$ and $S_{T2}$, where $S_{T1}$ is larger than $S_{T2}$ (e.g.
an 80-20 split), access to a validation set $S_V$, and a set of possible values $\mathcal{A}$ for $\alpha$ (an evenly spaced grid over $[0,10]$ is more than sufficient). The strategy then proceeds in two steps:
\begin{itemize}
\item \textbf{Step 1:} Train on $S_{T1}$ with $L_{CE}^1$ (i.e. setting $\alpha =1$) to maximize system performance on $S_V$. One may find more success instead training on $S_{T1}$ with the \emph{cross-entropy loss} (with the model still having the extra output) to maximize \emph{classifier} performance on $S_V$ rather than system performance. Call the resulting model of this first step $M_1$.
\item \textbf{Step 2:} For each $\alpha \in \mathcal{A}$, fine-tune on $S_{T2}$, starting from model $M_1$, to maximize system performance on $S_V$, measuring it with the rejector $r(x) = \mathbb{I}\{-\max_{y \in \mathcal{Y}}g_y(x) + g_\bot(x) \geq \tau\}$, where the threshold $\tau$ is chosen post-hoc to maximize performance on $S_V$. The model $M_1'$ and threshold $\tau^*$ that obtain the best system performance across all choices of $\alpha$ and $\tau$ constitute the final model. \\
\item \textbf{Inference time:} Use the rejector defined by $r(x) = \mathbb{I}\{-\max_{y \in \mathcal{Y}}g_y(x) + g_\bot(x) \geq \tau^* \}$ and proceed as in Algorithm \ref{alg:our_method}.
\end{itemize}
\textit{Note} that system performance here refers to metrics measured with respect to the machine+expert system with deferral, while classifier performance refers to metrics measured as if the system never deferred.
\section{Experimental Details and Results}\label{apx:experiments}
All experiments were run on a Linux system with an NVIDIA Tesla K80 GPU on PyTorch 1.4.0.
\subsection{CIFAR-10}\label{apx:cifar10}
\textbf{Implementation Details.} We employ the implementation in \url{https://github.com/xternalz/WideResNet-pytorch} for the Wide Residual Networks. To train, we run SGD with an initial learning rate of 0.1, Nesterov momentum of 0.9, and weight decay of 5e-4, with a cosine annealing learning rate schedule \cite{loshchilov2016sgdr}. We train for a total of 200 epochs in all experiments, at which point the network has perfectly fit the training set; we found that early stopping based on a validation set did not make any difference, and, similarly, training for more than 200 epochs did not hurt test accuracy.

\textbf{Expert Accuracy.} In Table \ref{table:cifar10-expert-acc} we show the accuracy of the expert on the deferred examples versus the number of classes $k$ the expert can predict. We can see that our method $L_{CE}^{.5}$ has higher expert accuracy than all other baselines except at $k=1,2$, where coverage is very high. This contrasts with Figure \ref{fig:covvsacc}, which shows the classifier accuracy on non-deferred examples: there, $L_{CE}^{.5}$ had lower accuracy at each expert level compared to Confidence and $L_{CE}^1$. Hence there is a clear trade-off between choosing the hyper-parameter $\alpha <1$ and $\alpha=1$. For $\alpha<1$, the model will prefer to always defer to the expert whenever the expert is correct; this is advantageous in this setup, as the expert is perfect on a subset of the data and uniformly random on the rest. However, for $\alpha=1$, the model will compare the confidence of the expert with that of the classifier, essentially performing the computation of the Bayes rejector $r^B$, as implied by the consistency of the loss $L_{CE}^1$; note that for $\alpha \neq 1$ the loss $L_{CE}^\alpha$ is no longer consistent.
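For reference, the quantities reported in the tables that follow (coverage, system accuracy, classifier accuracy on non-deferred examples, and expert accuracy on deferred examples) can be computed as in the minimal sketch below. This is our own illustration, assuming the $|\mathcal{Y}|+1$-output convention above with the deferral unit last, and non-empty deferred and non-deferred sets; all variable names are ours.
\begin{lstlisting}[language=Python]
import torch

def system_metrics(outputs, target, expert_preds, k_classes):
    '''
    outputs: model logits, shape (n, k_classes + 1); last unit is deferral
    target: ground-truth labels, shape (n,)
    expert_preds: expert predictions, shape (n,)
    '''
    defer = outputs.argmax(dim=1) == k_classes          # rejector decision
    class_preds = outputs[:, :k_classes].argmax(dim=1)  # classifier prediction
    final = torch.where(defer, expert_preds, class_preds)
    coverage = (~defer).float().mean().item()
    system_acc = (final == target).float().mean().item()
    classifier_acc = (class_preds[~defer] == target[~defer]).float().mean().item()
    expert_acc = (expert_preds[defer] == target[defer]).float().mean().item()
    return coverage, system_acc, classifier_acc, expert_acc
\end{lstlisting}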
\begin{table}[H]
\caption{Accuracy of the expert on deferred examples shown for the methods and baselines proposed, with varying expert competence (k) on CIFAR-10.}
\label{table:cifar10-expert-acc}
\vskip 0.15in
\begin{center}
\begin{small}
\begin{sc}
\resizebox{\textwidth}{!}{
\begin{tabular}{lcccccccccr}
\toprule
Method /\ Expert (k) & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 \\
\midrule
$L_{CE}^1$ & 73.65 & 86.01 & 73.66 & 87.41 & 88.81 & 94.7 & 96.67 & 98.72 & 98.65 & 100 \\
$L_{CE}^{.5}$ & 86.44 & 90.96 & \textbf{92.65} & \textbf{91.67} & \textbf{93.71} & \textbf{96.32} & \textbf{97.61} & 98.77 & \textbf{99.24} & \textbf{100} \\
Confidence & \textbf{87.5} & \textbf{92.74} & 88.88 & 88.3 & 92.8 & 94.56 & 96.76 & \textbf{98.89} & 98.89 & 100 \\
OracleReject & 85.3 & 90.49 & 88.23 & 91.13 & 89.33 & 93.61 & 95.45 & 96.82 & 98.45 & 100 \\
\bottomrule
\end{tabular}}
\end{sc}
\end{small}
\end{center}
\vskip -0.1in
\end{table}
\textbf{Increasing data size.} In table \ref{table:cifar10-increasing-data} we show the accuracy of the classifier and the coverage of the system for our method compared to the baseline Confidence, for the expert with $k=5$. We can see that when data is limited, our method retains a higher classifier accuracy than the baseline. This is in fact due to the low coverage of our method compared to Confidence; as the data size grows, the coverage of our method increases, since the classifier's performance improves and the system can safely rely on the classifier more often. The baseline, on the other hand, remains at almost constant coverage, not adapting to the growing data size.
\begin{table}[H]
\caption{Classifier accuracy on non-deferred examples and coverage shown for our method $L_{CE}^1$ and the baseline Confidence with varying training set size for expert $k=5$ on CIFAR-10.}
\label{table:cifar10-increasing-data}
\vskip 0.15in
\begin{center}
\begin{small}
\begin{sc}
\resizebox{\textwidth}{!}{
\begin{tabular}{lcccccccr}
\toprule
Method /\ Data size (thousands) & 1 & 2 & 3 & 5 & 8 & 10 & 20 & 50 \\
\midrule
$L_{CE}^1$ (classifier) & \textbf{62.84} & \textbf{71.51} & \textbf{72.63} & \textbf{75.03} & 80.1 & \textbf{82.11} & 86.44 & \textbf{95.42} \\
Confidence (classifier) & 50.31 & 59 & 66.3 & 70.12 & \textbf{80.33} & 78.67 & \textbf{87.01} & 92.45 \\
\midrule
$L_{CE}^1$ (coverage) & 25.7 & 35.87 & 40.42 & 49.62 & 46.38 & 46.51 & 50 & 71.35 \\
Confidence (coverage) & \textbf{69.32} & \textbf{72.93} & \textbf{71.99} & \textbf{75.05} & \textbf{73.09} & \textbf{65.9} & \textbf{74.16} & \textbf{72.12} \\
\bottomrule
\end{tabular}}
\end{sc}
\end{small}
\end{center}
\vskip -0.1in
\end{table}
\subsection{CIFAR-10H}
\textbf{Class-wise Accuracy of Expert.} Table \ref{table:cifar10h-expertacc} shows the average accuracy of the synthetic \texttt{CIFAR10H} \cite{peterson2019human} expert on each of the 10 classes. We can see that the expert has very different accuracies across the classes, which creates an opportunity for improvement.

\textbf{Results.} Table \ref{table:cifar10h-complete} shows the full experimental results for CIFAR-10H.
\begin{table}[H]
\caption{Accuracy of the \texttt{CIFAR10H} \cite{peterson2019human} expert on each of the 10 classes.}
\label{table:cifar10h-expertacc}
\vskip 0.15in
\begin{center}
\begin{small}
\begin{sc}
\resizebox{\textwidth}{!}{
\begin{tabular}{lcccccccccc}
\toprule
Class & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 \\
\midrule
Accuracy & 95.15 & 97.23 & 94.75 & 91.58 & 90.51 & 94.90 & 96.22 & 97.91 & 97.33 & 96.74 \\
\bottomrule
\end{tabular}}
\end{sc}
\end{small}
\end{center}
\vskip -0.1in
\end{table}
\begin{table}[H]
\caption{Complete results of table \ref{table:cifar10h}, comparing our proposed approaches and the baseline.}
\label{table:cifar10h-complete}
\vskip 0.15in
\begin{center}
\begin{small}
\begin{sc}
\begin{tabular}{lcccr}
\toprule
Method & System Accuracy & Coverage & Classifier Accuracy & Expert Accuracy \\
\midrule
$L_{CE}$ impute & \textbf{96.29}$\pm$0.25 & 51.67$\pm$1.46 & \textbf{99.2}$\pm$0.08 & \textbf{93.18}$\pm$0.48 \\
$L_{CE}$ 2-step & 96.03$\pm$0.21 & 60.81$\pm$0.87 & 98.11$\pm$0.22 & 92.77$\pm$0.58 \\
Confidence & 95.09$\pm$0.40 & \textbf{79.48}$\pm$5.93 & 96.09$\pm$0.42 & 90.94$\pm$1.34 \\
\bottomrule
\end{tabular}
\end{sc}
\end{small}
\end{center}
\vskip -0.1in
\end{table}
\subsection{CIFAR-100}\label{apx:expcifar100}
\textbf{Results.} In figure \ref{fig:cifar100-kvssystem} we plot the accuracy of the combined algorithm--expert system versus $k$, the number of classes the expert can predict. We can see that our method dominates the baseline for all $k$ except $k=100$. In table \ref{table:cifar100-all-results} we show the expert, classifier, and system accuracy, along with the coverage, of both methods. Our approach $L_{CE}^1$ obtains both better expert and better classifier accuracy; however, it has lower coverage than Confidence.
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
title={},
xlabel={k (classes expert can predict)},
ylabel={Accuracy},
xmin=0, xmax=100,
ymin=78, ymax=95,
xtick={0,10,20,30,40,50,60,70,80,90,100},
ytick={78,80,82,84,86,88,90,92,94},
legend pos=north west,
ymajorgrids=true,
grid style=dashed,
]
\addplot[
color=blue,
mark=square,
]
coordinates {
(0,79.28)(10,78.48)(20,79.37)(30,79.96)(40,80.75)(50,81.92)(60,83.67)(70,85.15)(80,88.63)(90,90.31)(100,94.74)
};
\addlegendentry{Confidence}
\addplot[
color=black,
mark=*,
]
coordinates {
(0,79.28)(10,78.67)(20,79.43)(30,81.02)(40,82.09)(50,83.8)(60,85.15)(70,87.58)(80,90.23)(90,91.81)(100,94.59)
};
\addlegendentry{$L_{CE}^{1}$ (ours)}
\end{axis}
\end{tikzpicture}
\caption{Comparison of the developed method $L_{CE}^1$ on CIFAR-100 versus the Confidence baseline;
$k$ is the number of classes the expert can predict.}
\label{fig:cifar100-kvssystem}
\end{figure}
\begin{table}[H]
\caption{System accuracy, coverage, classifier accuracy on non-deferred examples, and expert accuracy on deferred examples, shown for our method $L_{CE}^1$ and the Confidence baseline with varying expert competence (k) on CIFAR-100.}
\label{table:cifar100-all-results}
\vskip 0.15in
\begin{center}
\begin{small}
\begin{sc}
\resizebox{\textwidth}{!}{
\begin{tabular}{lcccccccccr}
\toprule
Method /\ Expert (k) & 10 & 20 & 30 & 40 & 50 & 60 & 70 & 80 & 90 & 100 \\
\midrule
$L_{CE}^1$ (system) & \textbf{78.67} & \textbf{79.43} & \textbf{81.02} & \textbf{82.09} & \textbf{83.8} & \textbf{85.15} & \textbf{87.58} & \textbf{90.23} & \textbf{91.81} & 94.59 \\
Confidence (system) & 78.48 & 79.37 & 79.67 & 80.75 & 81.92 & 83.67 & 85.15 & 88.63 & 90.31 & \textbf{94.74} \\
\midrule
$L_{CE}^1$ (coverage) & 89.19 & 82.44 & 84.79 & 71.66 & 74.52 & 65.72 & 62.23 & 59.37 & 52.15 & 49.07 \\
Confidence (coverage) & \textbf{99.17} & \textbf{95.47} & \textbf{93.96} & \textbf{86.64} & \textbf{86.71} & \textbf{80.67} & \textbf{79.56} & \textbf{75.36} & \textbf{72.39} & \textbf{63.32} \\
\midrule
$L_{CE}^1$ (classifier) & \textbf{82.35} & \textbf{84.03} & \textbf{84.07} & \textbf{85.29} & \textbf{86.44} & \textbf{87.78} & \textbf{90.13} & \textbf{91.89} & \textbf{92.4} & 94.59 \\
Confidence (classifier) & 78.99 & 80.66 & 81.79 & 84.75 & 84.62 & 87.30 & 88.75 & 90.97 & 92.07 & \textbf{94.97} \\
\midrule
$L_{CE}^1$ (expert) & \textbf{47.36} & \textbf{57.8} & \textbf{68.87} & \textbf{73.99} & \textbf{76.06} & \textbf{79.65} & \textbf{83.37} & \textbf{87.79} & \textbf{91.16} & \textbf{94.57} \\
Confidence (expert) & 18.07 & 52.09 & 51.49 & 54.79 & 64.4 & 68.55 & 71.13 & 82.11 & 85.70 & 94.30 \\
\bottomrule
\end{tabular}}
\end{sc}
\end{small}
\end{center}
\vskip -0.1in
\end{table}
\subsection{Hate Speech experiments}\label{apx:exphate}
\textbf{Implementation details.} We train all models with Adam for 15 epochs and select the best-performing model on the validation set.

\textbf{Results.} Table \ref{table:hatespeech-all-results} shows complete results for our method, the baselines, the expert, and the classifier. Our method and the baselines all achieve comparable results.
\begin{table}[H]
\caption{Detailed results for our method and baselines on the hate speech detection task \cite{davidson2017automated}.
sys: system accuracy; class: classifier accuracy; disc: system discrimination; AAE-biased: Expert 2, which has a higher error rate on the AAE group; non-AAE-biased: Expert 3, which has a higher error rate on non-AAE tweets.}
\label{table:hatespeech-all-results}
\vskip 0.15in
\begin{small}
\begin{sc}
\resizebox{\textwidth}{!}{
\begin{tabular}{lccc|ccr}
\toprule
Method/Expert & & Fair & & & AAE-biased & \\
\midrule
& sys & class & disc & sys & class & disc \\
\midrule
$L_{CE}^1$ (ours) & 93.36 $\pm$ 0.16 & \textbf{95.60} $\pm$ 0.44 & \textbf{0.294} $\pm$0.03 & 92.91 $\pm$ 0.17 & \textbf{94.67} $\pm$ 0.61 & \textbf{0.37} $\pm$ 0.06 \\
Confidence & 93.22 $\pm$0.11 & 94.49 $\pm$ 0.12 & 0.45 $\pm$ 0.02 & 92.42 $\pm$ 0.40 & 94.56 $\pm$ 0.40 & 0.41 $\pm$ 0.02 \\
Oracle & \textbf{93.57} $\pm$0.11 & 94.87 $\pm$0.22 & 0.32 $\pm$0.02 & \textbf{93.22} $\pm$0.11 & 94.49 $\pm$0.12 & 0.449 $\pm$0.024 \\
\midrule
Expert & 89.76 & -- & 0.031 & 84.28 & -- & 0.071 \\
Classifier & 88.26 & 88.26 & 0.226 & 88.26 & 88.26 & 0.226 \\
\bottomrule
\end{tabular}}
\end{sc}
\end{small}
\vskip 0.1in
\begin{small}
\begin{sc}
\begin{tabular}{lccr}
\toprule
Method/Expert & & non-AAE biased & \\
\midrule
& sys & class & disc \\
\midrule
$L_{CE}^1$ (ours) & 90.42 $\pm$ 0.38 & \textbf{94.04} $\pm$0.81 & 0.231 $\pm$0.04 \\
Confidence & 90.60 $\pm$0.13 & 93.68 $\pm$0.24 & 0.15 $\pm$0.03 \\
Oracle & \textbf{91.09} $\pm$ 0.12 & 92.57 $\pm$0.15 & \textbf{0.15} $\pm$0.02 \\
\midrule
Expert & 80.4 & -- & 0.084 \\
Classifier & 88.26 & 88.26 & 0.226 \\
\bottomrule
\end{tabular}
\end{sc}
\end{small}
\vskip -0.1in
\end{table}
\subsection{Baseline Implementation}\label{apx:madras}
\textbf{Description of the approach of \cite{madras2018predict}.} A different approach from ours is to directly approximate the system loss \eqref{eq:system_loss_general}; this was the road taken by \cite{madras2018predict} with their differentiable model method. Let us introduce the loss used in \cite{madras2018predict}:
\begin{equation}
L(h,r,M) = \mathbb{E}_{(x,y) \sim \mathbf{P},m \sim M|(x,y)}\left[ (1-r(x,h(x))) l(y,h(x)) + r(x,h(x)) l(y,m)\right]
\label{eq:madras_loss}
\end{equation}
where $h: \mathcal{X} \to \Delta^{|\mathcal{Y}|-1}$ (classifier), $r: \mathcal{X} \times \Delta^{|\mathcal{Y}|-1} \to \{0,1\}$ (rejector), and the expert $M: \mathcal{Z} \to \Delta^{|\mathcal{Y}|-1}$. \cite{madras2018predict} considers only binary labels and uses the logistic loss for $l(\cdot,\cdot)$, and thus requires the expert to produce uncertainty estimates for its predictions instead of only a label; we can extend this to the multiclass setting by using the cross-entropy loss for $l$. It is clear that the loss \eqref{eq:madras_loss} is non-convex in $r$; hence, to optimize it, \cite{madras2018predict} estimates the gradient through the Concrete relaxation \cite{maddison2016concrete,jang2016categorical}. However, in the code of \cite{madras2018predict} found at \url{https://github.com/dmadras/predict-responsibly}, the authors replace $r(x)$ by its estimated probability under their model. \cite{madras2018predict} also considers an additional parameter $\gamma_{defer}$ found in the code; however, it is not clear what effect this parameter has, as we found that its description in the paper did not match the code.
In detail, let $r_0,r_1 : \mathcal{X} \to \mathbb{R}$ and $r(x) = \arg \max_{i \in \{0,1\}} r_i(x)$; the loss \cite{madras2018predict} considers is:
\begin{equation}
\tilde{L}(h,r,M) = \mathbb{E}_{(x,y) \sim \mathbf{P},m \sim M|(x,y)}\left[ \frac{ \exp(r_0(x))}{\exp(r_0(x)) + \exp(r_1(x))} l(y,h(x)) + \frac{\exp(r_1(x))}{\exp(r_0(x)) + \exp(r_1(x))} l(y,m)\right]
\label{eq:madras_loss_ours}
\end{equation}
All terms in the loss \eqref{eq:madras_loss_ours} are on the same scale, which is crucial for the model to train well. We explicitly have two functions $r_0$ and $r_1$ defining $r$, even though $r$ is binary; this is for ease of implementation. Another key detail of the approach of \cite{madras2018predict} is that the classifier is trained independently of the rejector, by stopping the gradient of the rejector term from backpropagating through $h$. This no longer allows $h$ to adapt to the expert: $h$ is trained with the cross-entropy loss on its own, concurrently with $r$ (see the sketch at the end of this subsection).

\textbf{CIFAR-10 details.} In our CIFAR-10 setup, the dataset $S$ contains only the final prediction $m$ of the expert $M$; thus, to compute $l(y,m)$ we set $l(y,m) = - \log(1-\epsilon)$ if $y=m$ and $l(y,m) = - \log(\frac{1}{|\mathcal{Y}|})$ if $y \neq m$ (simulating a uniform prediction in accordance with our expert behavior), with $\epsilon = 10^{-12}$. One could instead train a network to model the expert's predictions; we found this approach to fail, as there is a large amount of noise in the labels caused by the expert's random behavior.

\textbf{Results on CIFAR-10.} For experts with $k<8$, we found that the baseline of \cite{madras2018predict} almost never defers to the expert, and for $k=8,9$, at the end of training (200 epochs) the rejector never defers, but the optimal system is found in the middle of training ($\sim$100 epochs). The optimal systems achieve 46.27 and 40.22 coverage, 98.81 and 98.89 expert accuracy on deferred examples, and 89.38 and 89.40 classifier accuracy on non-deferred examples, respectively, for $k=8,9$. The classifier alone for the optimal systems achieves $\sim$86 classification accuracy on the whole validation set for both experts; notice that there is not much difference between the classification accuracy on all the data and on non-deferred examples, while for our method and the other baselines there is a considerable increase. This indicates that the rejector is only looking at the expert loss and ignoring the classifier. What causes this behavior is that, as the classifier $h$ trains, its loss $l(y,h(x))$ eventually goes to $0$, whereas the loss of the expert $l(y,m)$ is either $\approx 0$ or equal to $-\log(0.1)$; hence the rejector will make the easier decision to never defer. In the initial epochs, we have a non-trivial rejector, as the classifier $h$ is still learning, and the coverage progressively grows to $100\%$ over training. Essentially, what the approach of \cite{madras2018predict} is trying to do is choose whichever of the expert and the classifier has the lower cost: a cost-sensitive learning problem at its heart. Therefore, one can use the losses developed here to tackle the problem better; we leave this to future investigations. Another potential fix is to learn the classifier and rejector on two different data sets.
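For concreteness, the sketch below shows one way to implement the loss \eqref{eq:madras_loss_ours} together with the stop-gradient described above; this is our own minimal rendition (all variable names are ours), with the expert losses $l(y,m)$ assumed precomputed as in the CIFAR-10 details.
\begin{lstlisting}[language=Python]
import torch
import torch.nn.functional as F

def madras_style_loss(class_logits, rejector_logits, target, expert_loss):
    '''
    class_logits: classifier outputs h, shape (batch, |Y|)
    rejector_logits: rejector outputs [r_0, r_1], shape (batch, 2)
    target: target labels, shape (batch,)
    expert_loss: precomputed l(y, m) per example, shape (batch,)
    '''
    # soft rejector probabilities: column 0 = keep, column 1 = defer
    p = F.softmax(rejector_logits, dim=1)
    classifier_loss = F.cross_entropy(class_logits, target, reduction='none')
    # detach so the rejector term does not backpropagate through h
    mixture = p[:, 0] * classifier_loss.detach() + p[:, 1] * expert_loss
    # h is trained concurrently with its own cross-entropy term
    return mixture.mean() + classifier_loss.mean()
\end{lstlisting}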
\begin{table}[h]
\caption{System accuracy of our implementation of \cite{madras2018predict}, our method, and baselines with varying expert competence (k) on CIFAR-10.}
\label{table:cifar10-madras}
\vskip 0.15in
\begin{center}
\begin{small}
\begin{sc}
\resizebox{\textwidth}{!}{
\begin{tabular}{lcccccccccr}
\toprule
Method /\ System accuracy (k) & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 \\
\midrule
$L_{CE}^{.5}$ & \textbf{90.92} & \textbf{91.01} & \textbf{91.94} & \textbf{92.69} & \textbf{93.66} & \textbf{96.03} & \textbf{97.11} & \textbf{98.25} & \textbf{99} & \textbf{100} \\
$L_{CE}^{1}$ & 90.41 & 91.00 & 91.47 & 92.42 & 93.4 & 95.06 & 96.49 & 97.30 & 97.70 & 100 \\
Confidence & 90.47 & 90.56 & 90.71 & 91.41 & 92.52 & 94.15 & 95.5 & 97.35 & 98.05 & 100 \\
OracleReject & 89.54 & 89.51 & 89.48 & 90.75 & 90.64 & 93.25 & 95.28 & 96.52 & 98.16 & 100 \\
\cite{madras2018predict} & 90.40 & 90.40 & 90.40 & 90.40 & 90.40 & 90.40 & 90.40 & 94.48 & 95.09 & 100 \\
\bottomrule
\end{tabular}}
\end{sc}
\end{small}
\end{center}
\vskip -0.1in
\end{table}
\subsection{CheXpert Experiments}\label{apx:exp_chexpert}
\begin{figure}[h]
\centering
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1_classifier.pdf}
\caption{Classifier AU-ROC on non-deferred examples vs coverage for expert $q=0.7,p=1$ with 100\% of training data.}
\label{fig:classauc_vs_cov_toy}
\end{subfigure}
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1_trainsize-0.1_classifier.pdf}
\caption{Classifier AU-ROC on non-deferred examples vs coverage for expert $q=0.7,p=1$ with 10\% of training data.}
\label{fig:classauc_vs_cov_toy_10}
\end{subfigure}
\caption{Plot of classifier AU-ROC on non-deferred examples versus coverage for systems learned with (a) 100\% of the training data and (b) 10\% of the training data. The noise at low coverage is due to the reduced data size.}
\label{fig:plots_classauc_ap_toyexpert}
\end{figure}
\clearpage
\section{Deferred Proofs and Derivations}\label{apx:proofs}
\subsection{Section \ref{sec:surrog}}
\subsubsection{Binary Setting}
As we alluded to in the body of the paper, we can extend the losses introduced by \cite{cortes2016learning} to our setting for binary labels. Let $\mathcal{Y}=\{-1,+1\}$ and $r,h: \mathcal{X} \to \mathbb{R}$, where we defer if $r(x)\leq 0$. For generality, we assume $l_{exp}(x,y,m) = \max(c,\mathbb{I}_{m \neq y})$, as this allows us to treat rejection learning as an immediate special case (for a perfect expert, with $m=y$ always, the deferral cost reduces to the constant $c$, recovering standard learning to reject).
Following the derivation in \cite{cortes2016learning}, let $u \to \phi(-u)$ and $u \to \psi(-u)$ be two convex functions upper bounding $\mathbb{I}_{u\leq 0}$, and let $\alpha,\beta >0$; then:
\begin{flalign}
&\nonumber L_c(h,r,x,y,m) =\mathbb{I}_{h(x)y\leq 0} \mathbb{I}_{r(x) > 0} + \max(c,\mathbb{I}_{m \neq y}) \mathbb{I}_{r(x) \leq 0}&\\
\nonumber&\leq\max\left\{\mathbb{I}_{\max\{h(x)y,-r(x)\}\leq 0} , \max(c,\mathbb{I}_{m \neq y}) \mathbb{I}_{r(x) \leq 0} \right\}\\
\nonumber&\overset{(a)}{\leq}\max\left\{\mathbb{I}_{ \frac{\alpha}{2}(h(x)y-r(x))\leq 0} , \max(c,\mathbb{I}_{m \neq y}) \mathbb{I}_{ \beta r(x) \leq 0} \right\}\\
&\overset{(b)}{\leq}\max\{\phi\left( \frac{-\alpha}{2}(h(x)y-r(x))\right) , \max(c,\mathbb{I}_{m \neq y}) \psi\left( -\beta r(x) \right) \} \label{surogate:MH}\\
&\leq\phi\left( \frac{-\alpha}{2}(h(x)y-r(x))\right) + \max(c,\mathbb{I}_{m \neq y}) \psi\left( -\beta r(x) \right) \label{surogate:PH}
\end{flalign}
Step $(a)$ follows by noting that $\max(a,b)\geq \frac{a+b}{2}$, and step $(b)$ holds since $\phi(-u)$ and $\psi(-u)$ upper bound $\mathbb{I}_{u\leq0}$. The right--hand sides of equations \eqref{surogate:MH} and \eqref{surogate:PH} are both convex functions of $h$ and $r$. When $\phi$ and $\psi$ are both the exponential loss, we obtain the following loss with $\beta(x,y,m): \mathcal{X} \times \mathcal{Y}^2 \to \mathbb{R}^+$:
\begin{align*}
\nonumber &L_{SH}(h,r,x,y,m):= \exp\left( \frac{\alpha}{2}(r(x)-h(x)y)\right) + \max(c,\mathbb{I}_{m \neq y}) \exp\left( -\beta(x,y,m) r(x) \right)
\end{align*}
We will see that it is necessary for $\beta$ to be non-constant for the loss to be consistent, while in the standard rejection setting it sufficed to have $\beta$ constant \cite{cortes2016learning}. The following proposition shows that for an appropriate choice of $\beta$ and $\alpha$ we can make $L_{SH}$ consistent.
\begin{proposition}
Let $c(x) = c - c\mathbb{P}(Y \neq M|X=x) + \mathbb{P}(Y \neq M|X=x)$. For $\alpha = 1$ and $\beta=\sqrt{\frac{1-c(x)}{c(x)}}$, $\inf_{h,r}\mathbb{E}_{x,y,m}[L_{SH}(h,r,x,y,m)]$ is attained at $(h^*_{SH},r^*_{SH})$ such that $\mathrm{sign}(h^B)=\mathrm{sign}(h^*_{SH})$ and $\mathrm{sign}(r^B)=\mathrm{sign}(r^*_{SH})$.
\end{proposition}
\begin{proof}
Denote $\eta(x)=\mathbb{P}(Y=1|X=x)$ and $q(x,y)=\mathbb{P}(M=1|X=x,Y=y)$; we have:
\begin{flalign*}
\inf_{h,r}\mathbb{E}_{x,y,m}[L_{SH}(h,r,x,y,m)] &= \inf_{h,r} \mathbb{E}_{x}\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{SH}(h,r,x,y,m)] &\\
&= \mathbb{E}_{x}\inf_{h(x),r(x)}\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{SH}(h(x),r(x),x,y,m)]
\end{flalign*}
Now we will expand the inner expectation:
\begin{flalign}
&\label{prop:inner_loss_exp}\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{SH}(h(x),r(x),x,y,m)]&\\\nonumber&= \eta(x) q(x,1) ( \exp\left( \frac{\alpha}{2}(r(x)-h(x))\right) + c \exp\left( -\beta r(x) \right) ) \\
&\nonumber+(1-\eta(x)) q(x,-1) ( \exp\left( \frac{\alpha}{2}(r(x)+h(x))\right) + (1) \exp\left( -\beta r(x) \right) )\\
&\nonumber+\eta(x) (1-q(x,1)) ( \exp\left( \frac{\alpha}{2}(r(x)-h(x))\right) + (1) \exp\left( -\beta r(x) \right) )\\
&\nonumber+(1-\eta(x)) (1-q(x,-1)) ( \exp\left( \frac{\alpha}{2}(r(x)+h(x))\right) + c \exp\left( -\beta r(x) \right) )
\end{flalign}
The Bayes optimal solution for our original loss in the binary setting is:
\begin{flalign*}
&h^B(x)= \eta(x) -\frac{1}{2} &\\
&r^B(x)= |\eta(x) -\frac{1}{2}| - (\frac{1}{2} -c - \mathbb{P}(M \neq Y |X=x))
\end{flalign*}
\textbf{Case 1:} if $\eta(x)=0$, writing $v=r(x)$ and $u=h(x)$, the term \eqref{prop:inner_loss_exp} becomes:
\begin{equation*}
q(x,-1)( \exp\left( \frac{\alpha}{2}(v+u)\right) + 1 \exp\left( -\beta v \right) ) +(1-q(x,-1))( \exp\left( \frac{\alpha}{2}(v+u)\right) + c \exp\left( -\beta v \right) )
\end{equation*}
To minimize the above, it is necessary that the optimal solutions satisfy $u^*<0$ and $v^*>0$, which agrees with the signs of the original Bayes solution.
\textbf{Case 2:} if $\eta(x)=1$, the term \eqref{prop:inner_loss_exp} becomes:
\begin{equation*}
q(x,1)( \exp\left( \frac{\alpha}{2}(v-u)\right) + c \exp\left( -\beta v \right) ) +(1-q(x,1))( \exp\left( \frac{\alpha}{2}(v-u)\right) + (1) \exp\left( -\beta v \right) )
\end{equation*}
To minimize the above, it is necessary that the optimal solutions satisfy $u^*>0$ and $v^*>0$, which agrees with the signs of the original Bayes solution.
\textbf{Case 3:} $\eta(x)\in(0,1)$. For ease of notation, denote the RHS of equation \eqref{prop:inner_loss_exp} by $L_{\psi}(u,v)$; note that $L_{\psi}(u,v)$ is a convex function of both $u$ and $v$, and therefore to find the optimal solution it suffices to take the partial derivatives with respect to each and set them to $0$. For $u$:
\begin{flalign*}
&\frac{\partial L_{\psi}(u,v)}{\partial u} = 0 &\\
&\iff - \eta(x) \frac{\alpha}{2} \exp\left( \frac{\alpha}{2}(v-u^*)\right) + (1-\eta(x)) \frac{\alpha}{2} \exp\left( \frac{\alpha}{2}(v+u^*)\right) = 0 \\
&\iff - \eta(x) \frac{\alpha}{2} \exp\left( \frac{-\alpha}{2}u^*\right) + (1-\eta(x)) \frac{\alpha}{2}\exp\left( \frac{\alpha}{2}u^*\right) = 0 \\
&\iff u^* = \frac{1}{\alpha} \log\left(\frac{\eta(x)}{1- \eta(x)}\right)
\end{flalign*}
We note that $u^*$ has the same sign as the minimizer of the exponential loss and hence has the same sign as $h^B(x)$.
Plugging in $u^*$ and taking the derivative with respect to $v$:
\begin{flalign*}
&\frac{\partial L_{\psi}(u^*,v)}{\partial v} = 0 &\\
&\iff \eta(x) \frac{\alpha}{2} \exp\left( \frac{\alpha}{2}(v^*-u^*)\right) + (1-\eta(x)) \frac{\alpha}{2} \exp\left( \frac{\alpha}{2}(v^*+u^*)\right)\\
&- \beta c\left(\eta(x)q(x,1) + (1-\eta(x))(1-q(x,-1))\right) \exp(-\beta v^*) \\&-(1-\eta(x))q(x,-1) \beta \exp(-\beta v^*) -\eta(x)(1-q(x,1)) \beta \exp(-\beta v^*) = 0 \\
&\iff \eta(x) \frac{\alpha}{2} \exp\left( \frac{\alpha}{2}(v^*-u^*)\right) + (1-\eta(x)) \frac{\alpha}{2} \exp\left( \frac{\alpha}{2}(v^*+u^*)\right) \\&- \beta \left(c -c\mathbb{P}(M\neq Y|X=x) + \mathbb{P}(M\neq Y|X=x) \right) \exp(-\beta v^*) =0
\end{flalign*}
Appealing to the proof of Theorem 1 in \cite{cortes2016boosting}, we obtain:
\begin{equation*}
v^* = \frac{1}{\alpha/2 + \beta} \log \left( \frac{c(x)\beta}{\alpha} \sqrt{\frac{1}{\eta(x)(1-\eta(x))}} \right)
\end{equation*}
Furthermore, by the proof of Theorem 1 in \cite{cortes2016boosting}, the sign of $v^*$ matches that of $r^B(x)$ if and only if:
\[
\frac{\beta}{\alpha} = \sqrt{\frac{1-c(x)}{c(x)}}
\]
\end{proof}
\subsubsection{Multiclass setting}
\textbf{Proposition 1.} \textit{
\noindent $\tilde{L}_{CE}$ is convex and is a consistent loss function for $\tilde{L}$:
\begin{center}
let $\bm{\tilde{g}}=\arg \inf_{\mathbf{g}} \mathbb{E}\left[ \tilde{L}_{CE}(\mathbf{g},\mathbf{c}) |X=x\right]$; then:
$\arg \max_{i \in [K+1]}\bm{\tilde{g}}_i = \arg \min_{i \in [K+1]} \mathbb{E}[c(i)|X=x]$
\end{center}
}
\begin{proof}
Writing the expected loss:
\begin{flalign*}
\inf_{\mathbf{g}}\mathbb{E}_{x,\mathbf{c}}[\tilde{L}_{CE}(\mathbf{g},x,\mathbf{c})] &= \inf_{\mathbf{g}} \mathbb{E}_{x}\mathbb{E}_{\mathbf{c}|x}[\tilde{L}_{CE}(\mathbf{g},x,\mathbf{c})] = \mathbb{E}_{x}\inf_{\mathbf{g}(x)}\mathbb{E}_{\mathbf{c}|x}[\tilde{L}_{CE}(\mathbf{g}(x),x,\mathbf{c})]
\end{flalign*}
Now we will expand the inner expectation:
\begin{flalign}
\nonumber &\mathbb{E}_{\mathbf{c}|x}[\tilde{L}_{CE}(\mathbf{g}(x),x,\mathbf{c})] = - \sum_{y \in [K+1]} \mathbb{E}[\max_j c(j) - c(y)|X=x] \log\left( \frac{\exp(g_y(x))}{\sum_k \exp(g_k(x))} \right) &
\end{flalign}
The loss $\tilde{L}_{CE}$ is convex in the predictor, so it suffices to differentiate with respect to each $g_y$ for $y \in [K+1]$ and set the derivative to 0.
\begin{flalign*} &\frac{\partial \tilde{L}_{CE}}{\partial g_y^*} = 0 &\\ &\iff \mathbb{E}[\max_j c(j) - c(y)|X=x] - \frac{\exp(g_y^*(x))}{\sum_k \exp(g_k(x))} \sum_{i \in [K+1]} \mathbb{E}[\max_j c(j) - c(i)|X=x] = 0 \\ & \iff \frac{\exp(g_y^*(x))}{\sum_k \exp(g_k(x))} = \frac{ \mathbb{E}[\max_j c(j) - c(y)|X=x]}{\sum_{i \in [K+1]} \mathbb{E}[\max_j c(j) - c(i)|X=x]} \end{flalign*} From this we can deduce: \begin{flalign*} h(x) &= \arg\max_{y \in [K+1]} g_y^*(x)= \arg \max_{y \in [K+1]} \frac{\exp(g_{y}^*(x))}{\sum_{y' \in [K+1]}\exp(g_{y'}^*(x))}&\\& = \arg \max_{y \in [K+1]} \frac{ \mathbb{E}[\max_j c(j) |X=x] -\mathbb{E}[ c(y)|X=x]}{\sum_{i \in [K+1]} \mathbb{E}[\max_j c(j) - c(i)|X=x]} \\ &= \arg \min_{y \in [K+1]} \mathbb{E}[ c(y)|X=x] =\tilde{h}^B(x) \end{flalign*} \end{proof} \textbf{Proposition 2.} \textit{ \noindent The minimizers of the loss $L_{0{-}1}$ \eqref{eq:01_reject_loss} are defined point-wise for all $x\in \mathcal{X}$ as: \begin{align} &h^B(x) = \arg \max_{y \in \mathcal{Y}}\eta_y(x) \nonumber \\ &r^B(x)= \mathbb{I}_ {\max_{y \in \mathcal{Y}}\eta_y(x) \leq \mathbb{P}(Y = M|X=x) } \end{align} }\begin{proof} When we don't defer, the loss incurred by the model is the misclassification loss in the standard multiclass setting, and hence by standard arguments \cite{friedman2001elements} we can define $h^B$ point-wise regardless of $r$: \begin{flalign*} h^B(x) = \arg \inf_{h} \mathbb{E}_{y} [\mathbb{I}_{h \neq y}] = \arg \max_{y \in \mathcal{Y}}\eta_y(x) \end{flalign*} Now for the rejector: we should only defer if the expected loss of having the expert predict is less than the error of the classifier $h^B$ defined above; define $r^B: \mathcal{X} \to \{0,1 \}$ as: \begin{flalign*} r^B(x) &= \mathbb{I}_{\mathbb{E}[\mathbb{I}_{M \neq Y}|X=x] \leq \mathbb{E}[\mathbb{I}_{h^B(x) \neq Y}|X=x] }\\ &=\mathbb{I}_{ \mathbb{P}(Y \neq M|X=x) \leq (1-\max_{y \in \mathcal{Y}}\eta_y(x)) } \\ &= \mathbb{I}_{\mathbb{P}(Y = M|X=x) \geq \max_{y \in \mathcal{Y}}\eta_y(x) } \end{flalign*} \end{proof} \textbf{Theorem 1.} \textit{ \noindent The loss $L_{CE}$ is a convex upper bound of $L_{0{-}1}$ and is consistent:\\ $\inf_{h,r}\mathbb{E}_{x,y,m}[L_{CE}(h,r,x,y,m)]$ is attained at $(h^*_{CE},r^*_{CE})$ such that $h^B(x)=h^*_{CE}(x)$ and $r^B(x)=r^*_{CE}(x)$ for all $x \in \mathcal{X}$. } \begin{proof} The fact that $L_{CE}$ is convex is immediate as $\mathbb{I}_{m = y}\geq 0$ and the cross entropy loss is convex. Now we show that $L_{CE}$ is an upper bound of $L_{0{-}1}$: \begin{align} \nonumber & L_{0{-}1}(h,r,x,y,m)= \mathbb{I}_{h(x)\neq y } \mathbb{I}_{r(x) = 0} + \mathbb{I}_{m \neq y} \mathbb{I}_{r(x) = 1}&\\ &\overset{(a)}{\leq}- \log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) - \mathbb{I}_{m= y} \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \label{eq:multiclass_entropy_loss} \end{align} To justify inequality $(a)$, consider first the case $r(x)=0$: if $\mathbb{I}_{h(x)\neq y }=1$ we know that $\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \leq \frac{1}{2}$, giving $-\log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \geq 1$; moreover, all the terms in the RHS of $(a)$ are always positive.
On the other hand, if $r(x)=1$, then again $\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \leq \frac{1}{2}$ since we decided to reject, again giving $-\log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \geq 1$. Finally, note that $ L_{0{-}1}(h,r,x,y,m)\leq 1$. We will now show that the optimal rejector minimizing the upper bound \eqref{eq:multiclass_entropy_loss} is in fact consistent. Denote $q_m(x,y)=\mathbb{P}(M=m|X=x,Y=y)$ and $\eta_y(x) = \mathbb{P}(Y=y|X=x)$; we have: \begin{flalign*} \inf_{h,r}\mathbb{E}_{x,y,m}[L_{CE}(h,r,x,y,m)] &= \inf_{h,r} \mathbb{E}_{x}\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{CE}(h,r,x,y,m)] \\&= \mathbb{E}_{x}\inf_{h(x),r(x)}\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{CE}(h(x),r(x),x,y,m)] \end{flalign*} Let us expand the inner expectation: \begin{flalign} \nonumber&\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{CE}(h(x),r(x),x,y,m)]&\\\nonumber &= \mathbb{E}_{y|x} \left[ -\log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right)- \sum_{m \in \mathcal{Y}} q_m(x,y) \mathbb{I}_{m = y} \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \right] \\\nonumber &=- \sum_{y \in \mathcal{Y}} \eta_y(x) \log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \\&\nonumber- \sum_{y \in \mathcal{Y}} \eta_y(x) \sum_{m \in \mathcal{Y}} q_m(x,y)\mathbb{I}_{m = y} \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \\\nonumber &\overset{(a)}{=}- \sum_{y \in \mathcal{Y}} \eta_y(x) \log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) - \sum_{y \in \mathcal{Y}} \eta_y(x) q_y(x,y) \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \\ &\overset{(b)}{=}- \sum_{y \in \mathcal{Y}} \eta_y(x) \log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \nonumber\\&- \mathbb{P}(Y=M |X=x) \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \label{prop:y|x_cross_loss_withq} \end{flalign} In step $(a)$ all terms that differ on $y$ and $m$ disappear; in step $(b)$ we use: \begin{flalign*} &\sum_{y \in \mathcal{Y}} \eta_y(x) q_y(x,y) = \sum_{y \in \mathcal{Y}} \mathbb{P}(M=y,Y=y|X=x) = \mathbb{P}(Y=M|X=x) \end{flalign*} For ease of notation denote the RHS of equation \eqref{prop:y|x_cross_loss_withq} as $L_{CE}(g_1,\cdots,g_{|\mathcal{Y}|},g_\bot)$; note that it is a convex function, hence we will take the partial derivatives with respect to each argument and set them to $0$.
For any $g_\bot$ and $i \in \mathcal{Y}$ we have: \begin{flalign} &\nonumber\frac{\partial L_{CE}(g_1^*,\cdots,g_{|\mathcal{Y}|}^*,g_\bot)}{\partial g_i^*} = 0 &\\ &\label{prop:optimal_g}\iff \frac{\exp(g_{i}^*(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}^*(x))} = \frac{\eta_i(x)}{1 + \mathbb{P}(Y=M|X=x)} \end{flalign} The optimal $h^*$ for any $g_\bot$ should satisfy equation \eqref{prop:optimal_g} for every $i \in \mathcal{Y}$; since the exponential is an increasing function, the optimal $h^*$ in fact agrees with the Bayes solution: \begin{flalign*} \arg\max_{y \in \mathcal{Y}} g_y^*(x)&= \arg \max_{y \in \mathcal{Y}} \frac{\exp(g_{y}^*(x))}{\sum_{y' \in \mathcal{Y}}\exp(g_{y'}^*(x)) + \exp(g_\bot(x))}&\\& = \arg \max_{y \in \mathcal{Y}} \frac{\eta_y(x)}{1 + \mathbb{P}(Y=M|X=x)} = h^B(x) \end{flalign*} Plugging in $h^*$ and taking the derivative with respect to $g_\bot$: \begin{flalign} &\nonumber\frac{\partial L_{CE}(g_1^*,\cdots,g_{|\mathcal{Y}|}^*,g_\bot^*)}{\partial g_\bot^*} = 0 &\\ & \nonumber \iff \frac{\exp(g_{\bot}^*(x))}{\sum_{y' \in \mathcal{Y}}\exp(g_{y'}^*(x))} = \frac{ \mathbb{P}(Y=M|X=x)}{1+ \mathbb{P}(Y=M|X=x)} \end{flalign} Note that $r^*(x)=1$ only if $\mathbb{P}(Y=M|X=x) \geq \max_{y \in \mathcal{Y}} \eta_y(x) $, which agrees with $r^B(x)$. \end{proof} \subsection{Section \ref{sec:theory}} \textbf{Proposition 3.}\textit{ \noindent $L_{mix}$ is realizable $(\mathcal{H},\mathcal{R})$-consistent for classes closed under scaling but is not classification consistent. } \begin{proof} We first prove that $L_{mix}$ is realizable $(\mathcal{H},\mathcal{R})$-consistent. Let $\mathbf{P}$ and $M$ be such that there exist $h^*,r^* \in \mathcal{H} \times \mathcal{R}$ that have zero error $L(h^*,r^*)=0$. Assume that $(\hat{h},\hat{r})$ satisfy \[\left| \mathbb{E}[L_{mix}(\hat{h},\hat{r},x,y,m)] - \inf_{h \in \mathcal{H}, r \in \mathcal{R}} \mathbb{E}[L_{mix}(h,r,x,y,m)] \right| \leq \delta\] Let $u>0$; we have: \begin{align} \nonumber &\mathbb{E}[L(\hat{h},\hat{r},x,y,m)] \\&\leq \nonumber 2\mathbb{E}[L_{mix}(\hat{h},\hat{r},x,y,m)] \quad \textrm{(as $2L_{mix}$ upper bounds $L$)}\\ \nonumber &\leq 2\mathbb{E}[L_{mix}(uh^*,ur^*,x,y,m)]+ 2\delta \quad \textrm{(by assumption and closure under scaling) }\\ \nonumber &= 2\mathbb{E}[L_{mix}(uh^*,ur^*,x,y,m)|r^*=1]\mathbb{P}(r^*=1) + 2\mathbb{E}[L_{mix}(uh^*,ur^*,x,y,m)|r^*=0]\mathbb{P}(r^*=0) + 2\delta \\ \nonumber &= 2\mathbb{E}[-\log\left(\frac{\exp(ug_{y}(x))}{\sum_{y' \in \mathcal{Y}}\exp(ug_{y'}(x))} \right) \frac{\exp(ur_{0}(x))}{\sum_{i \in \{0,1\}}\exp(ur_{i}(x))} + \mathbb{I}_{m \neq y} \frac{\exp(ur_{1}(x))}{\sum_{i \in \{0,1\}}\exp(ur_{i}(x))}|r^*=0]\mathbb{P}(r^*=0) \\&+ 2\mathbb{E}[-\log\left(\frac{\exp(ug_{y}(x))}{\sum_{y' \in \mathcal{Y}}\exp(ug_{y'}(x))} \right) \frac{\exp(ur_{0}(x))}{\sum_{i \in \{0,1\}}\exp(ur_{i}(x))} \nonumber \\ &+ \mathbb{I}_{m \neq y} \frac{\exp(ur_{1}(x))}{\sum_{i \in \{0,1\}}\exp(ur_{i}(x))}|r^*=1]\mathbb{P}(r^*=1) + 2\delta \label{eq:proof_mixexp_realizable} \end{align} Let us examine each term in the RHS of \eqref{eq:proof_mixexp_realizable}. When $r^*=1$ we have $r_1(x)>r_0(x)$, hence: \[\lim_{u \to \infty}\frac{\exp(ur_{0}(x))}{\sum_{i \in \{0,1\}}\exp(ur_{i}(x))} = 0 \] Furthermore, it must be that $\mathbb{I}_{m \neq y} = 0$ as we decided to defer.
When $r^*=0$, we have $r_0(x) \geq r_1(x)$, hence: \[\lim_{u \to \infty}\frac{\exp(ur_{1}(x))}{\sum_{i \in \{0,1\}}\exp(ur_{i}(x))} = 0 \] Moreover, we have $h^*(x)=y$ by optimality of $(h^*,r^*)$ (as we did not defer) and realizability, thus: \[ \lim_{u \to \infty} \log\left(\frac{\exp(ug_{y}(x))}{\sum_{y' \in \mathcal{Y}}\exp(ug_{y'}(x))} \right) = 0 \] We can conclude that taking the limit as $u \to \infty$ on the RHS of \eqref{eq:proof_mixexp_realizable} and applying the monotone convergence theorem (to swap the expectation and the limit) we get: \begin{align*} &\mathbb{E}[L(\hat{h},\hat{r},x,y,m)] \leq 2 \delta \end{align*} Taking $\delta = \epsilon/2$ completes the proof. We now move to looking at the Bayes solution of $L_{mix}$. Denote $q_m(x,y)=\mathbb{P}(M=m|X=x,Y=y)$; we have: \begin{flalign*} \inf_{h,r}\mathbb{E}_{x,y,m}[L_{mix}(h,r,x,y,m)] = \mathbb{E}_{x}\inf_{h(x),r(x)}\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{mix}(h(x),r(x),x,y,m)] \end{flalign*} Let us expand the inner expectation: \begin{flalign} \label{eq:proof_mixofexp_notconsistent}&\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{mix}(h(x),r(x),x,y,m)]=& \\ & - \sum_{y \in \mathcal{Y}} \eta_y(x) \log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} }\exp(g_{y'}(x))} \right) \frac{\exp(r_{0}(x))}{\sum_{i \in \{0,1\}}\exp(r_{i}(x))} + \mathbb{P}(Y \neq M |X=x) \frac{\exp(r_{1}(x))}{\sum_{i \in \{0,1\}}\exp(r_{i}(x))} \nonumber \end{flalign} Denote the RHS of \eqref{eq:proof_mixofexp_notconsistent} by $L_{mix}(g_1,\cdots, g_{|\mathcal{Y}|},r_0,r_1)$; it is a convex function in $g_i$ for all $i \in \mathcal{Y}$. For any $r_0,r_1$ we have: \begin{flalign} &\frac{\partial L_{mix}(g_1^*,\cdots,g_{|\mathcal{Y}|}^*,r_0,r_1)}{\partial g_i^*} = 0 \label{prop:optimal_g_mix_of_exp}\iff \frac{\exp(g_{i}^*(x))}{\sum_{y' \in \mathcal{Y}}\exp(g_{y'}^*(x))} = \eta_i(x) \end{flalign} Since the optimal $h^*$ for any $r_0,r_1$ \emph{does not depend} on the form of $r_0$ and $r_1$, we conclude that \eqref{prop:optimal_g_mix_of_exp} gives the optimal choice of $h$. We now need to find the optimal choice of $r_0(x)$ and $r_1(x)$ minimizing $L_{mix}(g_1^*,\cdots,g_{|\mathcal{Y}|}^*,r_0,r_1)$, which takes the following form: \begin{equation*} L_{mix}(g_1^*,\cdots,g_{|\mathcal{Y}|}^*,r_0,r_1) = \textrm{H}(h^B(x)) \frac{\exp(r_{0}(x))}{\sum_{i \in \{0,1\}}\exp(r_{i}(x))} + \mathbb{P}(Y \neq M |X=x) \frac{\exp(r_{1}(x))}{\sum_{i \in \{0,1\}}\exp(r_{i}(x))} \end{equation*} where $\textrm{H}(X)$ is the Shannon entropy of the random variable $X$; here by $\textrm{H}(h^B(x))$ we refer to the entropy of the probabilistic form of $h^B(x)$ according to \eqref{prop:optimal_g_mix_of_exp}. Clearly the optimal $r_0^*$ and $r_1^*$ have the following behavior for a given $x \in \mathcal{X}$: \[ \begin{cases} r_0(x)= \infty, \ r_1(x) = - \infty & \text{if } \textrm{H}(h^B(x)) < \mathbb{P}(Y \neq M |X=x) \\ r_0(x)= - \infty, \ r_1(x) = \infty & \text{if } \textrm{H}(h^B(x)) \geq \mathbb{P}(Y \neq M |X=x) \end{cases} \] This does not have the form of $r^B(x)$: this rejector compares the entropy of $h^B(x)$, instead of its confidence, to the probability of error of the expert, and the two will not always be in accordance.
\end{proof} \textbf{Theorem 2.}\textit{ \noindent For any expert $M$ and data distribution $\mathbf{P}$ over $\mathcal{X} \times \mathcal{Y}$, let $0<\delta<\frac{1}{2}$; then with probability at least $1-\delta$, the following holds for the empirical minimizers $(\hat{h}^*,\hat{r}^*)$: \begin{align} L_{0{-}1}(\hat{h}^*,\hat{r}^*) &\leq L_{0{-}1}(h^*,r^*) + \mathfrak{R}_n(\mathcal{H}) + \mathfrak{R}_{n}(\mathcal{R}) + \mathfrak{R}_{n \mathbb{P}(M \neq Y)/2}(\mathcal{R}) \nonumber \\ & + 2\sqrt{\frac{\log{(\frac{2}{\delta})}}{2n}} +\frac{\mathbb{P}(M\neq Y)}{2} \exp\left(- \frac{n \mathbb{P}(M \neq Y)}{8} \right) \nonumber \end{align} } \begin{proof} Let $\mathcal{L}_{\mathcal{H},\mathcal{R}}$ be the family of functions defined as $\mathcal{L}_{\mathcal{H},\mathcal{R}}=\{(x,y,m) \to L(h,r,x,y,m); h \in \mathcal{H}, r \in \mathcal{R} \}$ with $ L(h,r,x,y,m):= \mathbb{I}_{h(x)\neq y} \mathbb{I}_{r(x) = 0} + \mathbb{I}_{m \neq y} \mathbb{I}_{r(x) = 1}$. Let $\mathfrak{R}_n(\mathcal{L}_{\mathcal{H},\mathcal{R}})$ be the Rademacher complexity of $\mathcal{L}_{\mathcal{H},\mathcal{R}}$; then since $L(h,r,x,y,m) \in [0,1]$, by the standard Rademacher complexity bound (Theorem 3.3 in \cite{mohri2018foundations}), with probability at least $1-\delta/2$ we have: \begin{equation*} L_{0{-}1}(\hat{h}^*,\hat{r}^*) \leq L^S_{0{-}1}(\hat{h}^*,\hat{r}^*) + 2 \mathfrak{R}_n(\mathcal{L}_{\mathcal{H},\mathcal{R}}) + \sqrt{\frac{\log{(\frac{2}{\delta})}}{2n}} \end{equation*} We will now relate the complexity of $\mathcal{L}_{\mathcal{H},\mathcal{R}}$ to that of the individual classes: \begin{flalign} \nonumber&\mathfrak{R}_n(\mathcal{L}_{\mathcal{H},\mathcal{R}})= \mathbb{E}_{\boldsymbol{\epsilon}}[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i \mathbb{I}_{h(x_i)\neq y_i} \mathbb{I}_{r(x_i) = 0} + \epsilon_i\mathbb{I}_{m_i \neq y_i} \mathbb{I}_{r(x_i) = 1} ]& \nonumber\\ \nonumber&\overset{(a)}{\leq} \mathbb{E}_{\boldsymbol{\epsilon}}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i \mathbb{I}_{h(x_i)\neq y_i} \mathbb{I}_{r(x_i) = 0}\right] \\\nonumber&+ \mathbb{E}_{\boldsymbol{\epsilon}}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i\mathbb{I}_{m_i \neq y_i} \mathbb{I}_{r(x_i) = 1} \right] \\ \nonumber&\overset{(b)}{\leq} \mathbb{E}_{\boldsymbol{\epsilon}}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i \mathbb{I}_{h(x_i)\neq y_i} \right] + \mathbb{E}_{\boldsymbol{\epsilon}}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i \mathbb{I}_{r(x_i) = 0} \right] \\& + \mathbb{E}_{\boldsymbol{\epsilon}}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i\mathbb{I}_{m_i \neq y_i} \mathbb{I}_{r(x_i) = 1} \right] \nonumber \\ &\leq \frac{1}{2} \mathfrak{R}_n(\mathcal{H}) + \frac{1}{2} \mathfrak{R}_n(\mathcal{R}) +\mathbb{E}_{\boldsymbol{\epsilon}}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i\mathbb{I}_{m_i \neq y_i} \mathbb{I}_{r(x_i) = 1} \right] \label{th1:exp_term} \end{flalign} Step $(a)$ follows as the supremum is subadditive; step $(b)$ is the application of Lemma 2 in \cite{desalvo2015learning} to $\mathbb{E}_{\boldsymbol{\epsilon}}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i \mathbb{I}_{h(x_i)\neq y_i} \mathbb{I}_{r(x_i) = 0}\right]$, which says that the Rademacher
complexity of a product of two indicator functions is upper bounded by the sum of the complexities of each class. We will now take a closer look at the last term in the RHS of inequality \eqref{th1:exp_term}. Denote $n_m^S = \sum_{j \in S} \mathbb{I}_{y_j\neq m_j} $ and define the random variable $S_m = \{i: y_i \neq m_i \}$; we have that $n_m^S \sim \textrm{Binomial}(n,\mathbb{P}(M \neq Y))$ and $\mathbb{E}[n_m^S]=n \mathbb{P}(M\neq Y)$, hence: \begin{flalign*} &\mathbb{E}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1}^n \epsilon_i\mathbb{I}_{m_i \neq y_i} \mathbb{I}_{r(x_i) = 1} \right] \\&=\mathbb{E}\left[ \sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n} \sum_{i=1 \ s.t. \ y_i \neq m_i}^n \epsilon_i \mathbb{I}_{r(x_i) = 1} \right] &\\ &= \mathbb{E}\left[ \frac{n_m^S}{n}\sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n_m^S} \sum_{i=1 }^{n_m^S} \epsilon_i \mathbb{I}_{r(x_i) = 1} \right] \textrm{ (by relabeling)} \\ &\overset{(a)}{=} \mathbb{E}\left[ \mathbb{E}_{\boldsymbol{\epsilon}} \left[\frac{n_m^S}{n}\sup_{(h,r)\in \mathcal{H} \times \mathcal{R}} \frac{1}{n_m^S} \sum_{i=1 }^{n_m^S} \epsilon_i \mathbb{I}_{r(x_i) = 1}|S_m \right] \right] \\ &\overset{(b)}{=} \mathbb{E}\left[ \frac{n_m^S}{n} \hat{\mathfrak{R}}_{S_m}(\mathcal{R}) \right] \\ & \overset{(c)}{=} \mathbb{P}(n_m^S < \frac{n \mathbb{P}(A)}{2}) \mathbb{E}\left[ \frac{n_m^S}{n} \hat{\mathfrak{R}}_{S_m}(\mathcal{R})|n_m^S < \frac{n \mathbb{P}(A)}{2} \right] + \mathbb{P}(n_m^S \geq \frac{n \mathbb{P}(A)}{2}) \mathbb{E}\left[ \frac{n_m^S}{n} \hat{\mathfrak{R}}_{S_m}(\mathcal{R})|n_m^S \geq \frac{n \mathbb{P}(A)}{2} \right] \\ & \overset{(d)}{\leq} \frac{\mathbb{P}(M\neq Y)}{2} \exp\left(- \frac{n \mathbb{P}(M \neq Y)}{8} \right)+ \mathfrak{R}_{n \mathbb{P}(M \neq Y)/2}(\mathcal{R}) \end{flalign*} In step $(a)$ we conditioned on the dataset $S_m$; in step $(b)$ we used the definition of the empirical Rademacher complexity $ \hat{\mathfrak{R}}_{S_m}(\mathcal{R})$ on $S_m$; in step $(c)$ we introduced the event $A= \{ M \neq Y\}$; step $(d)$ follows from a Chernoff bound on $n_m^S$ and since the Rademacher complexity is bounded by $1$ and is non-increasing with respect to sample size. We can now proceed with inequality \eqref{th1:exp_term}: \begin{flalign*} &\mathfrak{R}_n(\mathcal{L}_{\mathcal{H},\mathcal{R}}) \overset{(a)}{\leq} \frac{1}{2} \mathfrak{R}_n(\mathcal{H}) + \frac{1}{2} \mathfrak{R}_n(\mathcal{R}) + \frac{\mathbb{P}(M\neq Y)}{2} \exp\left(- \frac{n \mathbb{P}(M \neq Y)}{8} \right) + \mathfrak{R}_{n \mathbb{P}(M \neq Y)/2}(\mathcal{R}) & \end{flalign*} where step $(a)$ follows as the Rademacher complexity of indicator functions based on a certain class is equal to half the Rademacher complexity of the class \cite{mohri2018foundations}.
The final step is to note that, by Hoeffding's inequality, with probability at least $1- \delta/2$ we have: \begin{flalign*} & L^S(h^*,r^*) \leq L(h^*,r^*) + \sqrt{\frac{\log{(\frac{2}{\delta})}}{2n}}& \end{flalign*} Now since $(\hat{h}^*,\hat{r}^*)$ are the empirical minimizers, we have that $ L^S(\hat{h}^*,\hat{r}^*) \leq L^S(h^*,r^*)$; collecting all the inequalities, we obtain the following generalization bound with probability at least $1- \delta$: \begin{flalign} L(\hat{h}^*,\hat{r}^*) &\leq L(h^*,r^*) + \mathfrak{R}_n(\mathcal{H}) + \mathfrak{R}_{n}(\mathcal{R}) + 2\sqrt{\frac{\log{(\frac{2}{\delta})}}{2n}} \nonumber &\\& + \frac{\mathbb{P}(M\neq Y)}{2} \exp\left(- \frac{n \mathbb{P}(M \neq Y)}{8} \right) + \mathfrak{R}_{n \mathbb{P}(M \neq Y)/2}(\mathcal{R}) \nonumber \end{flalign} \end{proof} \section{Introduction} Machine learning systems are now being deployed to complement human decision makers in settings such as healthcare \cite{hamid2017machine,raghu2019algorithmic}, risk assessment \cite{green2019disparate} and content moderation \cite{link2016human}. These models are either used as a tool to help the downstream human decision maker, as with judges relying on algorithmic risk assessment tools \cite{green2019principles} and risk scores used in the ICU \cite{sepsistrial}, or are used to make the final prediction on their own for a selected subset of examples \cite{madras2018predict,raghu2019algorithmic}. A current application of the latter setting is the content moderation approach of Facebook and other online platforms \cite{fbmoderate,jhaver2019human}: an algorithm is used to filter easily detectable inappropriate content, and the rest of the examples are screened by a team of human moderators. Another motivating application arises in health care settings: for example, deep neural networks can outperform radiologists in detecting pneumonia from chest X-rays \cite{irvin2019chexpert}; however, many obstacles limit complete automation, and an intermediate step toward automating this task is the use of models as triage tools that complement radiologist expertise. Our focus in this work is to give theoretically sound approaches for machine learning models that can either predict or defer the decision to a downstream expert, complementing and augmenting the expert's capabilities. The learned model should adapt to the underlying human expert in order to achieve better performance than deploying the model or the expert individually. In situations where we have limited data or model capacity, the gains from allowing the model to focus on regions where the expert is less accurate are expected to be more significant. However, even when data and model capacity are not concerns, the expert may have access to side-information unavailable to the learner, for example due to privacy concerns; the hard task is then to identify when we should defer without having access to this side-information. In this work we only assume that we have access to samples of the expert's decisions or to the costs of deferring, which we believe is a reasonable assumption in practical settings. Inspired by the literature on rejection learning \cite{cortes2016learning}, our approach will be to learn two functions: a classifier that can predict the target and a rejector which decides whether the classifier or the expert should predict.
We start by formulating a natural loss function for the combined machine-expert system in section \ref{sec:problem} and show a reduction from the expert deferral setting to cost sensitive learning. With this reduction in hand, we are able to give in section \ref{sec:surrog} a novel convex surrogate loss that upper bounds our system loss and that is furthermore consistent. This surrogate loss settles the open problem posed by \cite{ni2019possibility} of finding a consistent loss for multiclass rejection learning. Our proposed surrogate loss and approach require only adding an output layer to existing model architectures and changing the loss function; hence they necessitate minimal to no added computational costs. In section \ref{sec:theory}, we show the limitations of approaches in the literature from a consistency point-of-view and then provide generalization bounds for minimizing the empirical loss. To show the efficacy of our approach, we give experimental evidence in section \ref{sec:experiments} on the image classification datasets CIFAR-10 and CIFAR-100 using synthetic and human experts based on \texttt{CIFAR10H} \cite{peterson2019human}, on a hate speech and offensive language detection task \cite{davidson2017automated}, and on classification of chest X-rays with synthetic experts. To summarize, the contributions of this paper are the following: \begin{itemize} \item We formalize the expert deferral setup and analyze it theoretically, giving a generalization bound for solving the empirical problem. \item We propose a novel convex consistent surrogate loss $L_{CE}$ \eqref{eq:proposed_CE_loss} for expert deferral that is easily integrated into current learning pipelines. \item We provide a detailed experimental evaluation of our method and baselines from the literature on image and text classification tasks. \end{itemize} \section{Related Work} Learning with a reject option, \emph{rejection learning}, has long been studied, starting with \cite{chow1970optimum} who investigated the trade-off between accuracy and the rejection rate. The framework of rejection learning assumes a constant cost $c$ of deferring, and hence the problem becomes to predict only if one is $1-c$ confident. Numerous works have proposed surrogate losses and uncertainty estimation methods to solve the problem \cite{bartlett2008classification, ramaswamy2018consistent,ni2019possibility, jiang2018trust}. \cite{cortes2016learning,cortes2016boosting} proposed a different approach of learning two functions, a classifier and a rejection function, and analyzed it, giving a kernel based algorithm in the binary setting. \cite{ni2019possibility} tried to extend their approach to the multiclass setting but failed to give a consistent surrogate loss and hence resorted to confidence based methods. Recent work has started to explore models that defer to downstream experts. \cite{madras2018predict} considers a framework identical to the one considered here; however, their approach does not allow the model to adapt to the underlying expert, and the loss used is not consistent and requires an uncertainty estimate of the expert's decisions. On the other hand, \cite{de2019regression} gives an approximate procedure to learn a linear model that picks a subset of the training data on which to defer and uses a nearest neighbor algorithm to defer on new examples; the approach is only feasible for small dataset sizes and does not generalize beyond ridge regression.
\cite{raghu2019algorithmic} considers binary classification with expert deferral; their approach is to learn a classifier ignoring the expert, obtain uncertainty estimates for both the expert and the classifier, and then defer based on which is higher. We detail the limitations of this approach in section \ref{sec:theory}. Concurrent work \cite{wilder2020learning} learns a model with the mixture of experts loss first introduced in \cite{madras2018predict} and defers based on estimated model and expert confidence as in \cite{raghu2019algorithmic}. Work on AI-assisted decision making has focused on the reverse of the setting considered here: the expert chooses to accept or reject the decision of the classifier instead of a learned rejector \cite{bansal2019updates,bansal2020optimizing}. Additionally, the fairness in machine learning community has started to consider the fairness impact of having downstream decision makers \cite{madras2018predict,canetti2019soft,green2019disparate,dwork2018fairness}, albeit in slightly different frameworks than the ones considered here, and work has started to consider deferral in reinforcement learning \cite{meresht2020learning}. A related framework to our setting is selective classification \cite{el2010foundations} where, instead of setting a cost for rejecting to predict, one sets a constraint on the probability of rejection; there is no assumed downstream expert. Approaches include deferring based on confidence scores \cite{geifman2017selective}, learning a deep network with two heads, one for predicting and the other for deferring \cite{geifman2019selectivenet}, and learning with portfolio theory inspired loss functions \cite{ziyin2019deep}. Finally, our work bears resemblance to active learning with weak (the expert) and strong (the ground truth) labelers \cite{zhang2015active}. \section{Problem Formulation}\label{sec:problem} We are interested in predicting a target $Y \in \mathcal{Y}=\{1,\cdots,K\}$ based on covariates $X \in \mathcal{X}$ where $X,Y \sim \mathbf{P}$. We assume that we have query access to an expert $M$ that has access to a domain $\mathcal{Z}$ that may contain additional information beyond $\mathcal{X}$ for classifying instances according to the target $\mathcal{Y}$. Querying the expert implies deferring the decision, which incurs a cost $l_{exp}(x,y,m)$ that depends on the target $y$, the covariate $x$ and the expert's prediction $m$. On the other hand, predicting without querying the expert implies that a classifier makes the final decision and incurs a cost $l(x,y,\hat{y})$ where $\hat{y}$ is the prediction of the classifier. Our goal is to build a predictor $\hat{Y}: \mathcal{X} \to \mathcal{Y} \cup \{ \bot\}$ that can either predict or defer the decision to the expert, with deferral denoted by $\bot$.
We can now formulate a natural system loss function $L$ for the system consisting of the classifier in conjunction with the expert: \begin{align}\label{eq:system_loss_general} & L(\hat{Y}) = \mathbb{E}_{(x,y)\sim \mathbf{P},m \sim M|(x,y)} \ [ \ \underbrace{l(x,y,\hat{Y}(x))}_{\text{classifier cost}} \overbrace{\mathbb{I}_{\hat{Y}(x)\neq \bot}}^{\text{predict}} + \underbrace{l_{\textrm{exp}}(x,y,m)}_{\text{expert cost}} \overbrace{\mathbb{I}_{\hat{Y}(x)= \bot}}^{\text{defer}} \ ] \end{align} Our strategy for learning the predictor $\hat{Y}$ will be to learn two separate functions $h: \mathcal{X} \to \mathcal{Y}$ (classifier) and $r: \mathcal{X} \to \{0, 1 \}$ (rejector) and hence we write our loss as: \begin{align} &L(h,r)= \label{eq:original_reject_loss} \mathbb{E}_{(x,y)\sim \mathbf{P},m \sim M|(x,y)} \ [ \ l(x,y,h(x)) \mathbb{I}_{r(x) = 0} + l_{\textrm{exp}}(x,y,m) \mathbb{I}_{r(x)=1} \ ] \end{align} \begin{figure}[h] \centering \includegraphics[clip,scale=0.6,trim={2cm 6.5cm 2cm 6cm}]{figures/setup_diagram_1.pdf} \caption{The expert deferral pipeline: the rejector $r(x)$ first decides which of the classifier $h(x)$ and the expert $M(z)$ should predict, and whoever makes the final prediction incurs the corresponding cost.} \label{fig:setup} \end{figure} Figure \ref{fig:setup} illustrates our expert deferral setting with its different components. The above formulation is a generalization of the learning with rejection framework studied by \cite{cortes2016learning}: by setting $l_{\textrm{exp}}(x,y,m)=c$ for a constant $c>0$ the two objectives coincide. In \cite{madras2018predict}, the proposed loss assumes that the classifier and expert costs are the logistic loss between the target and their predictions in the binary target setting. While our treatment extends to general forms of expert and classifier costs, in our theoretical analysis we will pay particular attention to the case where the costs are the misclassification error with respect to the target. Formally, we define a $0{-}1$ loss version of our system loss: \begin{align} &L_{0{-}1}(h,r)= \label{eq:01_reject_loss} \mathbb{E}_{(x,y)\sim \mathbf{P},m \sim M|(x,y)} \ [ \ \mathbb{I}_{h(x) \neq y} \mathbb{I}_{r(x) = 0} + \mathbb{I}_{m \neq y}\mathbb{I}_{r(x)=1} \ ] \end{align} One may also assume an additive cost function $c(x)$ for querying the expert that depends on the instance $x$, making $l_{\textrm{exp}}(x,y,m)= \mathbb{I}_{m \neq y} + c(x)$; such additive costs can be easily integrated into our analysis. Our approach will be to cast this problem as a \emph{cost sensitive learning} problem over an augmented label space that includes the action of deferral. Let $\mathbf{c} \in \mathbb{R}_+^{K+1}$ be a random cost vector where, for $i \in [K]$, the $i$'th component $c(i)$ of $\mathbf{c}$ represents the cost of predicting $i \in \mathcal{Y}$, while $c(K+1)$ represents the cost of deferring to the expert. The goal of this setup is to learn a predictor $h: \mathcal{X} \to [K+1]$ minimizing the cost sensitive loss $\tilde{L}(h) :=\mathbb{E}[c(h(x))]$. For example, given an instance $(x,y)$, our loss \eqref{eq:original_reject_loss} is obtained by setting $c(i)=l(x,y,i)$ for $i \in [K]$ and $c(K+1)=l_{\textrm{exp}}(x,y,m)$. For the majority of this paper we assume access to samples $S=\{(x_i,y_i,m_i)\}_{i=1}^n$ where $\{(x_i,y_i)\}_{i=1}^n$ are drawn i.i.d. from the unknown distribution $\mathbf{P}$ and $m_i$ is drawn from the distribution of the random variable $M|(X=x_i,Y=y_i)$, as well as access to the realizations of $l_{exp}$ and $l$ when required.
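To make the pipeline and the reduction concrete, the following is a minimal Python sketch (function names are ours for illustration, not part of the released code; labels are 0-indexed) of the combined predictor and of the cost vector $\mathbf{c}$ for one sample under the $0{-}1$ costs:
\begin{verbatim}
import numpy as np

def system_prediction(h, r, expert, x, z):
    """Combined predictor: if the rejector fires (r(x) = 1), the expert,
    who may use side-information z, makes the final decision; otherwise
    the classifier h predicts from x alone."""
    return expert(z) if r(x) == 1 else h(x)

def cost_vector(y, m, K):
    """Cost vector c in R^{K+1} for one sample (x, y, m) under the 0-1
    system loss: entries 0..K-1 cost I[i != y], and the last entry, the
    cost of deferring, is the expert's error I[m != y]."""
    c = np.ones(K + 1)
    c[y] = 0.0            # predicting the true label costs nothing
    c[K] = float(m != y)  # deferring costs 1 exactly when the expert errs
    return c
\end{verbatim}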
\section{Proposed Surrogate Loss}\label{sec:surrog} It is clear that the system loss function \eqref{eq:original_reject_loss} is not only non-convex but also computationally hard to optimize. The usual approach in machine learning is to formulate upper bounding convex surrogate loss functions and optimize them in hopes of approximating the minimizers of the original loss \cite{bartlett2006convexity}. Work from rejection learning \cite{cortes2016learning,ni2019possibility} suggested learning two separate functions $h$ and $r$ and provided consistent convex surrogate loss functions only for the binary setting. We extend their proposed surrogates to our expert deferral setting for binary labels with slight modifications in appendix \ref{apx:proofs}. Consistency is used to prove that a proposed surrogate loss is a good candidate and is often treated as a necessary condition. The issue with the proposed surrogates in \cite{cortes2016learning} for rejection learning is that, when extended to the multiclass setting, it is impossible for them to be consistent, as was shown by \cite{ni2019possibility}. Aside from the consistency issue, \cite{ni2019possibility} found that simple baselines can outperform the proposed losses in practice. The construction of our proposed surrogate loss for the multiclass expert deferral setting will be motivated in two ways: the first is through a novel reduction to cost sensitive learning, and the second is inspired by the Bayes minimizer of the $0{-}1$ system loss \eqref{eq:01_reject_loss}. Let $g_i: \mathcal{X} \to \mathbb{R}$ for $i \in [K+1]$ and define $h(x) = \arg \max_{i \in [K+1]}g_i(x)$; motivated by the success of the cross entropy loss, our proposed surrogate for cost-sensitive learning $\tilde{L}_{CE}$ takes the following form: \begin{align} &\tilde{L}_{CE}(g_1,\cdots,g_{K+1},x,c(1),\cdots,c(K+1)) = - \sum_{i=1}^{K+1} (\max_{j \in [K+1]} c(j) - c(i)) \log\left( \frac{\exp(g_i(x))}{\sum_k \exp(g_k(x))} \right) \end{align} The loss $\tilde{L}_{CE}$ is a novel surrogate loss for cost sensitive learning that generalizes the cross entropy loss when the costs correspond to multiclass misclassification. The following proposition shows that the loss is consistent, meaning that its minimizer over all measurable functions agrees with the Bayes solution. \begin{proposition} $\tilde{L}_{CE}$ is convex in $\mathbf{g}$ and is a consistent loss function for $\tilde{L}$: \begin{center} let $\bm{\tilde{g}}=\arg \inf_{\mathbf{g}} \mathbb{E}\left[ \tilde{L}_{CE}(\mathbf{g},\mathbf{c}) |X=x\right]$, then: $\arg \max_{i \in [K+1]}\bm{\tilde{g}}_i = \arg \min_{i \in [K+1]} \mathbb{E}[c(i)|X=x]$ \end{center} \label{prop:lce_cost_sensitive} \end{proposition} Proof of Proposition \ref{prop:lce_cost_sensitive} can be found in Appendix \ref{apx:proofs}; $\tilde{L}_{CE}$ is a simpler consistent alternative to the surrogates derived in \cite{chen2019surrogate} for cost sensitive learning. Now we consider the case when the system loss function is $L_{0{-}1}$ \eqref{eq:01_reject_loss}. Our approach is to treat deferral as a new class: we construct a new label space $\mathcal{Y}^\bot=\mathcal{Y} \cup \bot$ and a corresponding distribution $\mathbb{P}(Y^\bot|X=x)$ such that minimizing the misclassification loss on this new space will be equivalent to minimizing our system loss $L_{0{-}1}$.
The Bayes optimal classifier on $\mathcal{Y}^\bot$ is clearly $h^\bot= \arg \max_{y^\bot\in\mathcal{Y}^\bot} \mathbb{P}(Y^\bot=y^\bot|X=x)$, and we need it to match the decision of the Bayes solution $h^B,r^B$ of $L_{0{-}1}$ \eqref{eq:01_reject_loss}: \begin{equation} h^B, r^B = \arg\inf_{h,r} L_{0{-}1}(h,r) \end{equation} where the infimum is over all measurable functions. Denote $\eta_y(x)=\mathbb{P}(Y=y|X=x)$. It is clear that for $x\in \mathcal{X}$ the best classifier is the same as the Bayes solution for standard classification, since if we don't defer we have to do our best. We then only reject the classifier if its expected error is higher than the expected error of the expert, which we formalize in the proposition below: \begin{proposition} The minimizers of the loss $L_{0{-}1}$ \eqref{eq:01_reject_loss} are defined point-wise for all $x\in \mathcal{X}$ as: \begin{align} &h^B(x) = \arg \max_{y \in \mathcal{Y}}\eta_y(x) \nonumber \\ &r^B(x)= \mathbb{I}_ {\max_{y \in \mathcal{Y}}\eta_y(x) \leq \mathbb{P}(Y = M|X=x) } \label{propeq:bayes_01} \end{align} \end{proposition} Proof of the above proposition can be found in Appendix \ref{apx:proofs}, and equation \eqref{propeq:bayes_01} gives us sufficient conditions against which to check the consistency of our proposed loss. Let $g_y: \mathcal{X} \to \mathbb{R}$ for $y \in \mathcal{Y}$ and define $h(x) = \arg \max_{y \in \mathcal{Y}}g_y(x)$; similarly, let $g_\bot: \mathcal{X} \to \mathbb{R}$ and define $r(x)= \mathbb{I}_{\max_{y \in \mathcal{Y}}g_y(x) \leq g_\bot(x) }$. The proposed surrogate loss for $L_{0{-}1}$ \eqref{eq:01_reject_loss} in the multiclass setting is then: \begin{align} \label{eq:proposed_CE_loss} & L_{CE}(h,r,x,y,m) = - \log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) - \mathbb{I}_{m = y} \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \end{align} The proposed surrogate $L_{CE}$ is in fact consistent and upper bounds $L_{0{-}1}$, as the following theorem demonstrates. \begin{theorem} The loss $L_{CE}$ is convex in $\mathbf{g}$, upper bounds $L_{0{-}1}$ and is consistent: $\inf_{h,r}\mathbb{E}_{x,y,m}[L_{CE}(h,r,x,y,m)]$ is attained at $(h^*_{CE},r^*_{CE})$ such that $h^B(x)=h^*_{CE}(x)$ and $r^B(x)=r^*_{CE}(x)$ for all $x \in \mathcal{X}$. \end{theorem} \begin{sproof} Please refer to appendix \ref{apx:proofs} for the detailed proof. First, the infimum over functions $h,r$ can be replaced by a point-wise infimum: \begin{flalign*} &\inf_{h,r}\mathbb{E}_{x,y,m}[L_{CE}(h,r,x,y,m)] = \mathbb{E}_{x}\inf_{h(x),r(x)}\mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{CE}(h(x),r(x),x,y,m)]& \end{flalign*} Now let us expand the inner expectation: \begin{flalign} \mathbb{E}_{y|x}\mathbb{E}_{m|x,y}[L_{CE}(h(x),r(x),x,y,m)] &=- \sum_{y \in \mathcal{Y}} \eta_y(x) \log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \label{the_body:y|x_cross_loss_withq} \\&- \mathbb{P}(Y= M |X=x) \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \nonumber \end{flalign} For ease of notation denote the RHS of equation \eqref{the_body:y|x_cross_loss_withq} as $L_{CE}(g_1,\cdots,g_{|\mathcal{Y}|},g_\bot)$; note that it is a convex function, hence we will take the partial derivatives with respect to each argument and set them to $0$.
For any $g_\bot$ and $i \in \mathcal{Y}$ we have: \begin{flalign} & \frac{\exp(g_{i}^*(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}^*(x))} = \frac{\eta_i(x)}{1 +\mathbb{P}(Y= M|X=x)}\label{the_body:optimal_g} \end{flalign} The optimal $h^*$ for any $g_\bot$ should satisfy equation \eqref{the_body:optimal_g} for every $i \in \mathcal{Y}$. Plugging in $h^*$ and taking the derivative with respect to $g_\bot$ we get: \begin{flalign} &\nonumber \frac{\exp(g_{\bot}^*(x))}{\sum_{y' \in \mathcal{Y}}\exp(g_{y'}^*(x))} = \frac{\mathbb{P}(Y= M|X=x)}{1+\mathbb{P}(Y= M|X=x)} \end{flalign} Since the exponential is an increasing function, the optimal $h^*$ and $r^*$ in fact agree with the Bayes solution. \end{sproof} When the costs $c(1), \cdots, c(K+1)$ are in accordance with our expert deferral setting, the loss $\tilde{L}_{CE}$ reduces to $L_{CE}$. Now, stepping back and looking more closely at our loss $L_{CE}$, we can see that on examples where the expert makes a mistake the loss becomes the cross entropy loss with the target. On the other hand, when the expert agrees with the target, the learner faces two opposing decisions: whether to defer or to predict the target. We can encourage or hinder the action of deferral by modifying the loss with an additional parameter $\alpha \in \mathbb{R}^+$ as $L_{CE}^\alpha(h,r,x,y,m)$: \begin{align} \nonumber L_{CE}^\alpha(h,r,x,y,m)=& -( \alpha \cdot \mathbb{I}_{m = y} + \mathbb{I}_{m \neq y} )\log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \\&- \mathbb{I}_{m = y} \log\left(\frac{\exp(g_{\bot}(x))}{\sum_{y' \in \mathcal{Y} \cup \bot}\exp(g_{y'}(x))} \right) \nonumber \end{align} Note that $ L_{CE}^1 = L_{CE}$. The effect of $\alpha$ is to re-weight examples where the expert is correct, discouraging the learner from fitting them and instead focusing on examples where the expert makes a mistake. In practice, one would treat $\alpha$ as an additional hyperparameter to optimize for. \section{Theoretical analysis}\label{sec:theory} In this section we focus on the zero-one system loss function $L_{0{-}1}$ and try to understand previously proposed solutions in the literature in comparison with our method from a theoretical perspective. \subsection{Failure of Confidence Scores Method} Let us first remind ourselves of the Bayes solution for the system loss: \begin{align*} &h^B(x) = \arg \max_{y \in \mathcal{Y}}\eta_y(x), \quad r^B(x)= \mathbb{I}_ {\max_{y \in \mathcal{Y}}\eta_y(x) \leq \mathbb{P}(Y = M|X=x) } \end{align*} The form of the Bayes solution above suggests a very natural approach: 1) learn a classifier minimizing the misclassification loss with the target and obtain confidence scores for its predictions; 2) obtain confidence scores for expert agreement with the target, which can be done by learning a model whose target is whether the expert agrees with the task label and extracting confidence scores from this model \cite{raghu2019direct}; and finally 3) compare which of the classifier and the expert is more confident and defer accordingly. We refer to this as the confidence score method (Confidence); this approach leads to a consistent estimator for both the rejector and the classifier and was proposed by \cite{raghu2019algorithmic}.
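As a concrete illustration, a minimal sketch of this baseline's deferral rule (assuming two separately trained networks, one producing logits over the $K$ classes and one producing a single logit for expert agreement; the function name is ours):
\begin{verbatim}
import torch
import torch.nn.functional as F

def confidence_deferral(clf_logits, agree_logit):
    """Steps 1-3 above: compare the classifier's top-class probability
    (an estimate of max_y eta_y(x)) against the estimated probability
    that the expert agrees with the target, P(M = Y | x)."""
    clf_conf, clf_pred = F.softmax(clf_logits, dim=1).max(dim=1)
    expert_conf = torch.sigmoid(agree_logit).squeeze(1)  # shape (batch, 1) assumed
    defer = expert_conf >= clf_conf   # r(x) = 1: hand the example to the expert
    return clf_pred, defer
\end{verbatim}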
In fact this is the standard approach in rejection learning \cite{bartlett2008classification,ramaswamy2018consistent,ni2019possibility}, and a host of different methods exist for estimating a classifier's confidence on new examples, including trust scores \cite{jiang2018trust} and Monte-Carlo dropout for neural networks \cite{gal2016dropout}, among many others. However, the key pitfall of this method in the expert deferral setup is that it does not allow $h$ to adapt to the expert's strengths and weaknesses. When we restrict our search space to a limited class of functions $\mathcal{H}$ and $\mathcal{R}$, this approach can easily fail. We now give a toy example where learning the classifier independently fails, which motivates the need to jointly learn both the classifier and rejector. \begin{wrapfigure}{r}{0.50\textwidth} \centering \resizebox{0.50\textwidth}{!}{ \includegraphics[clip,scale=0.7,trim={0.0cm 9.3cm 21.0cm 0.0cm}]{figures/example_figure.pdf} } \caption{Setting of two groups, red and blue; the task is binary classification with labels $\{o,+\}$. The expert is accurate on the red majority group, hence the classifier should attempt to fit the blue group, with the rejector (black line) separating the groups.} \label{fig:expert_gauss} \end{wrapfigure} Assume that there exist two sub-populations in the data, denoted $A=1$ and $A=0$, where $\mathbb{P}(A=1)\geq \mathbb{P}(A=0)$, from which $X \in \mathbb{R}^d$ is generated. Conditional on the target and population, $X|(Y=y,A=0)$ is normally distributed according to $ \mathcal{N}(\mu_{y,0}, \Sigma)$, while $X|(Y=y,A=1)$ consists of two clusters: cluster (1) is only separable by a complex non-linear boundary, and cluster (2) is normally distributed but with means that are not well separated; the data is illustrated in Figure \ref{fig:expert_gauss}. Finally, we assume the expert is able to perfectly classify group $A=1$: on cluster (1) the expert is able to compute the complex nonlinear boundary, and on cluster (2) the expert has side-information $Z$ that allows them to separate the classes, which is not possible from $X$ alone. Our hypothesis spaces $\mathcal{H}$ and $\mathcal{R}$ will be the set of all $d$-dimensional hyperplanes. If we start by learning $h$, then the resulting hyperplane will try to minimize the average error across both groups; this will likely result in a hyperplane that separates neither group, as the data is not linearly separable, especially on group $A=1$. If we assume that the boundary between the groups is linear as shown, then we can achieve the error of the Bayes solution within our hypothesis space: the optimal behavior in this setting is clearly to have $h$ fit group $A=0$ (note that here the Bayes solution corresponds to a hyperplane via linear discriminant analysis for $2$ classes on $A=0$) and to have the rejector $r$ separate the groups, as illustrated in Figure \ref{fig:expert_gauss}. This example illustrates the complexities of this setting: due to limited model capacity, there are significant gains to be achieved from adapting to the expert by focusing only on group $A=0$. Setting aside model capacity, the nonlinear boundary of cluster (1) is sample intensive to learn, as we only have access to finite data. Finally, cluster (2) cannot be separated even with infinite data since the side-information of the expert is needed, and so the hard task is to identify the region of cluster (2). This serves to illustrate the complexities of the setup and the importance of learning the classifier and rejector jointly.
\subsection{Inconsistency of Mixtures of Experts Loss and Realizable-Consistency} So far we have focused on classification consistency to verify the soundness of proposed approaches; however, we usually have specific hypothesis classes $\mathcal{H}, \mathcal{R}$ in mind, and if the Bayes predictor is not in our class then consistency might not guarantee much \cite{ben2012minimizing}. For example, for binary classification with half-spaces, any predictor learned with a convex surrogate loss can have arbitrarily high error if the best half-space has non-zero error \cite{ben2012minimizing}. The previous example illustrated in Figure \ref{fig:expert_gauss} shows a mode of failure that exists in the expert deferral setup even in the realizable setting. Therefore, a more relevant requirement for our example is that the minimizers of a proposed surrogate loss and the original loss agree for given hypothesis classes in the \emph{realizable} setting; this is formally captured by the notion below. \begin{definition}[realizable $(\mathcal{H},\mathcal{R})$-consistency] A surrogate loss $L_{surr}$ is realizable $(\mathcal{H},\mathcal{R})$-consistent if for all distributions $\mathbf{P}$ and experts $M$ for which there exist $h^*,r^* \in \mathcal{H} \times \mathcal{R}$ that have zero error $L(h^*,r^*)=0$, we have $\forall \epsilon >0$, $\exists \delta >0$ such that if $(\hat{h},\hat{r})$ satisfies \begin{center} $\left| L_{surr}(\hat{h},\hat{r}) - \inf_{h \in \mathcal{H}, r \in \mathcal{R}} L_{surr}(h,r) \right| \leq \delta$, then: $ L(\hat{h},\hat{r}) \leq \epsilon$ \end{center} \end{definition} A similar notion was introduced for classification by \cite{long2013consistency} and for rejection learning by \cite{cortes2016learning}; however, here we have the added dimension of the expert. Note that the expert deferral setting considered here can be thought of as a hard mixture of two experts problem where one of the experts is fixed \cite{jordan1994hierarchical,shazeer2017outrageously,madras2018predict}. This observation motivates a natural mixture of experts type loss: let $g_y: \mathcal{X} \to \mathbb{R}$ for $y \in \mathcal{Y}$ with $h(x) = \arg \max_{y \in \mathcal{Y}}g_y(x)$, and let $r_i: \mathcal{X} \to \mathbb{R}$ for $i \in \{0,1\}$ with $r(x)= \arg\max_{i \in \{0,1\}} r_i(x)$; the mixture of experts loss is defined as: \begin{align} & L_{mix}(\mathbf{g},\mathbf{r},x,y,m) = -\log\left(\frac{\exp(g_{y}(x))}{\sum_{y' \in \mathcal{Y}}\exp(g_{y'}(x))} \right) \frac{\exp(r_{0}(x))}{\sum_{i \in \{0,1\}}\exp(r_{i}(x))} + \mathbb{I}_{m \neq y} \frac{\exp(r_{1}(x))}{\sum_{i \in \{0,1\}}\exp(r_{i}(x))} \label{eq:mix_of_exp_loss} \end{align} The above loss extends the approach of \cite{madras2018predict} to the multiclass setting. As the next proposition demonstrates, $L_{mix}$ is in general \emph{not} classification consistent; however, it is realizable $(\mathcal{H},\mathcal{R})$-consistent for classes closed under scaling, which include linear models and neural networks. \begin{proposition} $L_{mix}$ is realizable $(\mathcal{H},\mathcal{R})$-consistent for classes closed under scaling but is not classification consistent. \label{prop:realizable_madras} \end{proposition} Proof of proposition \ref{prop:realizable_madras} can be found in Appendix \ref{apx:proofs}.
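For reference, a minimal PyTorch-style sketch of $L_{mix}$ \eqref{eq:mix_of_exp_loss} (tensor and function names are ours):
\begin{verbatim}
import torch
import torch.nn.functional as F

def mixture_of_experts_loss(clf_logits, rej_logits, labels, expert_preds):
    """L_mix: a soft gate over {predict, defer} mixes the classifier's
    cross-entropy with the expert's 0-1 error. rej_logits has shape
    (batch, 2): column 0 is r_0 (predict), column 1 is r_1 (defer)."""
    log_p = F.log_softmax(clf_logits, dim=1)
    gate = F.softmax(rej_logits, dim=1)
    nll = -log_p[torch.arange(labels.shape[0]), labels]  # -log softmax_y(g)
    expert_err = (expert_preds != labels).float()        # I[m != y], a constant
    return (nll * gate[:, 0] + expert_err * gate[:, 1]).mean()
\end{verbatim}
Note that in this sketch the expert term is a constant per example, so during training only the gate receives gradient signal from the expert's errors; this observation is relevant to the failure mode we report for this loss in our experiments.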
Note that integrating more information about $M$ into $L_{mix}$ would not make the loss consistent: the inconsistency arises from the parameterization in $\bm{g}$; setting the classifier loss to simply be $\mathbb{I}_{h(x)\neq y}$ would make $L_{mix}$ consistent, at the cost of losing convexity and differentiability in $\bm{g}$. While $L_{mix}$ is indeed realizable consistent, it is not convex in both $\bm{g}$ and $\bm{r}$, hence it is not clear how to efficiently optimize it. Setting aside computational feasibility, it is also not immediately clear which of consistency and realizable $(\mathcal{H},\mathcal{R})$-consistency is more practically relevant. In our experimental section we show how the mismatch between the model and expert losses and their actual errors causes this method to learn the incorrect behavior, which hints that classification consistency is crucial. \subsection{Generalization Bound For Joint Learning} In this subsection we analyze the sample complexity of jointly learning a rejector and classifier. The goal is to find the minimizer of the empirical version of our system loss when our hypothesis spaces for $h$ and $r$ are $\mathcal{H}$ and $\mathcal{R}$ respectively: \begin{equation} \hat{h}^*,\hat{r}^* = \arg\min_{h \in \mathcal{H}, r \in \mathcal{R}} L^S_{0{-}1}(h,r):= \frac{1}{n} \sum_{i=1}^n \ \mathbb{I}_{h(x_i) \neq y_i} \mathbb{I}_{r(x_i) = 0} + \mathbb{I}_{m_i \neq y_i}\mathbb{I}_{r(x_i)=1} \end{equation} By going after the system loss directly, we can approximate the population minimizers $h^*,r^*$ over $\mathcal{H} \times \mathcal{R}$ of $L_{0{-}1}$ \eqref{eq:01_reject_loss}. The optimum $h^*$ may not necessarily coincide with the optimal minimizer of the misclassification loss with the target, which is why learning jointly is critical. We now give a generalization bound for our empirical minimization procedure for a binary target. \begin{theorem} For any expert $M$ and data distribution $\mathbf{P}$ over $\mathcal{X} \times \mathcal{Y}$, let $0<\delta<\frac{1}{2}$; then with probability at least $1-\delta$, the following holds for the empirical minimizers $(\hat{h}^*,\hat{r}^*)$: \begin{align} L_{0{-}1}(\hat{h}^*,\hat{r}^*) &\leq L_{0{-}1}(h^*,r^*) + \mathfrak{R}_n(\mathcal{H}) + \mathfrak{R}_{n}(\mathcal{R}) + \mathfrak{R}_{n \mathbb{P}(M \neq Y)/2}(\mathcal{R}) \nonumber \\ & + 2\sqrt{\frac{\log{\frac{2}{\delta}}}{2n}} +\frac{\mathbb{P}(M\neq Y)}{2} \exp\left(- \frac{n \mathbb{P}(M \neq Y)}{8} \right) \label{eq:the_generalization_bound} \end{align} \end{theorem} Proof of the above theorem can be found in Appendix \ref{apx:proofs}. We can see that the performance of our empirical minimizer is controlled by the Rademacher complexities $\mathfrak{R}_{n}(\mathcal{H})$ and $\mathfrak{R}_{n}(\mathcal{R})$ of the classifier and rejector model classes and by the error of the expert. Note that when $\mathbb{P}(M \neq Y)=0$ we recover the bound proved in Theorem 1 of \cite{cortes2016learning} for rejection learning with $c=0$; this gives evidence that deferring to an expert is a more sample intensive problem than rejection learning. Both our loss $L_{CE}$ and the confidence scores approach lead to consistent estimators; however, as we will later show in our experiments, one differentiating factor will be that of sample complexity.
We can already see in the bound \eqref{eq:the_generalization_bound} that we pay for the complexity of both the rejector and classifier model classes; our approach combines the rejector and classifier in one model to avoid these added costs. \section{Experiments}\label{sec:experiments} We provide code to reproduce our experiments \footnote{\url{https://github.com/clinicalml/learn-to-defer}}. In Appendix \ref{apx:guide} we give a detailed guide on implementing our method. Additional experimental details and results are left to Appendix \ref{apx:experiments}. \subsection{Synthetic Data} As a first toy example to showcase that our proposed loss $L_{CE}^\alpha$ is able to adapt to the underlying expert behavior, we perform experiments in a Gaussian mixture setup akin to the example in section \ref{sec:theory}. The covariate space is $\mathcal{X}= \mathbb{R}^d$ and the target is $\mathcal{Y} = \{0,1\}$; we assume that there exist two sub-populations in the data, denoted $A=1$ and $A=0$. Furthermore, $X|(Y=y,A=a)$ is normally distributed according to $ \mathcal{N}(\mu_{y,a}, \Sigma_{y,a})$. The expert follows the Bayes solution for group $A=1$, which here corresponds to a hyperplane. Our hypothesis spaces $\mathcal{H}$ and $\mathcal{R}$ will be the set of all $d$-dimensional hyperplanes. \textbf{Setup:} We perform 200 trials where on each trial we generate: random group proportions $\mathbb{P}(A=1) \sim U(0,1)$ fixing $\mathbb{P}(Y=1|A=a)=0.5$, and random means and variances for each Gaussian component $X|Y=y,A=a \sim \mathcal{N}(\mu_{y,a}, \Sigma_{y,a})$ where $\mu_{y,a} \sim U(0,10)^d$ and similarly for the diagonal components $\Sigma_{y,a}(i,i) \sim U(0,10)$, keeping non-diagonal components $0$, with dimension $d=10$; we generate in total $1000$ samples each for training and testing. We compare against oracle behavior and two baselines: 1) an oracle method (Oracle) that trains only on $A=0$ data and trains the rejector to separate the groups with knowledge of group labels; 2) the confidence score baseline (Confidence) that trains a linear model on all the data, then trains a different linear model on all the data where the labels are the expert's agreement with the target, and finally compares which of the two is more confident according to the probabilities assigned by the corresponding models; and 3) our implementation of the approach in \cite{madras2018predict} (MixOfExp). \textbf{Results:} We train a multiclass logistic regression model with our loss $L_{CE}^\alpha$ for $\alpha \in \{0,0.5,1\}$ and record in table \ref{table:guassian} the difference in accuracy between our method and the baselines for the best performing $\alpha$. We can see that our method with $\alpha=0$ outperforms the confidence baseline by $6.39$ points on average in classification accuracy and matches the oracle method with a $0.22$ average positive difference, which shows the success of our method. When trained with the loss $L_{CE}^1$ or $L_{CE}^{.5}$, the model matches the confidence baseline; the reason is that with $\alpha \neq 0$ the model will still try to fit the target $Y$, but the model class here is not rich enough to allow the model to reasonably fit the target and adapt to the expert. \begin{table}[H] \caption{Comparison of our methods with the confidence score baseline, the oracle baseline and our implementation of the method of \cite{madras2018predict}.
We compute a 95\% confidence interval for the average difference between the baselines and our method.} \label{table:guassian} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \begin{tabular}{lcr} \toprule Difference in system accuracy & Average & 95\% interval \\ \midrule $L_{CE}^0$-Confidence \cite{raghu2019algorithmic} & 6.39 & [3.71,9.06] \\ $L_{CE}^0$-Oracle & 0.22 & [-1.71,2.15] \\ $L_{CE}^0$-MixOfExp \cite{madras2018predict} & 2.01 & [0.14,4.06] \\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \subsection{CIFAR-10} As our first real-data experimental evaluation, we conduct experiments on the celebrated CIFAR-10 image classification dataset \cite{krizhevsky2009learning}, consisting of $32 \times32$ color images drawn from 10 classes, split into 50,000 train and 10,000 test images. \textbf{Synthetic Expert.} We simulate multiple synthetic experts of varying competence in the following way: let $k \in [10]$; if the image belongs to the first $k$ classes the expert predicts perfectly, otherwise the expert predicts uniformly at random over all classes. The classifier and expert costs are assumed to be the misclassification costs. \textbf{Base Network.} Our base network for classification is a Wide Residual Network (WideResNet) \cite{zagoruyko2016wide}, which with data augmentation and hyperparameter tuning can achieve a $96.2\%$ test accuracy. Since our goal is not to achieve better accuracies but to show the merit of our approach for a given fixed model, we disadvantage the model by not using data augmentation and by using a smaller network. The WideResNet with 28 layers minimizing the cross-entropy loss achieves $90.47\%$ test accuracy when trained for 200 epochs until it fits the data; this will be our benchmark model. We use SGD with momentum and a cosine annealing learning rate schedule. \textbf{Proposed Approach:} Following section 4, we parameterize $h$ and $r$ (specifically $g_\bot$) by a WideResNet with $11$ output units, where the first $10$ units represent $h$ and the $11$'th unit is $g_\bot$, and minimize the proposed surrogate $L_{CE}^\alpha$ \eqref{eq:proposed_CE_loss}; a minimal sketch of this setup is given after the baselines below. We also experimented with having $h$ be a WideResNet with $10$ output units and $g_\bot$ a separate WideResNet with a single output unit and observed identical results. We show results for $\alpha \in \{0.5,1\}$. \textbf{Baselines:} We compare against three baselines. The first baseline trains the rejector to recognize whether the image is in the first $k$ classes and defers accordingly; we call this baseline "LearnedOracle", as this rejector is a learned implementation of what the optimal rejector should do. The second baseline is the confidence score method \cite{raghu2019algorithmic} and the third is the mixture-of-experts loss of \cite{madras2018predict}; details of the implementation of this final baseline are left to Appendix \ref{apx:madras}.
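For concreteness, a minimal PyTorch-style sketch of the synthetic expert and of the surrogate $L_{CE}^\alpha$ as used in this setup (assuming the network outputs $K{+}1=11$ logits with the last unit playing the role of $g_\bot$; function names are ours, not part of the released code):
\begin{verbatim}
import torch
import torch.nn.functional as F

def synthetic_expert(labels, k, num_classes=10):
    """Expert that is perfect on the first k classes and guesses
    uniformly at random otherwise."""
    guess = torch.randint(0, num_classes, labels.shape)
    return torch.where(labels < k, labels, guess)

def deferral_loss(logits, labels, expert_preds, alpha=1.0):
    """L_CE^alpha: logits has shape (batch, K+1); unit K is g_bot."""
    log_p = F.log_softmax(logits, dim=1)
    expert_correct = (expert_preds == labels).float()   # I[m = y]
    weight = alpha * expert_correct + (1.0 - expert_correct)
    clf_term = -weight * log_p[torch.arange(labels.shape[0]), labels]
    defer_term = -expert_correct * log_p[:, -1]         # -I[m = y] log p_bot
    return (clf_term + defer_term).mean()
\end{verbatim}
At test time we predict $\arg\max_{y \in \mathcal{Y}} g_y(x)$ and defer whenever $g_\bot(x)$ exceeds that maximum, matching the rejector definition in section \ref{sec:surrog}.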
\begin{figure} \centering \begin{subfigure}{.5\textwidth} \centering \resizebox{3.2in}{!}{% \begin{tikzpicture} \begin{axis}[ title={}, xlabel={k (classes expert can predict)}, ylabel={Accuracy}, xmin=0, xmax=10, ymin=88, ymax=100, xtick={0,1,2,3,4,5,6,7,8,9,10}, ytick={88,89,90,91,92,93,94,95,96,97,98,99,100}, legend pos=north west, legend style={nodes={scale=0.8, transform shape}}, ymajorgrids=true, grid style=dashed, ] \addplot[ color=green, mark=square, ] coordinates { (0,90.47)(1,89.54)(2,89.51)(3,89.48)(4,90.75)(5,90.64)(6,93.25)(7,95.28)(8,96.52)(9,98.16)(10,100) }; \addlegendentry{LearnedOracle} \addplot[ color=blue, mark=diamond, ] coordinates { (0,90.47)(1,90.47)(2,90.56)(3,90.71)(4,91.41)(5,92.52)(6,94.15)(7,95.5)(8,97.35)(9,98.05)(10,100) }; \addlegendentry{Confidence \cite{raghu2019algorithmic}} \addplot[ color=red, mark=*, ] coordinates { (0,90.47)(1,90.92)(2,91.01)(3,91.94)(4,92.69)(5,93.66)(6,96.03)(7,97.11)(8,98.25)(9,99)(10,100) }; \addlegendentry{$L_{CE}^{.5}$} \addplot[ color=black, mark=*, ] coordinates { (0,90.47)(1,90.41)(2,91)(3,91.47)(4,92.42)(5,93.4)(6,95.07)(7,96.49)(8,97.3)(9,97.7)(10,100) }; \addlegendentry{$L_{CE}^{1}$} \addplot[ color=violet, mark=+, ] coordinates { (0,90.47)(1,90.47)(2,90.47)(3,90.47)(4,90.47)(5,90.47)(6,90.47)(7,90.47)(8,94.48)(9,95.09)(10,100) }; \addlegendentry{MixOfExp \cite{madras2018predict}} \end{axis} \end{tikzpicture} }% \caption{} \label{fig:kvssystem2} \end{subfigure}% \begin{subfigure}{.5\textwidth} \centering \resizebox{3.2in}{!}{% \begin{tikzpicture} \begin{axis}[ title={}, xlabel={Coverage (ratio of examples predicted)}, ylabel={Accuracy on non-deferred examples}, xmin=0, xmax=100, ymin=88, ymax=100, xtick={0,10,20,30,40,50,60,70,80,90,100}, ytick={88,90,91,92,93,94,95,96,97,98,99,100}, ymajorgrids=true, grid style=dashed, ] \addplot[ color=green, mark=square, ] coordinates { (100,90.47)(90.34,90)(80.33,89.27)(71.71,89.27)(62.43,90.52)(49.65,91.98)(43.12,92.76)(29.03,94.87)(18.34,95.2)(9.2,95.32)(0,100) }; \addlegendentry{LearnedOracle} \addplot[ color=blue, mark=diamond, ] coordinates { (100,90.47)(96.64,90.57)(94.63,90.44)(90.29,90.9)(84.45,91.97)(72.12,92.45)(64.91,93.93)(61.1,94.7)(37.86,94.82)(41.2,96.84)(0,100) }; \addlegendentry{Confidence \cite{raghu2019algorithmic}} \addplot[ color=red, mark=*, ] coordinates { (100,90.47)(88.78,91.48)(78.66,91.02)(69.02,91.63)(58.47,93.41)(47.72,93.56)(39.05,95.57)(29.15,95.88)(19.41,96.08)(9.53,96.75)(0,100) }; \addlegendentry{$L_{CE}^{.5}$} \addplot[ color=black, mark=*, ] coordinates { (100,90.47)(90.3,91.71)(88,91.7)(91.61,93.1)(76.79,93.92)(71.35,95.42)(61.86,95.28)(49.77,96.49)(46.48,97.3)(47.85,96.54)(0,100) }; \addlegendentry{$L_{CE}^{1}$} \addplot[ color=violet, mark=+, ] coordinates { (100,90.47)(100,90.47)(100,90.47)(100,90.47)(100,90.47)(100,90.47)(100,90.47)(100,90.47)(46.27,90.47)(40.22,90.47)(0,100) }; \addlegendentry{MixOfExp \cite{madras2018predict}} \legend{}; \end{axis} \end{tikzpicture} }% \caption{} \label{fig:covvsacc} \end{subfigure} \caption{The left figure shows the overall system accuracy of our method and the baselines ($k$ is the number of classes the expert can predict) and the right figure compares the accuracy on the non-deferred examples versus the coverage for every $k$.} \label{fig:cifar10_2figs} \end{figure} \textbf{Results.} In Figure \ref{fig:kvssystem2} we plot the accuracy of the combined algorithm and expert system versus $k$, the number of classes the expert can predict perfectly.
We can see that the models trained with $L_{CE}^{0.5}$ and $L_{CE}^1$ outperform the baselines, by $1.01$\% on average over the confidence score baseline and by $1.94$\% on average over LearnedOracle. To look more closely at the behavior of our method, we plot in Figure \ref{fig:covvsacc} the accuracy on the non-deferred examples versus the coverage (the fraction of examples not deferred) for each $k$. We can see that the model trained with $L_{CE}^1$ dominates all baselines, giving better coverage and accuracy for the classifier's predictions. This gives evidence that our loss allows the model to predict only when it is highly confident. \textbf{Why do we outperform the baselines?} 1) \emph{Sample complexity}: The Confidence baseline \cite{raghu2019algorithmic} requires training two networks while ours requires only one; when data is limited, our approach therefore gives significant improvements in comparison. We experiment with increasing training set sizes while keeping the test set fixed, training our model with $L_{CE}^1$ and the Confidence baseline. Figure \ref{fig:dataregimes} plots system accuracy versus training set size when training with the expert $k=5$. We can see that when data is limited our approach massively improves on the baseline; for example, with $2000$ training points, Confidence achieves $62.33$\% accuracy while our method achieves $70.12$\%, a $7.89$ point increase. 2) \emph{Taking into consideration both expert and model confidence}: the LearnedOracle baseline ignores model confidence entirely and focuses only on the region where the expert is correct. While this is the behavior of the Bayes classifier in this setup, when dealing with a limited model class and limited data this is no longer the correct behavior. For this reason, our model outperforms the LearnedOracle baseline. 3) \emph{Consistency}: the mixture-of-experts loss of \cite{madras2018predict} fails in this setup and learns to never defer. The reason is that during training, the loss of the classifier converges to zero while validation classifier accuracy still improves in the meantime, whereas the loss of the expert remains constant; thus we never defer. \begin{figure}[h] \centering \resizebox{2.8in}{!}{% \begin{tikzpicture} \begin{semilogxaxis}[ title={}, xlabel={Dataset size (in thousands)}, ylabel={System accuracy}, xmin=1, xmax=50, ymin=55, ymax=94, xtick={1,2,3,5,8,10,20,50}, ytick={55,58,61,64,67,70,73,76,79,82,85,89,94}, legend pos=south east, log ticks with fixed point, ymajorgrids=true, yminorticks=false, grid style=dashed, ] \addplot[ color=blue, mark=square, ] coordinates { (1,55.06)(2,62.23)(3,68.02)(5,71.98)(8,79.12)(10,80.46)(20,86.94)(50,92.52) }; \addlegendentry{Confidence} \addplot[ color=black, mark=*, ] coordinates { (1,62.63)(2,70.12)(3,72.96)(5,75.15)(8,81.1)(10,82.79)(20,88.28)(50,93.4) }; \addlegendentry{$L_{CE}^{1}$} \end{semilogxaxis} \end{tikzpicture} }% \caption{Varying training set size when training with expert $k=5$ for the Confidence baseline and our method $L_{CE}^1$.} \label{fig:dataregimes} \end{figure} \subsection{CIFAR-100} We repeat the experiments described above on the CIFAR-100 dataset \cite{krizhevsky2009learning}. A 28-layer WideResNet achieves a 79.28\% test accuracy when training with data augmentation (random crops and flips).
The simulated experts also operate in a similar fashion: for $k \in \{10,20,\cdots,100\}$, if the image is in the first $k$ classes, the expert predicts the correct label with probability $0.94$, simulating state-of-the-art performance on CIFAR-100 (93.8\% test accuracy \cite{kolesnikov2019large}); otherwise the expert predicts uniformly at random. Compared against the confidence score baseline, the model trained with $L_{CE}^1$ outperforms it by a 1.60 difference in test accuracy on average for $30 \leq k\leq 90$ and otherwise performs on par. This again gives evidence for the efficacy of our method; full experimental results are available in Appendix \ref{apx:expcifar100}. \subsection{CIFAR10H and limited expert data} Obtaining expert labels for entire datasets may in fact be prohibitively expensive, as standard dataset sizes have grown into millions of points \cite{deng2009imagenet}. Therefore it is more realistic to expect that the expert has labeled only a fraction of the data. In the following experiments we assume access to fully labeled data $S_l = \{(x_i,y_i,m_i)\}_{i=1}^m$ and data without expert labels $S_u=\{ (x_i,y_i)\}_{i=m+1}^n$. The goal again is to learn a classifier $h$ and rejector $r$ from the two datasets $S_l$ and $S_u$. \textbf{Data:} To experiment in settings where we have limited expert data, we use the dataset \texttt{CIFAR10H} \cite{peterson2019human}, initially developed to improve model robustness. \texttt{CIFAR10H} contains, for each data point in the CIFAR-10 test set, fifty crowdworker annotations recorded as counts for each of the 10 classes. The training set of CIFAR-10 constitutes $S_u$, and we randomly split the test set in half, where one half constitutes $S_l$ and the other is used for testing; we randomize the splitting over 10 trials. \textbf{Expert:} We simulate the behavior of an average human annotator by sampling from the class counts for each data point. Our simulated expert has an average classification accuracy of 95.22\% with a standard deviation of 0.18 over 100 runs. The performance of the expert is non-uniform over the classes: for example, on the class \textit{cat} the expert has 91.0\% accuracy while on \textit{horse} it has 97.8\% accuracy. \textbf{Proposed Approach:} Our method is to learn $f_m: \mathcal{X} \to \{0,1 \}$ to predict whether the expert errs from the data $\tilde{S}_l = \{(x_i,\mathbb{I}_{y_i \neq m_i})\}_{i=1}^m$; using $f_m$, we label $S_u$ with expert disagreement labels for use in our loss function and obtain $\hat{S}_u$. Note that since our loss function does not care which label the expert predicts but only whether the expert errs, this task simplifies to binary classification instead of classification over the target $\mathcal{Y}$. A sketch of this imputation step is given below.
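The following is a minimal PyTorch-style sketch of this step (names are ours throughout; \texttt{error\_net} stands for any binary classifier over $\mathcal{X}$, and full-batch gradient steps are used only to keep the sketch short):
\begin{verbatim}
import torch
import torch.nn.functional as F

def impute_expert_errors(error_net, x_l, y_l, m_l, x_u, steps=500, lr=1e-3):
    """Fit f_m on S_l-tilde = {(x_i, 1[y_i != m_i])} and impute
    expert-error labels on the expert-unlabeled set S_u."""
    opt = torch.optim.Adam(error_net.parameters(), lr=lr)
    err = (m_l != y_l).long()                   # 1 iff the expert errs
    for _ in range(steps):
        opt.zero_grad()
        F.cross_entropy(error_net(x_l), err).backward()
        opt.step()
    with torch.no_grad():
        err_hat = error_net(x_u).argmax(dim=1)  # imputed 1[y != m] on S_u
    # 1 - err_hat then plays the role of 1[m = y] in L_CE on S_u-hat
    return err_hat
\end{verbatim}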
Finally, we train using our loss $L_{CE}$ on $\hat{S}_u \cup S_l$; we refer to our method as "$L_{CE}$ impute". \begin{table}[h] \caption{Comparison of our proposed methods on \texttt{CIFAR10H} with a baseline based on confidence scores, recording system accuracy, coverage, and classifier accuracy on non-deferred examples.} \label{table:cifar10h} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \begin{tabular}{lccr} \toprule Method & System & Coverage & Classifier \\ \midrule $L_{CE}$ impute & \textbf{96.29}$\pm$0.25 & 51.67$\pm$1.46 & \textbf{99.2}$\pm$0.08 \\ $L_{CE}$ 2-step & 96.03$\pm$0.21 & 60.81$\pm$0.87 & 98.11$\pm$0.22 \\ Confidence \cite{raghu2019algorithmic} & 95.09$\pm$0.40 & \textbf{79.48}$\pm$5.93 & 96.09$\pm$0.42 \\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \textbf{Results.} We compare against a confidence score baseline where we train a classifier on $S_u$ and then model the expert on $S_l$. Results are shown in Table \ref{table:cifar10h}: our method outperforms the confidence method by $1.2$ points in system accuracy and by an impressive $3.1$ points on the data points where the classifier has to predict. To show the effect of imputing expert labels on $S_u$, we also first train our model using $L_{CE}$ on $S_u$ and then fine-tune to learn deferral on $S_l$; we refer to this as "$L_{CE}$ 2-step". It is possible that approaches inspired by state-of-the-art semi-supervised learning methods could give further improvements \cite{oliver2018realistic,berthelot2019mixmatch}. \subsection{Hate Speech and Offensive Language Detection} We conduct experiments on the dataset created by \cite{davidson2017automated}, consisting of 24,783 tweets annotated as hate speech, offensive language, or neither. We create a synthetic expert whose error rate differs according to the demographic of the tweet's author, as described in what follows. \textbf{Expert.} \cite{blodgett-etal-2016-demographic} developed a probabilistic language model that can identify whether a tweet is in African-American English (AAE); this model was used by \cite{davidson2019racial} to audit classifiers for racial bias. We use the same model and predict that a tweet is in AAE if the predicted probability is higher than $0.5$. Our expert model is as follows: if the tweet is in AAE, then with probability $p$ the expert predicts the correct label and otherwise predicts uniformly at random; if the tweet is not in AAE, the expert predicts the correct label with probability $q$ and otherwise predicts uniformly at random. We experiment with 3 different choices of $p$ and $q$: 1) a fair expert with $\{p=0.9,q=0.9\}$, 2) an expert biased against AAE tweets $\{p=0.75,q=0.9\}$, and 3) an expert biased against non-AAE tweets $\{p=0.9,q=0.75\}$. \textbf{Our Approach.} For our model we use the CNN developed in \cite{kim2014convolutional} for text classification, with 100-dimensional GloVe embeddings \cite{pennington2014glove} and $300$ filters of sizes $\{3,4,5\}$, using dropout. This CNN achieves an 89.5\% average accuracy on the classification task, comparable to the 91\% achieved by \cite{davidson2017automated} with a feature-heavy linear model. We randomly split the dataset into training, validation, and test sets with a 60/10/30\% split; we repeat the experiments for 5 random splits. We use a grid search over the validation set to find $\alpha$.
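For concreteness, the simulated annotator above can be implemented in a few lines; the sketch below uses our own naming and NumPy, not the original experiment code:
\begin{verbatim}
import numpy as np

def demographic_expert(y, is_aae, p, q, num_classes=3, rng=None):
    """Predict y correctly with probability p on AAE tweets and q otherwise;
    on a miss, predict uniformly at random over all classes."""
    rng = np.random.default_rng() if rng is None else rng
    acc = np.where(is_aae, p, q)                 # per-tweet accuracy
    correct = rng.random(y.shape[0]) < acc
    guess = rng.integers(0, num_classes, size=y.shape[0])
    return np.where(correct, y, guess)
\end{verbatim}
For example, \texttt{demographic\_expert(y, is\_aae, p=0.75, q=0.9)} simulates the expert biased against AAE tweets.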
\textbf{Results.} We compare against two baselines: the first is Confidence; the second is an oracle baseline that first trains a model on the classification task and then implements the Bayes rejector $r^B(x)$ equipped with knowledge of $p$, $q$, and the tweet's demographic group. Both our model trained with $L_{CE}^1$ and the confidence score baseline achieve similar accuracy and coverage, with the oracle baseline performing only slightly better across the three experts. For the expert biased against AAE tweets, our model trained with $L_{CE}^1$ achieves 92.91$\pm$0.17 system accuracy, Confidence 92.42$\pm$0.40, and Oracle 93.22$\pm$0.11. This suggests that both approaches are performing close to optimally in this setting. \textbf{Racial Bias.} A major concern in this setting is whether the end-to-end system consisting of the classifier and expert is discriminatory. We define the discrimination of a predictor as the difference between the false positive rates on AAE tweets and on non-AAE tweets, where false positives are tweets flagged as hate speech or offensive when they were not. Surprisingly, the confidence score baseline with the fair expert doubles the discrimination of the overall system compared to the classifier acting on its own: the classifier has a discrimination of $0.226$ on all the test data and the fair expert a discrimination of $0.03$, while the confidence score baseline has a discrimination of $0.449$. This reiterates the established fact that fairness does not compose \cite{dwork2018fairness}. In fact, the end-to-end system can also be less discriminatory even if the individual components are more discriminatory: for the expert with higher error rates on non-AAE tweets (discrimination of $0.084$), the discrimination of the confidence score method reduces to $0.151$. While our method does not achieve significantly lower discrimination than the baseline, integrating fairness constraints for the end-to-end system becomes easier since we can adapt the classifier. Complete experimental results can be found in Appendix \ref{apx:exphate}. \subsection{Synthetic Experts on CheXpert}\label{subsec:chexpert} \subsubsection{Setup} \textbf{Task.} CheXpert is a large chest radiograph dataset that contains over 224 thousand images of 65,240 patients, automatically labeled for the presence of 14 observations using radiology reports \cite{irvin2019chexpert}. In addition to the automatically labeled training set, \cite{irvin2019chexpert} make publicly accessible a validation set of 200 patients labeled by a consensus of 3 radiologists and hide a further test set of 500 patients labeled by 8 radiologists. We focus here on the detection of only the 5 observations that make up the "competition tasks" \cite{irvin2019chexpert}: Atelectasis, Cardiomegaly, Consolidation, Edema, and Pleural Effusion. This is a multi-task problem with 5 separate binary tasks; we learn to defer on an individual task basis. \textbf{Expert.} We create a simulated expert as follows: if the chest X-ray contains support devices (the presence of support devices is part of the label), then the expert is correct with probability $p$ on all tasks independently, and if the X-ray does not contain support devices, then the expert is correct with probability $q$.
We vary $q \in \{0.5,0.7\}$ and $p \in \{0.7,0.8,0.9,1\}$ to obtain different experts. We let $p\geq q$, as one can imagine that a patient with support devices has a prior medical history that the expert is aware of and can use as side-information. \textbf{Data.} We use the downsampled-resolution version of CheXpert \cite{irvin2019chexpert} and split the training data set with an 80-10-10 split on a patient basis into training, validation, and testing sets respectively; no patients are shared among the splits. Images are normalized and resized to be compatible with pre-trained ImageNet models, and we use data augmentation in the form of random resized crops, horizontal flips, and random rotations of up to $15\degree$ during training. Note that a small subset of the training data has an uncertainty label "U" instead of a binary label, indicating that the automatic annotator is uncertain; we ignore these points on a task basis during training and testing. \textbf{Baselines.} We implement two baselines: a threshold confidence baseline that learns a threshold on just the confidence of the classifier model, chosen to maximize system AU-ROC, to decide when to defer (ModelConfidence), which is the post-hoc thresholding method in \cite{madras2018predict}; and the Confidence baseline \cite{raghu2019algorithmic}. We use temperature scaling \cite{guo2017calibration} to ensure calibration of all baselines on the validation set. \textbf{Model.} Following \cite{irvin2019chexpert}, we use the DenseNet121 architecture for our model with pre-trained weights on ImageNet; the loss for the baseline models is the average of the binary cross-entropy losses for each of the tasks. We train the baseline models using Adam for 4 epochs. For our approach we train for 3 epochs using the cross-entropy loss and then train for one epoch using $L_{CE}^\alpha$, with $\alpha$ chosen to maximize the area under the receiver operating characteristic curve (AU-ROC) of the combined system on the validation set for each of the 5 tasks (each task is treated separately). We observe similar results if we train for the first three epochs with $L_{CE}^1$ and then train for one epoch with a validated choice of $\alpha$. \textbf{Experimental setup.} In a clinical setting there might be a cost associated with querying a radiologist; this imposes a constraint on how often we can query the radiologist, i.e., on our model's coverage (the fraction of examples on which the algorithm predicts). We constrain our method and the baselines to achieve $c\%$ coverage for $c \in [100]$ to simulate the spectrum between complete automation and none.\\ We achieve this for our method by first sorting the test set based on $q(x) := g_{\bot}(x) - \max(g_0(x),g_1(x))$ across all patients $x$ in the test set; to achieve coverage $c$, we define $\tau = q(x_c)$, where $q(x_c)$ is the $c$'th percentile of the outputs $q(x)$, and let $r(x) =1 \iff q(x) \geq \tau$. The definition of $\tau$ ensures that we obtain exactly $c\%$ coverage. For ModelConfidence we achieve this by letting $q(x) = 1- \max(g_0(x),g_1(x))$ (where $g$ is the output of a separately trained model from the one for our method), which is the classifier's natural probability of error from the softmax output, and for Confidence we let $q(x)$ be the difference between the radiologist's confidence and the classifier's confidence; a sketch of this thresholding rule is given below.
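In code, the rule amounts to a percentile cut on the deferral margin; here is a minimal NumPy sketch (names are ours), which also covers the baselines by swapping in their respective scores $q(x)$:
\begin{verbatim}
import numpy as np

def defer_mask_at_coverage(g, c):
    """g: (n, 3) array of outputs [g_0, g_1, g_bot] for a binary task;
    c: desired coverage in percent. Returns r with r[i] = True meaning
    example i is deferred to the expert (so c% of examples are kept)."""
    q = g[:, 2] - np.maximum(g[:, 0], g[:, 1])  # q(x) = g_bot - max(g_0, g_1)
    tau = np.percentile(q, c)                   # tau = c'th percentile of q
    return q >= tau                             # r(x) = 1  <=>  q(x) >= tau
\end{verbatim}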
\subsubsection{Results} In Figure \ref{fig:auc_vs_cov_toy_orig} we plot the overall system (expert and algorithm combined) AU-ROC at each desired coverage for the compared methods, and in Figure \ref{fig:ap_vs_cov_toy_orig} we plot the overall system area under the precision-recall curve (AU-PR) versus the coverage; this is for the expert with $q=0.7$ and $p=1$. The curve for our method dominates the baselines over the entire coverage range for both AU-ROC and AU-PR; moreover, the curves are concave, so we can achieve higher performance by combining expert and algorithm than by using either separately. Our method is able to achieve a higher maximum AU-ROC and AU-PR than both baselines: the difference between the maximum attainable AU-ROC of our method and Confidence is 0.043, 0.029, 0.016, 0.022 and 0.025, respectively, for each of the five tasks. There is a clear hierarchy among the 3 compared methods: our method dominates Confidence, and Confidence in turn dominates ModelConfidence. In fact, ModelConfidence is a special case of the Confidence baseline; since the expert does not have uniform performance over the domain, there are clear gains in modeling the expert. This hierarchy continues to hold as we change the expert behavior by varying the probabilities $p$ and $q$: in Table \ref{table:toy_expert} we show, for each of the 5 tasks, the difference between the average AU-ROC across all coverages (the average value of the curves shown in Figure \ref{fig:auc_vs_cov_toy_orig}) for our method and the Confidence baseline for different expert probabilities, as well as the difference between the maximum achievable AU-ROC. A positive average difference serves to show the degree of dominance of our method over the Confidence baseline; note that the average difference alone cannot imply dominance of the curves, however dominance is still observed. Our method improves on the baselines as the difference between $q$ and $p$ increases; this difference encodes the non-uniformity of the expert behavior over the domain. \begin{figure}[ht] \centering \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1.pdf} \caption{AU-ROC vs coverage for expert $q=0.7,p=1$; the maximum AU-ROC is noted.} \label{fig:auc_vs_cov_toy_orig} \end{subfigure} \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/ap_vs_cov_toyexpert7-1.pdf} \caption{AU-PR vs coverage for expert $q=0.7,p=1$; the maximum AU-PR is noted.} \label{fig:ap_vs_cov_toy_orig} \end{subfigure} \caption{Plot of the area under the ROC curve (AU-ROC) (a) at each level of coverage (0 coverage means only the expert predicts and 1 coverage means only the classifier predicts) and of the area under the precision-recall curve (AU-PR) (b) for each of the 5 tasks, comparing our method with the baselines on the training-derived test set for the toy expert with $q=0.7,p=1$. We report the maximum AU-ROC and AU-PR achieved on each task; error bars are standard deviations derived from 10 runs (averaging over the expert's randomness).} \label{fig:plots_auc_ap_toyexpert} \end{figure} \begin{table}[h] \caption{Average difference in AU-ROC across all coverages, and difference between the maximum achievable AU-ROC, between our method and the Confidence baseline for each of the 5 tasks and different toy expert probabilities $p$ and $q$; each entry is (average difference $\pm$ standard deviation; difference of maximums).
The difference between our method and ModelConfidence is roughly twice the values noted in Table \ref{table:toy_expert}; only for Expert $(0.7,0.7)$ do Confidence and ModelConfidence achieve the same performance, since that expert has uniform error over the domain.} \label{table:toy_expert} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \resizebox{\textwidth}{!}{ \begin{tabular}{l|ccccc} \toprule Expert $(p,q)$ & \textbf{Cardiomegaly} & \textbf{Edema} & \textbf{Consolidation} & \textbf{Atelectasis} & \textbf{Pleural Effusion} \\ \midrule (0.5,0.7) & 0.032$\pm$0.024; 0.002 & 0.015$\pm$0.012; 0.007 & 0.015$\pm$0.008; 0.007 & 0.017$\pm$0.009; 0.007 & 0.007$\pm$0.003; 0.007 \\ \hline (0.5,0.9) & 0.032$\pm$0.017; 0.014 & 0.026$\pm$0.016; 0.024 & 0.010$\pm$0.005; 0.015 & 0.016$\pm$0.008; 0.026 & 0.012$\pm$0.010; 0.004 \\ \hline (0.5,1) & 0.022$\pm$0.012; 0.029 & 0.013$\pm$0.009; 0.019 & 0.007$\pm$0.008; 0.012 & 0.013$\pm$0.006; 0.020 & 0.010$\pm$0.008; 0.012 \\ \hline (0.7,0.7) & 0.024$\pm$0.018; 0.005 & 0.011$\pm$0.009; 0.010 & 0.011$\pm$0.010; 0.009 & 0.006$\pm$0.006; 0.008 & 0.001$\pm$0.001; 0.003 \\ \hline (0.7,0.9) & 0.032$\pm$0.020; 0.024 & 0.010$\pm$0.007; 0.010 & 0.007$\pm$0.007; 0.017 & 0.014$\pm$0.008; 0.017 & 0.010$\pm$0.006; 0.006 \\ \hline (0.7,1) & 0.027$\pm$0.014; 0.042 & 0.016$\pm$0.010; 0.027 & 0.007$\pm$0.007; 0.019 & 0.013$\pm$0.007; 0.022 & 0.014$\pm$0.010; 0.027 \\ \hline (0.8,1) & 0.017$\pm$0.009; 0.023 & 0.011$\pm$0.008; 0.012 & 0.001$\pm$0.004; 0.007 & 0.012$\pm$0.006; 0.009 & 0.010$\pm$0.006; 0.018 \\ \bottomrule \end{tabular} } \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \subsubsection{Further Analysis} \paragraph{Sample Complexity} Training data for chest X-rays is a valuable resource that may not be abundantly available when trying to deploy a machine learning model in a new clinical setting where, for example, the imaging mechanism may differ. It is important to assess the effectiveness of the proposed approaches when the training data size is limited; this furthermore helps us understand the comparative sample complexity of our method versus the baselines. \textbf{Experimental details.} We restrict the training data size for our model and the baselines while keeping the same validation and testing data as previously; the validation data is used only for calibration of the models and for optimizing the choice of $\alpha$. We train using the same procedure as before and report the maximum achievable AU-PR and AU-ROC. The expert we defer to is the synthetic expert described above with $q=0.7$ and $p=1$. A sketch of the patient-level subsampling used to restrict the training data is given below.
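The subsampling is done at the patient level so that no patient straddles the kept and discarded portions; a minimal sketch, assuming a pandas DataFrame with a \texttt{patient\_id} column (our naming, not the CheXpert schema):
\begin{verbatim}
import numpy as np
import pandas as pd

def subsample_by_patient(df: pd.DataFrame, frac: float, seed: int = 0):
    """Keep a random fraction `frac` of patients (not of images)."""
    rng = np.random.default_rng(seed)
    patients = df["patient_id"].unique()
    keep = rng.choice(patients, size=int(frac * len(patients)), replace=False)
    return df[df["patient_id"].isin(keep)]
\end{verbatim}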
\begin{figure}[h] \centering \begin{subfigure}{.5\textwidth} \centering \begin{tikzpicture} \begin{axis}[ title={}, xlabel={Training data size (as \% of original set)}, ylabel={mean AU-ROC}, xmin=0, xmax=100, ymin=0.84, ymax=0.93, xtick={0,5,10,25,50,75,100}, ytick={0.85,0.87,0.88,0.89,0.9,0.91,0.92,0.93,0.95}, legend pos=south west, legend style={nodes={scale=0.8, transform shape}}, ymajorgrids=true, grid style=dashed, ] \addplot+[ black, mark options={black, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 5, 0.875, 0.002 10, 0.876, 0.002 25, 0.878, 0.002 50, 0.879, 0.002 75, 0.88, 0.002 100, 0.879, 0.002 }; \addlegendentry{ModelConfidence} \addplot+[ blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 5, 0.880, 0.002 10, 0.883, 0.002 25, 0.892, 0.002 50, 0.895, 0.002 75, 0.9, 0.003 100, 0.899, 0.003 }; \addlegendentry{Confidence} \addplot+[ red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 5, 0.889, 0.002 10, 0.896, 0.002 25, 0.908, 0.002 50, 0.914, 0.002 75, 0.924, 0.002 100, 0.925, 0.002 }; \addlegendentry{$L_{CE}^\alpha$ (ours)} \end{axis} \end{tikzpicture} \caption{} \label{fig:samplecmplx_auc} \end{subfigure}% \begin{subfigure}{.5\textwidth} \centering \begin{tikzpicture} \begin{axis}[ title={}, xlabel={Training data size (as \% of original set)}, ylabel={mean AU-PR}, xmin=0, xmax=100, ymin=0.61, ymax=0.75, xtick={0,5,10,25,50,75,100}, ytick={0.61,0.62,0.63,0.64,0.65,0.66,0.67,0.68,0.69,0.7,0.71,0.72,0.73,0.74}, legend pos=north west, legend style={nodes={scale=0.8, transform shape}}, ymajorgrids=true, grid style=dashed, ] \addplot+[ black, mark options={black, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 5, 0.624, 0.003 10, 0.613, 0.005 25, 0.632, 0.002 50, 0.661, 0.003 75, 0.664, 0.004 100, 0.6612, 0.001 }; \addlegendentry{ModelConfidence} \addplot+[ blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 5, 0.639, 0.002 10, 0.647, 0.002 25, 0.680, 0.002 50, 0.704, 0.003 75, 0.711, 0.001 100, 0.709, 0.001 }; \addlegendentry{Confidence} \addplot+[ red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 5, 0.637, 0.002 10, 0.669, 0.002 25, 0.689, 0.002 50, 0.722, 0.003 75, 0.733, 0.001 100, 0.736, 0.001 }; \addlegendentry{$L_{CE}^\alpha$ (ours)} \legend{}; \end{axis} \end{tikzpicture} \caption{} \label{fig:samplecmplx_pr} \end{subfigure} \caption{The left figure shows the average over the 5 tasks of the maximum achievable AU-ROC when changing the size of the training data (as a \% of the original set); the right figure shows the same for AU-PR.} \label{fig:samplecmplx_chexpert} \end{figure} \textbf{Results.} In Figure \ref{fig:samplecmplx_chexpert} we plot the average of the maximum achievable AU-ROC (Figure \ref{fig:samplecmplx_auc}) and AU-PR (Figure \ref{fig:samplecmplx_pr}) across the 5 tasks for the different methods as we vary the training set size.
We observe that our method consistently outperforms the baselines and continues to take advantage of further data as the baselines' performance starts to saturate. If we look at the AU-ROC and AU-PR of the expert on deferred examples, we observe negligible differences as the training set size increases for each method; however, if we look at classifier performance on the non-deferred examples, we start to observe a significant difference in AU-ROC and AU-PR in favor of our method while the baselines lag behind. In Figure \ref{fig:plots_classauc_ap_toyexpert} (found in Appendix \ref{apx:exp_chexpert}) we plot the classifier AU-ROC on non-deferred examples versus the coverage for each of the 5 tasks; we can see, for example, that on Cardiomegaly our method at full training data obtains an AU-ROC that is at least 0.2 points greater than that of ModelConfidence at coverage levels below 50\%. One expects ModelConfidence to achieve the best performance when looking at non-deferred examples, and this is in fact true when we look at accuracy. For AU-ROC, however, the ModelConfidence baseline never defers on examples predicted negative, due to the class imbalance, which makes the model very confident in its negative predictions. Thus, any positively labeled example that the model mistakenly labels as negative with high confidence will reduce the AU-ROC at low coverage levels. This also shows that our method, and to an extent the Confidence baseline, make very different deferral decisions that factor in the expert. \paragraph{Impact of input noise} In our previous experimental setup, the input domain of the classifier $\mathcal{X}$, the chest X-ray, is assumed to be sufficient to perfectly predict the label $Y$, as our gold standard is the prediction of expert radiologists from just looking at the X-ray. Therefore, given enough training data and a sufficiently rich model class, a learned classifier from $\mathcal{X}$ would be able to perfectly predict the target and would not need to defer to any expert to achieve better performance. In this set of experiments, we perform two studies. In the first, we hide the left part of the chest X-ray on both training and testing examples to obtain a new input domain $\tilde{\mathcal{X}}$. This limits the power of any learned classifier even in the infinite-data regime, as the left part of the X-ray may hide crucial parts of the input. Figure \ref{fig:noisy_xray} shows this noise applied to a patient's X-ray; the size of the rectangular region was chosen to cover one side of the chest area, and we later experiment with varying the scale of the noise. In the second experiment, we train with the original chest X-rays but evaluate with noisy X-rays, where the noise erases a randomly placed rectangular region of the X-ray (a sketch is given below). This second experiment is meant to illustrate the robustness of the different methods to input noise.
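A minimal sketch of the rectangular-erasure noise follows (our naming; images as arrays, with erased pixels set to zero, which is one reasonable reading of "erasing"):
\begin{verbatim}
import numpy as np

def erase_random_rectangle(img, scale, ratio=2.0, rng=None):
    """Zero out a randomly placed rectangle covering `scale` of the image
    area with height:width aspect `ratio` (2:1 in our experiments)."""
    rng = np.random.default_rng() if rng is None else rng
    h, w = img.shape[:2]
    area = scale * h * w
    rh = min(h, int(round(np.sqrt(area * ratio))))  # rectangle height
    rw = min(w, int(round(np.sqrt(area / ratio))))  # rectangle width
    top = int(rng.integers(0, h - rh + 1))
    left = int(rng.integers(0, w - rw + 1))
    out = img.copy()
    out[top:top + rh, left:left + rw] = 0
    return out
\end{verbatim}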
\begin{figure} \centering \begin{subfigure}{.5\textwidth} \centering \includegraphics[scale=0.7,trim={10.0cm 1.0cm 10.0cm 1.0cm}]{figures/xray_original.pdf} \caption{Original X-ray} \label{fig:kvssystem} \end{subfigure}% \begin{subfigure}{.5\textwidth} \centering \includegraphics[scale=0.7,trim={10.0cm 1.0cm 10.0cm 1.0cm}]{figures/xray_noisy.pdf} \caption{X-ray with hidden left part} \label{fig:covvsacc_hidden} \end{subfigure} \caption{The left figure (a) shows the chest X-ray of a patient with Cardiomegaly; the right figure (b) shows the same X-ray with the left part hidden, which is used as input to the models.} \label{fig:noisy_xray} \end{figure} \textbf{Experimental details.} The noise in the second set of experiments consists of a 2:1 (height:width) randomly located rectangular region whose scale (area) we vary from $0.1$ to $0.66$. The expert is the synthetic expert model with $q=0.7$ and $p=1$. \textbf{Results.} In Figure \ref{fig:plots_auc_ap_toyexpert_left} we plot the AU-ROC and AU-PR of the different methods as we vary coverage when training and testing while hiding the left section of the X-rays. We first observe that the maximum achievable performance of the different methods is significantly reduced; however, the gap between the methods is still present. In Figure \ref{fig:noise_chexpert} we plot the average maximum AU-ROC and AU-PR across the 5 tasks as we vary the area of the rectangular region. While the performance of all the methods degrades with the scale of the noise, the gap between the methods remains constant in terms of AU-PR but diminishes in terms of AU-ROC, as the performance of the baselines remains steady. \begin{figure}[h] \centering \begin{subfigure}{.5\textwidth} \centering \begin{tikzpicture} \begin{axis}[ title={}, xlabel={Noise scale}, ylabel={mean AU-ROC}, xmin=-0.1, xmax=0.7, ymin=0.84, ymax=0.93, xtick={0,0.1,0.2,0.33,0.5,0.66}, ytick={0.85,0.87,0.88,0.89,0.9,0.91,0.92,0.93,0.95}, legend pos=south west, legend style={nodes={scale=0.8, transform shape}}, ymajorgrids=true, grid style=dashed, ] \addplot+[ black, mark options={black, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 0, 0.879, 0.002 0.1, 0.878, 0.002 0.2, 0.873, 0.002 0.33, 0.870, 0.002 0.5, 0.868, 0.002 0.66, 0.868, 0.002 }; \addlegendentry{ModelConfidence} \addplot+[ blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 0, 0.896, 0.002 0.1, 0.891, 0.002 0.2, 0.884, 0.002 0.33, 0.877, 0.002 0.5, 0.871, 0.002 0.66, 0.871, 0.002 }; \addlegendentry{Confidence} \addplot+[ red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 0, 0.922, 0.002 0.1, 0.914, 0.002 0.2, 0.903, 0.002 0.33, 0.893, 0.002 0.5, 0.881, 0.002 0.66, 0.875, 0.002 }; \addlegendentry{$L_{CE}^\alpha$ (ours)} \end{axis} \end{tikzpicture} \caption{} \label{fig:noise_auc} \end{subfigure}% \begin{subfigure}{.5\textwidth} \centering \begin{tikzpicture} \begin{axis}[ title={}, xlabel={Noise scale}, ylabel={mean AU-PR}, xmin=-0.1, xmax=0.7, ymin=0.55, ymax=0.75, xtick={0,0.1,0.2,0.33,0.5,0.66}, ytick={0.55,0.57,0.59,0.61,0.63,0.65,0.67,0.69,0.71,0.73,0.74}, legend pos=north west, legend style={nodes={scale=0.8, transform shape}}, ymajorgrids=true, grid style=dashed, ] \addplot+[ black, mark options={black, scale=0.75}, smooth, error
bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 0, 0.6612, 0.002 0.1, 0.648, 0.002 0.2, 0.612, 0.002 0.33, 0.595, 0.002 0.5, 0.572, 0.002 0.66, 0.560, 0.002 }; \addlegendentry{ModelConfidence} \addplot+[ blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 0, 0.701, 0.002 0.1, 0.682, 0.002 0.2, 0.656, 0.002 0.33, 0.619, 0.002 0.5, 0.589, 0.002 0.66, 0.566, 0.002 }; \addlegendentry{Confidence} \addplot+[ red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit ] table [x=x, y=y,y error=error, col sep=comma] { x, y, error 0, 0.736, 0.002 0.1, 0.702, 0.002 0.2, 0.678, 0.002 0.33, 0.654, 0.002 0.5, 0.616, 0.002 0.66, 0.580, 0.002 }; \addlegendentry{$L_{CE}^\alpha$ (ours)} \legend{}; \end{axis} \end{tikzpicture} \caption{} \label{fig:noise_pr} \end{subfigure} \caption{The left figure shows the average over the 5 tasks of the maximum achievable AU-ROC when changing the scale of the noise (the size of the rectangular region); the right figure shows the same for AU-PR.} \label{fig:noise_chexpert} \end{figure} \begin{figure}[H] \centering \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1_left.pdf} \caption{AU-ROC vs coverage when hiding left part of X-ray.} \label{fig:auc_vs_cov_toy_left} \end{subfigure} \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/ap_vs_cov_toyexpert7-1_left.pdf} \caption{AU-PR vs coverage when hiding left part of X-ray.} \label{fig:ap_vs_cov_toy_left} \end{subfigure} \caption{Plots of AU-ROC and AU-PR as we vary coverage when training and testing with chest X-rays that have their left section hidden. The expert model is $q=0.7$ and $p=1$.} \label{fig:plots_auc_ap_toyexpert_left} \end{figure} \begin{comment} \subsection{Radiologist experts on ChestX-ray14}\label{subsec:nihchest} \textbf{Data.} The ChestX-ray14 dataset consists of 112,120 frontal-view X-ray images annotated for 14 disease findings using an automated tool on the radiology reports. The diseases in ChestX-ray14 differ from those in CheXpert with some common classes; however, the quality of the automated labels is inferior to that of the CheXpert labeler \cite{irvin2019chexpert}. A subset of the ChestX-ray14 dataset was further annotated in follow-up work \cite{majkowska2020chest}: 2414 points from the validation set and 1962 points from the test set received adjudicated radiologist labels for four observations: Fracture, Pneumothorax, Airspace opacity and Nodule/Mass. The adjudicated labels were obtained through asynchronous communication between 3 radiologists drawn randomly from a pool of 24 radiologists for each point. Additionally, the individual annotations of each of the 3 radiologists that decided on the adjudicated label for every point in the test and val set are made available. We split the data to obtain a training set that contains all patients that are not present in the validation and testing sets, and note that the automated ChestX-ray14 labels do not contain labels for Fracture and Airspace opacity. \textbf{Expert.} From the above data, we can simulate the performance of an average expert radiologist as follows. For each point in the test and val sets, we uniformly pick one of the 3 available radiologist labels as the expert label.
Since the ground truth is taken as the adjudicated radiologist label and not simply a majority vote, the random radiologist prediction will differ from the target. \textbf{Model.} We use the same model architecture and training procedure described in the previous subsection \ref{subsec:chexpert}; we frame the problem as a multi-task learning problem over the 4 observations on the val and test sets. We train initially for 3 epochs on the training set and then train for 3 more epochs on the validation set. The resulting ROC and precision-recall curves are shown in Figure \ref{fig:roc_pr_nih}; the expert achieves superior performance in terms of AU-ROC and AU-PR on all tasks except Airspace opacity, however on that task the expert's operating point lies outside the classifier curves. \begin{figure}[H] \centering \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/roc_nih.pdf} \caption{ROC curve for trained classifier and expert.} \label{fig:1} \end{subfigure} \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/pr_nih.pdf} \caption{Precision-Recall curve for classifier and expert.} \label{fig:2} \end{subfigure} \caption{ROC and precision-recall curves for the classifier trained on only the training data and for the classifier trained on the training data and then further trained on the validation set; shown as a point on the curves is the expert performance. Since the training data does not include labels for Fracture and Airspace opacity, performance of the classifier trained on only the training data is random; when further trained on the validation set, performance improves for Airspace opacity and Fracture while marginally reducing performance on Pneumothorax and Nodule/mass.} \label{fig:roc_pr_nih} \end{figure} \textbf{Experimental setup.} As in the previous CheXpert experiments, we will defer to the expert individually on all 4 tasks. \end{comment} \section{Conclusion} In this work we explored a framework where the learning model can choose to defer to an expert or predict. We analyzed the framework theoretically and proposed a novel surrogate loss via a reduction to multiclass cost-sensitive learning. Through experiments on image and text classification tasks, we showcased that our approach not only achieves better accuracy than confidence score baselines but does so with better sample complexity and computational cost. We hope that our method will inspire machine learning practitioners to integrate downstream decision makers into their learning algorithms. Future work will explore how to defer in settings where we have limited expert data, learning from biased expert data, and learning to defer to multiple experts simultaneously. \section*{Acknowledgements} This work was supported by NSF CAREER award $\#1350965$. \subsection{Chest X-Ray} \textbf{Task.} CheXpert is a large chest radiograph dataset that contains over 224 thousand images of 65,240 patients, automatically labeled for the presence of 14 observations using radiology reports \cite{irvin2019chexpert}. In addition to the automatically labeled training set, \cite{irvin2019chexpert} make publicly accessible a validation set of 200 patients labeled by a consensus of 3 radiologists and hide a further test set of 500 patients labeled by 8 radiologists.
We focus here on the detection of only the 5 observations that make up the "competition tasks" \cite{irvin2019chexpert}: Atelectasis, Cardiomegaly, Consolidation, Edema, and Pleural Effusion. This is a multi-task problem with 5 separate binary tasks; we learn to defer on an individual task basis. \subsubsection{Expert models} We conduct experiments with two experts: the first is a toy expert for which we can identify the correct deferral behavior, and the second set of experts tries to emulate actual radiologist behavior. \textbf{1) Toy expert.} We create a simulated expert as follows: if the chest X-ray contains support devices (the presence of support devices is part of the label), then the expert is correct with probability $p$ on all tasks independently, and if the X-ray does not contain support devices, then the expert is correct with probability $q$. We vary $q \in \{0.5,0.7\}$ and $p \in \{0.7,0.8,0.9,1\}$ to obtain different experts. We let $p\geq q$, as one can imagine that a patient with support devices has a prior medical history that the expert is aware of and can use as side-information. \textbf{2) Radiologist expert.} The hidden test set of 500 patients was labeled by 8 radiologists: the consensus of 5 of them constituted the ground truth label, and the annotations of the remaining 3 were used to benchmark radiologist performance. We obtained the ground truth labels and the 3 benchmarking radiologist annotations on the test set, without the input images. Using this test data we learn models of radiologist behavior to use as our expert. The test data consists of tuples $D =\{y_i,e_{1,i},e_{2,i},e_{3,i}\}_{i=1}^{500}$, where $y_i \in \{0,1\}^{14}$ is the ground truth label and $e_{1,i},e_{2,i},e_{3,i} \in \{0,1\}^{14}$ are the predictions of radiologists 1, 2, and 3 respectively; the goal is to learn expert models $\hat{e}_1,\hat{e}_2,\hat{e}_3: \{0,1\}^{14} \to \{0,1\}^{14}$ which map a ground truth label to the radiologists' predictions. We decompose $\hat{e}_i$ into 14 different components $\hat{e}_i^j:\{0,1\}^{14} \to \{0,1\}$ (one for each of the 14 tasks, $j \in [14]$) and learn each separately. We evaluate different strategies to learn the expert models: (1) a class conditional noise model (CCN) of the expert based on the task label, (2) a table lookup from the empirical probabilities of $e_i(j)$ (the $j$'th component of the expert's predictions) given the complete label vector $y$, (3) a logistic regression model treating the label vector $y$ as the covariates and $e_i(j)$ as the target (LR), and finally (4) a random forest classifier (RF). \\ \textit{Evaluation of the strategies.} We use 400 patients to train and 100 patients to test the performance of the strategies, and repeat for 50 trials with different random splits to average out the randomness in the models and data. We look in Table \ref{tab:model_rad_behavior} at the area under the ROC curve (AU-ROC) of the expert models at predicting the radiologists' decisions for all 3 radiologists, and in Table \ref{tab:model_rad_performance} at the average difference between the performance of the expert models at predicting the target, in terms of false positive and true positive rates (FPR, TPR) and F1, and the performance of the actual radiologists. The LR, RF, and table lookup methods perform similarly at the task of predicting the radiologists' decisions, outperforming the naive CCN method; the table lookup method achieves an average AU-ROC of 0.69 over the 5 tasks for radiologist 1.
Additionally, all methods are able to match the FPR of the actual experts within an approximate margin of $\pm 0.05$ and the TPR within a margin of $\pm 0.1$ on average across the 3 radiologists. We use the table lookup to construct our expert model for its simplicity. \begin{table}[h] \centering \small \resizebox{\textwidth}{!}{ \begin{tabular}{l|c|c|c|c|c} &\textbf{Atelectasis} & \textbf{Cardiomegaly} & \textbf{Consolidation} & \textbf{Edema} & \textbf{Pleural Effusion} \\ \midrule \multicolumn{6}{c}{\textbf{Radiologist 1}} \\ \midrule CCN & 0.629, (0.585, 0.663)& 0.634, (0.601, 0.664)& 0.568, (0.528, 0.603)& 0.645, (0.606, 0.681)& 0.781, (0.748, 0.806) \\ \hline table lookup & 0.654, (0.613, 0.7)& 0.667, (0.643, 0.701)& 0.625, (0.596, 0.662)& 0.715, (0.695, 0.732)& 0.806, (0.775, 0.841) \\ \hline LR &0.646, (0.606, 0.692)& 0.638, (0.606, 0.675)& 0.624, (0.569, 0.669)& 0.719, (0.674, 0.757)& 0.791, (0.761, 0.819) \\ \hline RF & 0.638, (0.598, 0.67)& 0.657, (0.627, 0.677)& 0.637, (0.593, 0.679)& 0.720, (0.688, 0.753)& 0.806, (0.781, 0.831) \\ \midrule \multicolumn{6}{c}{\textbf{Radiologist 2}} \\ \midrule CCN & 0.672, (0.642, 0.708)& 0.622, (0.592, 0.643)& 0.540, (0.489, 0.574)& 0.633, (0.601, 0.658)& 0.656, (0.631, 0.688) \\ \hline table lookup & 0.697, (0.673, 0.726)& 0.680, (0.643, 0.709)& 0.609, (0.571, 0.65)& 0.668, (0.634, 0.7)& 0.726, (0.691, 0.752) \\ \hline LR &0.688, (0.65, 0.713)& 0.669, (0.645, 0.694)& 0.597, (0.532, 0.655)& 0.652, (0.613, 0.691)& 0.710, (0.678, 0.748) \\ \hline RF &0.702, (0.673, 0.73)& 0.684, (0.643, 0.703)& 0.617, (0.573, 0.66)& 0.672, (0.643, 0.698)& 0.724, (0.687, 0.754) \\ \midrule \multicolumn{6}{c}{\textbf{Radiologist 3}} \\ \midrule CCN & 0.681, (0.631, 0.711)& 0.636, (0.594, 0.671)& 0.571, (0.508, 0.62)& 0.662, (0.631, 0.69)& 0.726, (0.708, 0.735) \\ \hline table lookup & 0.682, (0.636, 0.711)& 0.658, (0.603, 0.707)& 0.594, (0.539, 0.645)& 0.754, (0.722, 0.775)& 0.792, (0.768, 0.822) \\ \hline LR &0.700, (0.662, 0.729)& 0.657, (0.614, 0.699)& 0.618, (0.528, 0.689)& 0.758, (0.73, 0.781)& 0.785, (0.76, 0.807) \\ \hline RF & 0.686, (0.657, 0.714)& 0.679, (0.653, 0.702)& 0.606, (0.539, 0.672)& 0.774, (0.758, 0.789)& 0.795, (0.768, 0.816) \\ \bottomrule \end{tabular}} \caption{Average AU-ROC and (25, 75) quantiles of AU-ROC over the trials for the models at predicting the radiologists' predictions, for each of the three radiologists.} \label{tab:model_rad_behavior} \end{table} \begin{table}[h] \centering \small \resizebox{\textwidth}{!}{ \begin{tabular}{l|c|c|c|c|c} &\textbf{Atelectasis} & \textbf{Cardiomegaly} & \textbf{Consolidation} & \textbf{Edema} & \textbf{Pleural Effusion} \\ \midrule \multicolumn{6}{c}{\textbf{Radiologist 1}} \\ \midrule CCN & (0.029,0.109), 0.100 & (0.040,0.138), 0.120 & (0.035,0.232), 0.104 & (0.053,0.068), 0.056 & (0.032,0.105), 0.085 \\ \hline table lookup & (0.030,0.124), 0.105 & (0.035,0.113), 0.104 & (0.028,0.252), 0.108 & (0.047,0.073), 0.052 & (0.027,0.110), 0.079 \\ \hline LR &(0.031,0.111), 0.097 & (0.034,0.156), 0.122 & (0.035,0.268), 0.135 & (0.043,0.088), 0.056 & (0.022,0.122), 0.086 \\ \hline RF & (0.034,0.127), 0.120 & (0.032,0.150), 0.118 & (0.034,0.249), 0.115 & (0.043,0.075), 0.048 & (0.022,0.112), 0.079 \\ \midrule \multicolumn{6}{c}{\textbf{Radiologist 2}} \\ \midrule CCN & (0.062,0.076), 0.062 & (0.046,0.106), 0.082 & (0.027,0.286), 0.164 & (0.054,0.105), 0.080 & (0.048,0.096), 0.069 \\ \hline table lookup & (0.056,0.059), 0.050 & (0.051,0.106), 0.085 & (0.030,0.222), 0.135 & (0.051,0.095), 0.066 & (0.052,0.093), 0.077 \\ \hline LR &(0.050,0.071), 0.044 & (0.056,0.102), 0.083 & (0.033,0.232), 0.147 & (0.051,0.108), 0.082 & (0.054,0.094), 0.070 \\ \hline RF &(0.060,0.081), 0.058 & (0.045,0.110), 0.070 & (0.027,0.240), 0.156 & (0.047,0.092), 0.062 & (0.044,0.093), 0.069 \\ \midrule \multicolumn{6}{c}{\textbf{Radiologist 3}} \\ \midrule CCN &(0.037,0.112), 0.079 & (0.035,0.165), 0.130 & (0.021,0.262), 0.214 & (0.069,0.054), 0.046 & (0.042,0.070), 0.066 \\ \hline table lookup & (0.049,0.101), 0.080 & (0.033,0.131), 0.117 & (0.020,0.252), 0.204 & (0.056,0.063), 0.050 & (0.036,0.085), 0.059 \\ \hline LR &(0.044,0.096), 0.079 & (0.038,0.165), 0.130 & (0.023,0.247), 0.194 & (0.062,0.060), 0.046 & (0.038,0.092), 0.064 \\ \hline RF &(0.044,0.105), 0.077 & (0.030,0.154), 0.117 & (0.040,0.240), 0.226 & (0.054,0.054), 0.039 & (0.033,0.083), 0.052 \\ \bottomrule \end{tabular}} \caption{Average absolute difference between the (FPR, TPR) of the expert models at predicting the target and that of the actual radiologists, along with the absolute difference in F1 score.} \label{tab:model_rad_performance} \end{table} \subsubsection{Experimental details} \textbf{Data.} We use the downsampled-resolution version of CheXpert \cite{irvin2019chexpert} and split the training data set with an 80-10-10 split on a patient basis into training, validation, and testing sets respectively; no patients are shared among the splits. Images are normalized and resized to be compatible with pre-trained ImageNet models, and we use data augmentation in the form of random resized crops, horizontal flips, and random rotations of up to $15\degree$ during training. For the radiologist experts, we use the learned "table lookup" expert model to label this data with expert predictions for each of the 3 radiologists. Note that a small subset of the training data has an uncertainty label "U" instead of a binary label, indicating that the automatic annotator is uncertain; we ignore these points on a task basis during training and testing. However, to obtain the expert label for these points (for tasks that do not have an uncertainty label) we replace "U" with a "1" label. \textbf{Baselines.} We implement two baselines: a threshold confidence baseline that learns a threshold on just the confidence of the classifier model, chosen to maximize system AU-ROC, to decide when to defer (ModelConfidence), which is the post-hoc thresholding method in \cite{madras2018predict}; and the Confidence baseline \cite{raghu2019algorithmic}. We use temperature scaling \cite{guo2017calibration} to ensure calibration of all baselines on the validation set. \textbf{Model.} Following \cite{irvin2019chexpert}, we use the DenseNet121 architecture for our model with pre-trained weights on ImageNet; the loss for the baseline models is the average of the binary cross-entropy losses for each of the tasks. We train the baseline models using Adam for 4 epochs. For our approach we train for 3 epochs using the cross-entropy loss and then train for one epoch using $L_{CE}^\alpha$, with $\alpha$ chosen to maximize the area under the receiver operating characteristic curve (AU-ROC) of the combined system on the validation set for each of the 5 tasks (each task is treated separately). We observe similar results if we train for the first three epochs with $L_{CE}^1$ and then train for one epoch with a validated choice of $\alpha$. \textbf{Experimental setup.} In a clinical setting there might be a cost associated with querying a radiologist; this imposes a constraint on how often we can query the radiologist, i.e.,
on our model's coverage (the fraction of examples on which the algorithm predicts). We constrain our method and the baselines to achieve $c\%$ coverage for $c \in [100]$ to simulate the spectrum between complete automation and none.\\ We achieve this for our method by first sorting the test set based on $q(x) := g_{\bot}(x) - \max(g_0(x),g_1(x))$ across all patients $x$ in the test set; to achieve coverage $c$, we define $\tau = q(x_c)$, where $q(x_c)$ is the $c$'th percentile of the outputs $q(x)$, and let $r(x) =1 \iff q(x) \geq \tau$. The definition of $\tau$ ensures that we obtain exactly $c\%$ coverage. For ModelConfidence we achieve this by letting $q(x) = 1- \max(g_0(x),g_1(x))$ (where $g$ is the output of a separately trained model from the one for our method), which is the classifier's natural probability of error from the softmax output, and for Confidence we let $q(x)$ be the difference between the radiologist's confidence and the classifier's confidence. \subsubsection{Results and Analysis} \textbf{Results for toy expert.} In Figure \ref{fig:auc_vs_cov_toy} we plot the overall system (expert and algorithm combined) AU-ROC at each desired coverage for the compared methods, and in Figure \ref{fig:ap_vs_cov_toy} we plot the overall system area under the precision-recall curve (AU-PR) versus the coverage; this is for the expert with $q=0.7$ and $p=1$. The curve for our method dominates the baselines over the entire coverage range for both AU-ROC and AU-PR; moreover, the curves are concave, so we can achieve higher performance by combining expert and algorithm than by using either separately. Our method is able to achieve a higher maximum AU-ROC and AU-PR than both baselines: the difference between the maximum attainable AU-ROC of our method and Confidence is 0.043, 0.029, 0.016, 0.022 and 0.025, respectively, for each of the five tasks. There is a clear hierarchy among the 3 compared methods: our method dominates Confidence, and Confidence in turn dominates ModelConfidence. In fact, ModelConfidence is a special case of the Confidence baseline; since the expert does not have uniform performance over the domain, there are clear gains in modeling the expert. This hierarchy continues to hold as we change the expert behavior by varying the probabilities $p$ and $q$: in Table \ref{table:toy_expert_apx} we show, for each of the 5 tasks, the difference between the average AU-ROC across all coverages (the average value of the curves shown in Figure \ref{fig:auc_vs_cov_toy}) for our method and the Confidence baseline for different expert probabilities, as well as the difference between the maximum achievable AU-ROC. A positive average difference serves to show the degree of dominance of our method over the Confidence baseline; note that the average difference alone cannot imply dominance of the curves, however dominance is still observed. Our method improves on the baselines as the difference between $q$ and $p$ increases; this difference encodes the non-uniformity of the expert behavior over the domain.
\begin{figure}[h] \centering \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1.pdf} \caption{AU-ROC vs coverage for expert $q=0.7,p=1$; the maximum AU-ROC is noted.} \label{fig:auc_vs_cov_toy} \end{subfigure} \begin{subfigure}{.9\textwidth} \centering \includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/ap_vs_cov_toyexpert7-1.pdf} \caption{AU-PR vs coverage for expert $q=0.7,p=1$; the maximum AU-PR is noted.} \label{fig:ap_vs_cov_toy} \end{subfigure} \caption{Plot of the area under the ROC curve (AU-ROC) (a) at each level of coverage (0 coverage means only the expert predicts and 1 coverage means only the classifier predicts) and of the area under the precision-recall curve (AU-PR) (b) for each of the 5 tasks, comparing our method with the baselines on the training-derived test set for the toy expert with $q=0.7,p=1$. We report the maximum AU-ROC and AU-PR achieved on each task; error bars are standard deviations derived from 10 runs (averaging over the expert's randomness).} \label{fig:plots_auc_ap_toyexpert_apx} \end{figure} \begin{table} \caption{Average difference in AU-ROC across all coverages, and difference between the maximum achievable AU-ROC, between our method and the Confidence baseline for each of the 5 tasks and different toy expert probabilities $p$ and $q$; each entry is (average difference $\pm$ standard deviation; difference of maximums). The difference between our method and ModelConfidence is roughly twice the values noted in Table \ref{table:toy_expert_apx}; only for Expert $(0.7,0.7)$ do Confidence and ModelConfidence achieve the same performance, since that expert has uniform error over the domain.} \label{table:toy_expert_apx} \vskip 0.15in \begin{center} \begin{small} \begin{sc} \resizebox{\textwidth}{!}{ \begin{tabular}{l|ccccc} \toprule Expert $(p,q)$ & \textbf{Cardiomegaly} & \textbf{Edema} & \textbf{Consolidation} & \textbf{Atelectasis} & \textbf{Pleural Effusion} \\ \midrule (0.5,0.7) & 0.032$\pm$0.024; 0.002 & 0.015$\pm$0.012; 0.007 & 0.015$\pm$0.008; 0.007 & 0.017$\pm$0.009; 0.007 & 0.007$\pm$0.003; 0.007 \\ \hline (0.5,0.9) & 0.032$\pm$0.017; 0.014 & 0.026$\pm$0.016; 0.024 & 0.010$\pm$0.005; 0.015 & 0.016$\pm$0.008; 0.026 & 0.012$\pm$0.010; 0.004 \\ \hline (0.5,1) & 0.022$\pm$0.012; 0.029 & 0.013$\pm$0.009; 0.019 & 0.007$\pm$0.008; 0.012 & 0.013$\pm$0.006; 0.020 & 0.010$\pm$0.008; 0.012 \\ \hline (0.7,0.7) & 0.024$\pm$0.018; 0.005 & 0.011$\pm$0.009; 0.010 & 0.011$\pm$0.010; 0.009 & 0.006$\pm$0.006; 0.008 & 0.001$\pm$0.001; 0.003 \\ \hline (0.7,0.9) & 0.032$\pm$0.020; 0.024 & 0.010$\pm$0.007; 0.010 & 0.007$\pm$0.007; 0.017 & 0.014$\pm$0.008; 0.017 & 0.010$\pm$0.006; 0.006 \\ \hline (0.7,1) & 0.027$\pm$0.014; 0.042 & 0.016$\pm$0.010; 0.027 & 0.007$\pm$0.007; 0.019 & 0.013$\pm$0.007; 0.022 & 0.014$\pm$0.010; 0.027 \\ \hline (0.8,1) & 0.017$\pm$0.009; 0.023 & 0.011$\pm$0.008; 0.012 & 0.001$\pm$0.004; 0.007 & 0.012$\pm$0.006; 0.009 & 0.010$\pm$0.006; 0.018 \\ \bottomrule \end{tabular} } \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \textbf{Results for radiologist expert.} In Figure \ref{fig:auc_vs_cov_rad1} we plot the overall system (expert and algorithm combined) AU-ROC at each desired coverage for the compared methods, and in Figure \ref{fig:ap_vs_cov_rad1} we plot the overall system area under the precision-recall curve (AU-PR) versus the coverage; this is for radiologist $\#1$.
We can see in Figure \ref{fig:auc_vs_cov_rad1} that our method dominates both baselines over the entire coverage range; for example, the difference between the average AU-ROC across all coverages for our method and Confidence is 0.18 for Cardiomegaly and 0.10 for Edema. The cause of this gap is that the Confidence and ModelConfidence baselines almost never defer on examples where their prediction is 0: the class imbalance makes the classifier very confident on the negative class, so the expert has to predict on the majority of the positive examples. However, the expert has a low true positive rate on these two tasks, which causes the overall performance of the system to drop. With respect to AU-PR in Figure \ref{fig:ap_vs_cov_rad1}, our method performs on par with the baselines on Cardiomegaly, Atelectasis and Pleural Effusion; on Edema and Consolidation the average gap between the curves is in the range of $0.01$--$0.02$.
\begin{figure}[h]
\centering
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_rad1.pdf}
\caption{AU-ROC vs coverage for radiologist $\#1$.}
\label{fig:auc_vs_cov_rad1}
\end{subfigure}
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/ap_vs_cov_rad1.pdf}
\caption{AU-PR vs coverage for radiologist $\#1$.}
\label{fig:ap_vs_cov_rad1}
\end{subfigure}
\caption{Area under the ROC curve (AU-ROC) (a) and area under the precision-recall curve (AU-PR) (b) at each level of coverage (coverage 0 means only the expert predicts; coverage 1 means only the classifier predicts) for each of the 5 tasks, comparing our method with the baselines on the training-derived test set for radiologist $\#1$. We report the maximum AU-ROC and AU-PR achieved on each task; error bars are standard deviations over 10 runs (averaging over the expert's randomness).}
\label{fig:plots_auc_ap_rad1}
\end{figure}
\subsection{Further Analysis}
\subsubsection{Sample Complexity}
Training data for chest X-rays is a valuable resource that may not be abundantly available when deploying a machine learning model in a new clinical setting where, for example, the imaging mechanism may differ. It is therefore important to assess the effectiveness of the proposed approaches when the training data is limited; this also helps us understand the comparative sample complexity of our method versus the baselines.
\textbf{Experimental details.} We restrict the training data size for our model and the baselines while keeping the same validation and testing data as before; the validation data is used only for calibrating the models and optimizing over the choice of $\alpha$ (a minimal sketch of the subsampling protocol is given below). We train using the same procedure as before and report the maximum achievable AU-PR and AU-ROC. The expert we defer to is the synthetic expert described above with $q=0.7$ and $p=1$.
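As a minimal sketch of this protocol, assuming plain uniform subsampling of the training examples (the fraction grid mirrors the $x$-axis of Figure \ref{fig:samplecmplx_chexpert}):
\begin{verbatim}
import numpy as np

def subsample_training_set(X, y, frac, seed=0):
    # Keep a uniformly random `frac` of the training examples; the
    # validation and test splits are reused unchanged across fractions
    # (validation only calibrates the models and picks alpha).
    rng = np.random.default_rng(seed)
    idx = rng.choice(len(y), size=int(frac * len(y)), replace=False)
    return X[idx], y[idx]

# for frac in (0.05, 0.10, 0.25, 0.50, 0.75, 1.00):
#     X_tr, y_tr = subsample_training_set(X_train, y_train, frac)
#     ... train as before, then record the maximum achievable
#     ... AU-ROC / AU-PR over the coverage sweep
\end{verbatim}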
\begin{figure}
\centering
\begin{subfigure}{.5\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
  title={},
  xlabel={Training data size (as \% of original set)},
  ylabel={mean AU-ROC},
  xmin=0, xmax=100,
  ymin=0.84, ymax=0.93,
  xtick={0,5,10,25,50,75,100},
  ytick={0.85,0.87,0.88,0.89,0.9,0.91,0.92,0.93,0.95},
  legend pos=south west,
  legend style={nodes={scale=0.8, transform shape}},
  ymajorgrids=true,
  grid style=dashed,
]
\addplot+[black, mark options={black, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
5, 0.875, 0.002
10, 0.876, 0.002
25, 0.878, 0.002
50, 0.879, 0.002
75, 0.88, 0.002
100, 0.879, 0.002
};
\addlegendentry{ModelConfidence}
\addplot+[blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
5, 0.880, 0.002
10, 0.883, 0.002
25, 0.892, 0.002
50, 0.895, 0.002
75, 0.9, 0.003
100, 0.896, 0.003
};
\addlegendentry{Confidence}
\addplot+[red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
5, 0.889, 0.002
10, 0.896, 0.002
25, 0.908, 0.002
50, 0.914, 0.002
75, 0.924, 0.002
100, 0.922, 0.002
};
\addlegendentry{$L_{CE}^\alpha$ (ours)}
\end{axis}
\end{tikzpicture}
\caption{Mean AU-ROC.}
\label{fig:samplecmplx_auc}
\end{subfigure}%
\begin{subfigure}{.5\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
  title={},
  xlabel={Training data size (as \% of original set)},
  ylabel={mean AU-PR},
  xmin=0, xmax=100,
  ymin=0.61, ymax=0.75,
  xtick={0,5,10,25,50,75,100},
  ytick={0.61,0.62,0.63,0.64,0.65,0.66,0.67,0.68,0.69,0.7,0.71,0.72,0.73,0.74},
  legend pos=north west,
  legend style={nodes={scale=0.8, transform shape}},
  ymajorgrids=true,
  grid style=dashed,
]
\addplot+[black, mark options={black, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
5, 0.624, 0.003
10, 0.613, 0.005
25, 0.632, 0.002
50, 0.661, 0.003
75, 0.664, 0.004
100, 0.6612, 0.001
};
\addlegendentry{ModelConfidence}
\addplot+[blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
5, 0.639, 0.002
10, 0.647, 0.002
25, 0.680, 0.002
50, 0.704, 0.003
75, 0.711, 0.001
100, 0.701, 0.001
};
\addlegendentry{Confidence}
\addplot+[red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
5, 0.637, 0.002
10, 0.669, 0.002
25, 0.689, 0.002
50, 0.722, 0.003
75, 0.733, 0.001
100, 0.736, 0.001
};
\addlegendentry{$L_{CE}^\alpha$ (ours)}
\legend{};
\end{axis}
\end{tikzpicture}
\caption{Mean AU-PR.}
\label{fig:samplecmplx_pr}
\end{subfigure}
\caption{The left figure shows the average over the 5 tasks of the maximum achievable AU-ROC as we change the size of the training data (as a \% of the original set); the right figure shows the same for AU-PR.}
\label{fig:samplecmplx_chexpert}
\end{figure}
\textbf{Results.} In Figure \ref{fig:samplecmplx_chexpert} we plot the average of the maximum achievable AU-ROC (Figure \ref{fig:samplecmplx_auc}) and AU-PR (Figure \ref{fig:samplecmplx_pr}) across the 5 tasks for the different methods as we vary the training set size.
We observe that our method consistently outperforms the baselines and continues to benefit from additional data as the baselines' performance starts to saturate. If we look at the AU-ROC and AU-PR of the expert on deferred examples, we observe negligible differences as the training set size increases for each method; however, if we look at classifier performance on the non-deferred examples, we see a significant difference in AU-ROC and AU-PR in favor of our method while the baselines lag behind. In Figure \ref{fig:plots_classauc_ap_toyexpert} we plot the classifier AU-ROC on non-deferred examples versus coverage for each of the 5 tasks; on Cardiomegaly, for example, our method at full training data obtains an AU-ROC at least 0.2 points greater than that of ModelConfidence at coverage levels below 50\%. One might expect ModelConfidence to achieve the best performance on non-deferred examples, and this is indeed the case for accuracy; for AU-ROC, however, the ModelConfidence baseline never defers on examples it predicts as negative, because the class imbalance makes the model very confident in its negative predictions. Thus, any positive-labeled example that the model mistakenly labels negative with high confidence reduces the AU-ROC at low coverage levels. This also shows that our method, and to an extent the Confidence baseline, make very different deferral decisions that factor in the expert.
\begin{figure}[h]
\centering
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1_classifier.pdf}
\caption{Classifier AU-ROC on non-deferred examples vs coverage for expert $q=0.7,p=1$ with 100\% of the training data.}
\label{fig:classauc_vs_cov_toy}
\end{subfigure}
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1_trainsize-0.1_classifier.pdf}
\caption{Classifier AU-ROC on non-deferred examples vs coverage for expert $q=0.7,p=1$ with 10\% of the training data.}
\label{fig:classauc_vs_cov_toy_10}
\end{subfigure}
\caption{Classifier AU-ROC on non-deferred examples versus coverage for systems learned with 100\% of the training data (a) and with 10\% of the training data (b). Noise at low coverage is due to the reduced sample size.}
\label{fig:plots_classauc_ap_toyexpert}
\end{figure}
\subsubsection{Impact of Input Noise}
In our previous experimental setup, the input domain of the classifier $\mathcal{X}$, the chest X-ray, is assumed to be sufficient to perfectly predict the label $Y$, since our gold standard is the prediction of expert radiologists looking only at the X-ray. Therefore, given enough training data and a sufficiently rich model class, a classifier learned on $\mathcal{X}$ would be able to perfectly predict the target and would not need to defer to any expert to achieve better performance. In this set of experiments, we perform two studies. In the first, we hide the left part of the chest X-ray on both training and testing examples to obtain a new input domain $\tilde{\mathcal{X}}$; this limits the power of any learned classifier even in the infinite-data regime, as the left part of the X-ray may hide crucial parts of the input.
Figure \ref{fig:noisy_xray} shows this occlusion applied to a patient's X-ray; the size of the rectangular region was chosen to cover one side of the chest area, and we later experiment with varying the scale of the noise. In the second study, we train on the original chest X-rays but evaluate on noisy X-rays, where the noise erases a randomly placed rectangular region of the image; this second study is meant to illustrate the robustness of the different methods to input noise.
\begin{figure}
\centering
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[scale=0.7,trim={10.0cm 1.0cm 10.0cm 1.0cm}]{figures/xray_original.pdf}
\caption{Original X-ray}
\label{fig:kvssystem}
\end{subfigure}%
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[scale=0.7,trim={10.0cm 1.0cm 10.0cm 1.0cm}]{figures/xray_noisy.pdf}
\caption{X-ray with hidden left part}
\label{fig:covvsacc}
\end{subfigure}
\caption{The left figure (a) shows the chest X-ray of a patient with Cardiomegaly; the right figure (b) shows the same X-ray with the left part hidden, as used as input to the models.}
\label{fig:noisy_xray}
\end{figure}
\textbf{Experimental details.} The noise in the second set of experiments consists of a randomly located rectangular region with a 2:1 height-to-width ratio, whose scale (fraction of the image area) we vary from $0.1$ to $0.66$; see the code sketch below. The expert is the synthetic expert model with $q=0.7$ and $p=1$.
\textbf{Results.} In Figure \ref{fig:plots_auc_ap_toyexpert_left} we plot the AU-ROC and AU-PR of the different methods as we vary coverage when training and testing with the left section of the X-rays hidden. We first observe that the maximum achievable performance of all methods is significantly reduced; however, the gap between the methods persists. In Figure \ref{fig:noise_chexpert} we plot the average maximum AU-ROC and AU-PR across the 5 tasks as we vary the area of the rectangular region. While the performance of all methods degrades with the scale of the noise, the gap between the methods remains constant in terms of AU-PR but diminishes in terms of AU-ROC, as the performance of the baselines remains steady.
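The two occlusions can be sketched as follows; zero-filling the hidden pixels and the exact rounding of the rectangle's sides are our assumptions, since the text only fixes the $2{:}1$ aspect ratio and the covered area.
\begin{verbatim}
import numpy as np

def hide_left_half(img):
    # First study: hide the left part of the X-ray
    # (applied to both training and testing examples).
    out = img.copy()
    out[:, : img.shape[1] // 2] = 0
    return out

def random_erase(img, scale, rng):
    # Second study: erase a randomly placed rectangle with
    # height:width = 2:1 covering `scale` of the image area
    # (applied at test time only; training images stay clean).
    h, w = img.shape[:2]
    area = scale * h * w
    rh = min(h, int(round(np.sqrt(2.0 * area))))  # rh * rw ~= area
    rw = min(w, max(1, rh // 2))
    top = rng.integers(0, h - rh + 1)
    left = rng.integers(0, w - rw + 1)
    out = img.copy()
    out[top:top + rh, left:left + rw] = 0
    return out

# e.g. random_erase(x, scale=0.33, rng=np.random.default_rng(0))
\end{verbatim}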
\begin{figure}
\centering
\begin{subfigure}{.5\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
  title={},
  xlabel={Noise scale},
  ylabel={mean AU-ROC},
  xmin=-0.1, xmax=0.7,
  ymin=0.84, ymax=0.93,
  xtick={0,0.1,0.2,0.33,0.5,0.66},
  ytick={0.85,0.87,0.88,0.89,0.9,0.91,0.92,0.93,0.95},
  legend pos=south west,
  legend style={nodes={scale=0.8, transform shape}},
  ymajorgrids=true,
  grid style=dashed,
]
\addplot+[black, mark options={black, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
0, 0.879, 0.002
0.1, 0.878, 0.002
0.2, 0.873, 0.002
0.33, 0.870, 0.002
0.5, 0.868, 0.002
0.66, 0.868, 0.002
};
\addlegendentry{ModelConfidence}
\addplot+[blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
0, 0.896, 0.002
0.1, 0.891, 0.002
0.2, 0.884, 0.002
0.33, 0.877, 0.002
0.5, 0.871, 0.002
0.66, 0.871, 0.002
};
\addlegendentry{Confidence}
\addplot+[red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
0, 0.922, 0.002
0.1, 0.914, 0.002
0.2, 0.903, 0.002
0.33, 0.893, 0.002
0.5, 0.881, 0.002
0.66, 0.875, 0.002
};
\addlegendentry{$L_{CE}^\alpha$ (ours)}
\end{axis}
\end{tikzpicture}
\caption{Mean AU-ROC.}
\label{fig:noise_auc}
\end{subfigure}%
\begin{subfigure}{.5\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
  title={},
  xlabel={Noise scale},
  ylabel={mean AU-PR},
  xmin=-0.1, xmax=0.7,
  ymin=0.55, ymax=0.75,
  xtick={0,0.1,0.2,0.33,0.5,0.66},
  ytick={0.55,0.57,0.59,0.61,0.63,0.65,0.67,0.69,0.71,0.73,0.74},
  legend pos=north west,
  legend style={nodes={scale=0.8, transform shape}},
  ymajorgrids=true,
  grid style=dashed,
]
\addplot+[black, mark options={black, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
0, 0.6612, 0.002
0.1, 0.648, 0.002
0.2, 0.612, 0.002
0.33, 0.595, 0.002
0.5, 0.572, 0.002
0.66, 0.560, 0.002
};
\addlegendentry{ModelConfidence}
\addplot+[blue, mark options={blue, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
0, 0.701, 0.002
0.1, 0.682, 0.002
0.2, 0.656, 0.002
0.33, 0.619, 0.002
0.5, 0.589, 0.002
0.66, 0.566, 0.002
};
\addlegendentry{Confidence}
\addplot+[red, mark options={red, scale=0.75}, smooth, error bars/.cd, y fixed, y dir=both, y explicit]
table [x=x, y=y, y error=error, col sep=comma] {
x, y, error
0, 0.736, 0.002
0.1, 0.702, 0.002
0.2, 0.678, 0.002
0.33, 0.654, 0.002
0.5, 0.616, 0.002
0.66, 0.580, 0.002
};
\addlegendentry{$L_{CE}^\alpha$ (ours)}
\legend{};
\end{axis}
\end{tikzpicture}
\caption{Mean AU-PR.}
\label{fig:noise_pr}
\end{subfigure}
\caption{The left figure shows the average over the 5 tasks of the maximum achievable AU-ROC as we change the scale of the noise (size of the rectangular region); the right figure shows the same for AU-PR.
}
\label{fig:noise_chexpert}
\end{figure}
\begin{figure}[h]
\centering
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/auc_vs_cov_toyexpert7-1_left.pdf}
\caption{AU-ROC vs coverage when hiding the left part of the X-ray.}
\label{fig:auc_vs_cov_toy_left}
\end{subfigure}
\begin{subfigure}{.9\textwidth}
\centering
\includegraphics[width=\textwidth,trim={5.0cm 0.0cm 5.0cm 0.0cm}]{figures/ap_vs_cov_toyexpert7-1_left.pdf}
\caption{AU-PR vs coverage when hiding the left part of the X-ray.}
\label{fig:ap_vs_cov_toy_left}
\end{subfigure}
\caption{AU-ROC and AU-PR as we vary coverage when training and testing with chest X-rays whose left section is hidden. The expert model is $q=0.7$ and $p=1$.}
\label{fig:plots_auc_ap_toyexpert_left}
\end{figure}
\section{Radiologist Models}
\textbf{Radiologist expert.} The hidden test set of 500 patients was labeled by 8 radiologists: the consensus of 5 of them constituted the ground-truth label, and the annotations of the remaining 3 were used to benchmark radiologist performance. We obtained the ground-truth labels, the 3 radiologist annotations used for benchmarking on the test set, and the CheXpert automated labels, without the input images. Using this test data we learn models of radiologist behavior to use as our expert. The test data consists of tuples $D =\{y_i,e_{1,i},e_{2,i},e_{3,i}\}_{i=1}^{500}$, where $y_i \in \{0,1\}^{14}$ is the CheXpert label (we use the CheXpert labels instead of the consensus to be consistent with the training set) and $e_{1,i},e_{2,i},e_{3,i} \in \{0,1\}^{14}$ are the predictions of radiologists 1, 2 and 3 respectively. The goal is to learn expert models $\hat{e}_1,\hat{e}_2,\hat{e}_3: \{0,1\}^{14} \to \{0,1\}^{14}$ that map a ground-truth label to the corresponding radiologist's predictions. We decompose each $\hat{e}_i$ into 14 components $\hat{e}_i^j:\{0,1\}^{14} \to \{0,1\}$ (one for each of the 14 tasks, $j \in [14]$) and learn each separately. We evaluate different strategies to learn the expert models: (1) a class-conditional noise (CCN) model of the expert based on the task label, (2) a table lookup based on the empirical probabilities of $e_i(j)$ (the $j$'th component of the expert's predictions) given the complete label vector $y$, (3) a logistic regression (LR) model treating the label vector $y$ as the covariates and $e_i(j)$ as the target, and (4) a random forest (RF) classifier. \\ \textit{Evaluation of the strategies.} We use 400 patients to train and 100 patients to test the performance of the strategies, and repeat for 50 trials with different random splits to average out the randomness in the models and data. Table \ref{tab:model_rad_behavior} reports the area under the ROC curve (AU-ROC) of the expert models at predicting the radiologists' decisions for all 3 radiologists, and Table \ref{tab:model_rad_performance} reports the average difference between the expert models and the actual radiologists in terms of false positive and true positive rates (FPR, TPR) and F1 score at predicting the target. The LR, RF and table-lookup methods perform similarly at predicting the radiologists' decisions, outperforming the naive CCN method; the table-lookup method achieves an average AU-ROC of 0.69 over the 5 tasks for radiologist 1. Additionally, all methods match the FPR of the actual experts within approximately $\pm 0.05$ and the TPR within $\pm 0.1$ on average across the 3 radiologists.
We use the table lookup to construct our expert model for its simplicity; a code sketch of this expert model is given after the tables below.
\begin{table}[h]
\centering
\small
\resizebox{\textwidth}{!}{
\begin{tabular}{l|c|c|c|c|c}
\toprule
 & \textbf{Atelectasis} & \textbf{Cardiomegaly} & \textbf{Consolidation} & \textbf{Edema} & \textbf{Pleural Effusion} \\
\midrule
\multicolumn{6}{c}{\textbf{Radiologist 1}} \\
\midrule
CCN & 0.629, (0.585, 0.663) & 0.634, (0.601, 0.664) & 0.568, (0.528, 0.603) & 0.645, (0.606, 0.681) & 0.781, (0.748, 0.806) \\ \hline
table lookup & 0.654, (0.613, 0.7) & 0.667, (0.643, 0.701) & 0.625, (0.596, 0.662) & 0.715, (0.695, 0.732) & 0.806, (0.775, 0.841) \\ \hline
LR & 0.646, (0.606, 0.692) & 0.638, (0.606, 0.675) & 0.624, (0.569, 0.669) & 0.719, (0.674, 0.757) & 0.791, (0.761, 0.819) \\ \hline
RF & 0.638, (0.598, 0.67) & 0.657, (0.627, 0.677) & 0.637, (0.593, 0.679) & 0.720, (0.688, 0.753) & 0.806, (0.781, 0.831) \\
\midrule
\multicolumn{6}{c}{\textbf{Radiologist 2}} \\
\midrule
CCN & 0.672, (0.642, 0.708) & 0.622, (0.592, 0.643) & 0.540, (0.489, 0.574) & 0.633, (0.601, 0.658) & 0.656, (0.631, 0.688) \\ \hline
table lookup & 0.697, (0.673, 0.726) & 0.680, (0.643, 0.709) & 0.609, (0.571, 0.65) & 0.668, (0.634, 0.7) & 0.726, (0.691, 0.752) \\ \hline
LR & 0.688, (0.65, 0.713) & 0.669, (0.645, 0.694) & 0.597, (0.532, 0.655) & 0.652, (0.613, 0.691) & 0.710, (0.678, 0.748) \\ \hline
RF & 0.702, (0.673, 0.73) & 0.684, (0.643, 0.703) & 0.617, (0.573, 0.66) & 0.672, (0.643, 0.698) & 0.724, (0.687, 0.754) \\
\midrule
\multicolumn{6}{c}{\textbf{Radiologist 3}} \\
\midrule
CCN & 0.681, (0.631, 0.711) & 0.636, (0.594, 0.671) & 0.571, (0.508, 0.62) & 0.662, (0.631, 0.69) & 0.726, (0.708, 0.735) \\ \hline
table lookup & 0.682, (0.636, 0.711) & 0.658, (0.603, 0.707) & 0.594, (0.539, 0.645) & 0.754, (0.722, 0.775) & 0.792, (0.768, 0.822) \\ \hline
LR & 0.700, (0.662, 0.729) & 0.657, (0.614, 0.699) & 0.618, (0.528, 0.689) & 0.758, (0.73, 0.781) & 0.785, (0.76, 0.807) \\ \hline
RF & 0.686, (0.657, 0.714) & 0.679, (0.653, 0.702) & 0.606, (0.539, 0.672) & 0.774, (0.758, 0.789) & 0.795, (0.768, 0.816) \\
\bottomrule
\end{tabular}}
\caption{Average AU-ROC, with (25th, 75th) quantiles over the trials, of the models at predicting each of the three radiologists' predictions.}
\label{tab:model_rad_behavior}
\end{table}
\begin{table}[h]
\centering
\small
\resizebox{\textwidth}{!}{
\begin{tabular}{l|c|c|c|c|c}
\toprule
 & \textbf{Atelectasis} & \textbf{Cardiomegaly} & \textbf{Consolidation} & \textbf{Edema} & \textbf{Pleural Effusion} \\
\midrule
\multicolumn{6}{c}{\textbf{Radiologist 1}} \\
\midrule
CCN & (0.029,0.109), 0.100 & (0.040,0.138), 0.120 & (0.035,0.232), 0.104 & (0.053,0.068), 0.056 & (0.032,0.105), 0.085 \\ \hline
table lookup & (0.030,0.124), 0.105 & (0.035,0.113), 0.104 & (0.028,0.252), 0.108 & (0.047,0.073), 0.052 & (0.027,0.110), 0.079 \\ \hline
LR & (0.031,0.111), 0.097 & (0.034,0.156), 0.122 & (0.035,0.268), 0.135 & (0.043,0.088), 0.056 & (0.022,0.122), 0.086 \\ \hline
RF & (0.034,0.127), 0.120 & (0.032,0.150), 0.118 & (0.034,0.249), 0.115 & (0.043,0.075), 0.048 & (0.022,0.112), 0.079 \\
\midrule
\multicolumn{6}{c}{\textbf{Radiologist 2}} \\
\midrule
CCN & (0.062,0.076), 0.062 & (0.046,0.106), 0.082 & (0.027,0.286), 0.164 & (0.054,0.105), 0.080 & (0.048,0.096), 0.069 \\ \hline
table lookup & (0.056,0.059), 0.050 & (0.051,0.106), 0.085 & (0.030,0.222), 0.135 & (0.051,0.095), 0.066 & (0.052,0.093), 0.077 \\ \hline
LR & (0.050,0.071), 0.044 & (0.056,0.102), 0.083 & (0.033,0.232), 0.147 & (0.051,0.108), 0.082 & (0.054,0.094), 0.070 \\ \hline
RF & (0.060,0.081), 0.058 & (0.045,0.110), 0.070 & (0.027,0.240), 0.156 & (0.047,0.092), 0.062 & (0.044,0.093), 0.069 \\
\midrule
\multicolumn{6}{c}{\textbf{Radiologist 3}} \\
\midrule
CCN & (0.037,0.112), 0.079 & (0.035,0.165), 0.130 & (0.021,0.262), 0.214 & (0.069,0.054), 0.046 & (0.042,0.070), 0.066 \\ \hline
table lookup & (0.049,0.101), 0.080 & (0.033,0.131), 0.117 & (0.020,0.252), 0.204 & (0.056,0.063), 0.050 & (0.036,0.085), 0.059 \\ \hline
LR & (0.044,0.096), 0.079 & (0.038,0.165), 0.130 & (0.023,0.247), 0.194 & (0.062,0.060), 0.046 & (0.038,0.092), 0.064 \\ \hline
RF & (0.044,0.105), 0.077 & (0.030,0.154), 0.117 & (0.040,0.240), 0.226 & (0.054,0.054), 0.039 & (0.033,0.083), 0.052 \\
\bottomrule
\end{tabular}}
\caption{Average absolute difference between the (FPR, TPR) of the expert models at predicting the target and those of the actual radiologists, along with the absolute difference in F1 score.}
\label{tab:model_rad_performance}
\end{table}
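A minimal sketch of this table-lookup expert model for a single radiologist and a single task $j$ is given below; the back-off to a constant probability for label vectors unseen among the 400 training patients is our assumption.
\begin{verbatim}
import numpy as np
from collections import defaultdict

class TableLookupExpert:
    # Empirical model of one radiologist on one task j:
    # P(e_j = 1 | full 14-dim label vector y), estimated by counting.
    def __init__(self, fallback=0.5):
        self.counts = defaultdict(lambda: [0, 0])  # y -> [#e=0, #e=1]
        self.fallback = fallback  # used for unseen label vectors

    def fit(self, Y, e):
        # Y: (n, 14) binary label vectors; e: (n,) expert predictions.
        for y, e_j in zip(map(tuple, Y), e):
            self.counts[y][int(e_j)] += 1
        return self

    def prob_one(self, y):
        n0, n1 = self.counts[tuple(y)]
        return n1 / (n0 + n1) if n0 + n1 > 0 else self.fallback

    def sample(self, y, rng):
        # Draw a simulated radiologist prediction for label vector y.
        return int(rng.random() < self.prob_one(y))

# Usage sketch: one model per radiologist i and task j.
# expert_ij = TableLookupExpert().fit(Y_train, E_train[:, j])
# pred = expert_ij.sample(y_test, np.random.default_rng(0))
\end{verbatim}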